// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/shadow.h"

#include <algorithm>

#include "base/strings/stringprintf.h"
#include "syzygy/common/align.h"

namespace agent {
namespace asan {

uint8 Shadow::shadow_[kShadowSize] = {};
uint8 Shadow::page_bits_[kPageBitsSize] = {};
base::Lock Shadow::page_bits_lock_;

void Shadow::SetUp() {
  // Poison the shadow memory.
  Poison(shadow_, kShadowSize, kAsanMemoryMarker);
  // Poison the first 64k of memory, as it is not addressable.
  Poison(0, kAddressLowerBound, kInvalidAddressMarker);
  // Poison the protection bits array.
  Poison(page_bits_, kPageBitsSize, kAsanMemoryMarker);
}

void Shadow::TearDown() {
  // Unpoison the shadow memory.
  Unpoison(shadow_, kShadowSize);
  // Unpoison the first 64k of memory.
  Unpoison(0, kAddressLowerBound);
  // Unpoison the protection bits array.
  Unpoison(page_bits_, kPageBitsSize);
}

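// Returns true iff the shadow is in its clean post-SetUp state: the
// inaccessible low 64K of the address space is marked invalid, the shadow
// and page bits arrays are marked as ASan memory, and everything else is
// addressable.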
bool Shadow::IsClean() {
  static const size_t kInnacEnd = kAddressLowerBound >> kShadowRatioLog;

  static const size_t kShadowBegin =
      reinterpret_cast<uintptr_t>(shadow_) >> kShadowRatioLog;
  static const size_t kShadowEnd =
      reinterpret_cast<uintptr_t>(shadow_ + kShadowSize) >> kShadowRatioLog;

  static const size_t kPageBitsBegin =
      reinterpret_cast<uintptr_t>(page_bits_) >> kShadowRatioLog;
  static const size_t kPageBitsEnd =
      reinterpret_cast<uintptr_t>(page_bits_ + kPageBitsSize) >>
          kShadowRatioLog;

  size_t i = 0;
  for (; i < kInnacEnd; ++i) {
    if (shadow_[i] != kInvalidAddressMarker)
      return false;
  }

  for (; i < kShadowSize; ++i) {
    if ((i >= kShadowBegin && i < kShadowEnd) ||
        (i >= kPageBitsBegin && i < kPageBitsEnd)) {
      if (shadow_[i] != kAsanMemoryMarker)
        return false;
    } else {
      if (shadow_[i] != kHeapAddressableMarker)
        return false;
    }
  }

  return true;
}

void Shadow::Reset() {
  ::memset(shadow_, 0, kShadowSize);
  ::memset(page_bits_, 0, kPageBitsSize);
}

void Shadow::Poison(const void* addr, size_t size, ShadowMarker shadow_val) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  uintptr_t start = index & (kShadowRatio - 1);
  DCHECK_EQ(0U, (index + size) & (kShadowRatio - 1));

  index >>= kShadowRatioLog;
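  // If |addr| is not aligned to the shadow ratio then the first shadow byte
  // covers a group that is only partially poisoned; record the number of
  // leading bytes (|start|) of that group that remain addressable.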
  if (start)
    shadow_[index++] = start;

  size >>= kShadowRatioLog;
  DCHECK_GT(arraysize(shadow_), index + size);
  ::memset(shadow_ + index, shadow_val, size);
}

void Shadow::Unpoison(const void* addr, size_t size) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  DCHECK_EQ(0U, index & (kShadowRatio - 1));

  uint8 remainder = size & (kShadowRatio - 1);
  index >>= kShadowRatioLog;
  size >>= kShadowRatioLog;
  DCHECK_GT(arraysize(shadow_), index + size);
  ::memset(shadow_ + index, kHeapAddressableMarker, size);

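  // If |size| is not a multiple of the shadow ratio then the trailing shadow
  // byte records how many leading bytes of the final group are addressable.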
  if (remainder != 0)
    shadow_[index + size] = remainder;
}

namespace {

// An array of kHeapFreedMarker bytes. This is used for constructing uint16,
// uint32 and uint64 variants of the marker.
static const uint8 kFreedMarkers[] = {
    kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker,
    kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker };
COMPILE_ASSERT(sizeof(kFreedMarkers) == sizeof(uint64),
               wrong_number_of_freed_markers);
static const uint64& kFreedMarker64 =
    *reinterpret_cast<const uint64*>(kFreedMarkers);
static const uint32& kFreedMarker32 =
    *reinterpret_cast<const uint32*>(kFreedMarkers);
static const uint16& kFreedMarker16 =
    *reinterpret_cast<const uint16*>(kFreedMarkers);
static const uint8& kFreedMarker8 =
    *reinterpret_cast<const uint8*>(kFreedMarkers);

// Marks the given range of shadow bytes as freed, preserving left and right
// redzone bytes.
inline void MarkAsFreedImpl8(uint8* cursor, uint8* cursor_end) {
  for (; cursor != cursor_end; ++cursor) {
    // Preserve block beginnings/ends/redzones as they were originally.
    // This is necessary to preserve information about nested blocks.
    if (ShadowMarkerHelper::IsActiveLeftRedzone(*cursor) ||
        ShadowMarkerHelper::IsActiveRightRedzone(*cursor)) {
      continue;
    }

    // Anything else gets marked as freed.
    *cursor = kHeapFreedMarker;
  }
}

// Marks the given range of shadow bytes as freed, preserving left and right
// redzone bytes. |cursor| and |cursor_end| must be 8-byte aligned.
inline void MarkAsFreedImplAligned64(uint64* cursor, uint64* cursor_end) {
  DCHECK(::common::IsAligned(cursor, sizeof(uint64)));
  DCHECK(::common::IsAligned(cursor_end, sizeof(uint64)));

  for (; cursor != cursor_end; ++cursor) {
    // If the word of shadow memory is entirely green (all zero, thus fully
    // addressable) then mark it as freed in one shot. Otherwise go check its
    // contents byte by byte.
    if (*cursor == 0) {
      *cursor = kFreedMarker64;
    } else {
      MarkAsFreedImpl8(reinterpret_cast<uint8*>(cursor),
                       reinterpret_cast<uint8*>(cursor + 1));
    }
  }
}

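// Marks the given range of shadow bytes as freed, preserving left and right
// redzone bytes. Handles the unaligned head and tail of the range byte by
// byte and the 8-byte aligned middle a word at a time; short ranges fall
// back entirely to the byte-wise implementation.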
inline void MarkAsFreedImpl64(uint8* cursor, uint8* cursor_end) {
  if (cursor_end - cursor >= 2 * sizeof(uint64)) {
    uint8* cursor_aligned = ::common::AlignUp(cursor, sizeof(uint64));
    uint8* cursor_end_aligned = ::common::AlignDown(cursor_end,
                                                    sizeof(uint64));
    MarkAsFreedImpl8(cursor, cursor_aligned);
    MarkAsFreedImplAligned64(reinterpret_cast<uint64*>(cursor_aligned),
                             reinterpret_cast<uint64*>(cursor_end_aligned));
    MarkAsFreedImpl8(cursor_end_aligned, cursor_end);
  } else {
    MarkAsFreedImpl8(cursor, cursor_end);
  }
}

}  // namespace

void Shadow::MarkAsFreed(const void* addr, size_t size) {
  DCHECK_LE(kAddressLowerBound, reinterpret_cast<uintptr_t>(addr));
  DCHECK(::common::IsAligned(addr, kShadowRatio));
  size_t index = reinterpret_cast<uintptr_t>(addr) / kShadowRatio;
  size_t length = (size + kShadowRatio - 1) / kShadowRatio;

  DCHECK_LE(index, kShadowSize);
  DCHECK_LE(index + length, kShadowSize);

  uint8* cursor = shadow_ + index;
  uint8* cursor_end = cursor + length;

  // This isn't as simple as a memset because we need to preserve left and
  // right redzone padding bytes that may be found in the range.
  MarkAsFreedImpl64(cursor, cursor_end);
}

bool Shadow::IsAccessible(const void* addr) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  uintptr_t start = index & 0x7;

  index >>= 3;

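  // A zero shadow byte means the whole 8-byte group is addressable and a
  // redzone marker means none of it is; any other value is treated as a
  // partial marker giving the count of addressable leading bytes.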
  DCHECK_GT(arraysize(shadow_), index);
  uint8 shadow = shadow_[index];
  if (shadow == 0)
    return true;

  if (ShadowMarkerHelper::IsRedzone(shadow))
    return false;

  return start < shadow;
}

bool Shadow::IsLeftRedzone(const void* address) {
  return ShadowMarkerHelper::IsActiveLeftRedzone(
      GetShadowMarkerForAddress(address));
}

bool Shadow::IsRightRedzone(const void* address) {
  uintptr_t index = reinterpret_cast<uintptr_t>(address);
  uintptr_t start = index & 0x7;

  index >>= 3;

  DCHECK_GT(arraysize(shadow_), index);
  uint8 marker = shadow_[index];

  // If the marker is for accessible memory then some addresses may be part
  // of a right redzone, assuming that the *next* marker in the shadow is for
  // a right redzone.
  if (marker == 0)
    return false;
  if (marker <= kHeapPartiallyAddressableByte7) {
    // Bail if this is the very last shadow byte, as there is no next marker
    // to inspect.
    if (index == arraysize(shadow_) - 1)
      return false;
    if (!ShadowMarkerHelper::IsActiveRightRedzone(shadow_[index + 1]))
      return false;
    return start >= marker;
  }

  // Otherwise, check the marker directly.
  return ShadowMarkerHelper::IsActiveRightRedzone(marker);
}

bool Shadow::IsBlockStartByte(const void* address) {
  uintptr_t index = reinterpret_cast<uintptr_t>(address);
  uintptr_t start = index & 0x7;

  index >>= 3;

  DCHECK_GT(arraysize(shadow_), index);
  uint8 marker = shadow_[index];

  if (start != 0)
    return false;
  if (!ShadowMarkerHelper::IsActiveBlockStart(marker))
    return false;

  return true;
}

ShadowMarker Shadow::GetShadowMarkerForAddress(const void* addr) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  index >>= 3;

  DCHECK_GT(arraysize(shadow_), index);
  return static_cast<ShadowMarker>(shadow_[index]);
}

void Shadow::PoisonAllocatedBlock(const BlockInfo& info) {
  COMPILE_ASSERT((sizeof(BlockHeader) % kShadowRatio) == 0, bad_header_size);
  DCHECK(info.header->state == ALLOCATED_BLOCK);

  // Translate the block address to an offset. Sanity check a whole bunch
  // of things that we require to be true for the shadow to have 100%
  // fidelity.
  uintptr_t index = reinterpret_cast<uintptr_t>(info.block);
  DCHECK(::common::IsAligned(index, kShadowRatio));
  DCHECK(::common::IsAligned(info.header_padding_size, kShadowRatio));
  DCHECK(::common::IsAligned(info.block_size, kShadowRatio));
  index /= kShadowRatio;

  // Determine the distribution of bytes in the shadow.
  size_t left_redzone_bytes = (info.body - info.block) / kShadowRatio;
  size_t body_bytes = (info.body_size + kShadowRatio - 1) / kShadowRatio;
  size_t block_bytes = info.block_size / kShadowRatio;
  size_t right_redzone_bytes = block_bytes - left_redzone_bytes - body_bytes;

  // Determine the marker byte for the header. This encodes the length of the
  // body of the allocation modulo the shadow ratio, so that the exact length
  // can be inferred from inspecting the shadow memory.
  uint8 body_size_mod = info.body_size % kShadowRatio;
  uint8 header_marker = ShadowMarkerHelper::BuildBlockStart(
      true, info.header->is_nested, body_size_mod);

  // Determine the marker byte for the trailer.
  uint8 trailer_marker = ShadowMarkerHelper::BuildBlockEnd(
      true, info.header->is_nested);

  // Poison the header and left padding.
  uint8* cursor = shadow_ + index;
  ::memset(cursor, header_marker, 1);
  ::memset(cursor + 1, kHeapLeftPaddingMarker, left_redzone_bytes - 1);
  cursor += left_redzone_bytes;
  ::memset(cursor, kHeapAddressableMarker, body_bytes);
  cursor += body_bytes;

  // Poison the right padding and the trailer.
  if (body_size_mod > 0)
    cursor[-1] = body_size_mod;
  ::memset(cursor, kHeapRightPaddingMarker, right_redzone_bytes - 1);
  ::memset(cursor + right_redzone_bytes - 1, trailer_marker, 1);
}

bool Shadow::BlockIsNested(const BlockInfo& info) {
  uint8 marker = GetShadowMarkerForAddress(info.block);
  DCHECK(ShadowMarkerHelper::IsActiveBlockStart(marker));
  return ShadowMarkerHelper::IsNestedBlockStart(marker);
}

bool Shadow::BlockInfoFromShadow(const void* addr, CompactBlockInfo* info) {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info);
  if (!BlockInfoFromShadowImpl(0, addr, info))
    return false;
  return true;
}

bool Shadow::BlockInfoFromShadow(const void* addr, BlockInfo* info) {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  DCHECK_NE(static_cast<BlockInfo*>(NULL), info);
  CompactBlockInfo compact = {};
  if (!BlockInfoFromShadow(addr, &compact))
    return false;
  ConvertBlockInfo(compact, info);
  return true;
}

bool Shadow::ParentBlockInfoFromShadow(const BlockInfo& nested,
                                       BlockInfo* info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), info);
  if (!BlockIsNested(nested))
    return false;
  CompactBlockInfo compact = {};
  if (!BlockInfoFromShadowImpl(1, nested.block, &compact))
    return false;
  ConvertBlockInfo(compact, info);
  return true;
}

bool Shadow::IsBeginningOfBlockBody(const void* addr) {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  // If the block has a non-zero body size then the beginning of the body will
  // be accessible or tagged as freed.
  // If the block has an empty body then the beginning of the body will be a
  // right redzone.
  if (IsAccessible(addr) || IsRightRedzone(addr) ||
      GetShadowMarkerForAddress(addr) == kHeapFreedMarker) {
    return IsLeftRedzone(reinterpret_cast<const uint8*>(addr) - 1);
  }
  return false;
}

namespace {

static const size_t kPageSize = GetPageSize();

// Converts an address to a page index and bit mask.
inline void AddressToPageMask(const void* address,
                              size_t* index,
                              uint8* mask) {
  DCHECK_NE(static_cast<size_t*>(nullptr), index);
  DCHECK_NE(static_cast<uint8*>(nullptr), mask);

  size_t i = reinterpret_cast<uintptr_t>(address) / kPageSize;
  *index = i / 8;
  *mask = 1 << (i % 8);
}

}  // namespace

bool Shadow::PageIsProtected(const void* addr) {
  // Since the page bits are read very frequently, this is not performed
  // under a lock. The values change quite rarely, so this will almost always
  // be correct. However, consumers of this knowledge have to be robust to
  // getting incorrect data.
  size_t index = 0;
  uint8 mask = 0;
  AddressToPageMask(addr, &index, &mask);
  return (page_bits_[index] & mask) == mask;
}

void Shadow::MarkPageProtected(const void* addr) {
  size_t index = 0;
  uint8 mask = 0;
  AddressToPageMask(addr, &index, &mask);

  base::AutoLock lock(page_bits_lock_);
  page_bits_[index] |= mask;
}

void Shadow::MarkPageUnprotected(const void* addr) {
  size_t index = 0;
  uint8 mask = 0;
  AddressToPageMask(addr, &index, &mask);
  mask = ~mask;

  base::AutoLock lock(page_bits_lock_);
  page_bits_[index] &= mask;
}

void Shadow::MarkPagesProtected(const void* addr, size_t size) {
  const uint8* page = reinterpret_cast<const uint8*>(addr);
  const uint8* page_end = page + size;
  size_t index = 0;
  uint8 mask = 0;

  base::AutoLock lock(page_bits_lock_);
  while (page < page_end) {
    AddressToPageMask(page, &index, &mask);
    page_bits_[index] |= mask;
    page += kPageSize;
  }
}

void Shadow::MarkPagesUnprotected(const void* addr, size_t size) {
  const uint8* page = reinterpret_cast<const uint8*>(addr);
  const uint8* page_end = page + size;
  size_t index = 0;
  uint8 mask = 0;

  base::AutoLock lock(page_bits_lock_);
  while (page < page_end) {
    AddressToPageMask(page, &index, &mask);
    page_bits_[index] &= ~mask;
    page += kPageSize;
  }
}

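// Copies the shadow bytes describing one range of application memory over
// those describing another. The addresses and size must be aligned to the
// shadow ratio so that the ranges map to whole shadow bytes.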
void Shadow::CloneShadowRange(const void* src_pointer,
                              void* dst_pointer,
                              size_t size) {
  DCHECK_EQ(0U, size & 0x7);

  uintptr_t src_index = reinterpret_cast<uintptr_t>(src_pointer);
  DCHECK_EQ(0U, src_index & 0x7);
  src_index >>= 3;

  uintptr_t dst_index = reinterpret_cast<uintptr_t>(dst_pointer);
  DCHECK_EQ(0U, dst_index & 0x7);
  dst_index >>= 3;

  size_t size_shadow = size >> 3;

  ::memcpy(shadow_ + dst_index, shadow_ + src_index, size_shadow);
}

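// Appends one line of shadow bytes to |output|, wrapping the byte at
// |bug_index| in brackets so that the faulting shadow byte stands out.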
void Shadow::AppendShadowByteText(const char* prefix,
                                  uintptr_t index,
                                  std::string* output,
                                  size_t bug_index) {
  base::StringAppendF(
      output, "%s0x%08x:", prefix, reinterpret_cast<void*>(index << 3));
  char separator = ' ';
  for (uint32 i = 0; i < kShadowBytesPerLine; i++) {
    if (index + i == bug_index)
      separator = '[';
    uint8 shadow_value = shadow_[index + i];
    base::StringAppendF(
        output, "%c%x%x", separator, shadow_value >> 4, shadow_value & 15);
    if (separator == '[')
      separator = ']';
    else if (separator == ']')
      separator = ' ';
  }
  if (separator == ']')
    base::StringAppendF(output, "]");
  base::StringAppendF(output, "\n");
}

void Shadow::AppendShadowArrayText(const void* addr, std::string* output) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  index >>= 3;
  size_t index_start = index;
  index_start /= kShadowBytesPerLine;
  index_start *= kShadowBytesPerLine;
  for (int i = -static_cast<int>(kShadowContextLines);
       i <= static_cast<int>(kShadowContextLines); i++) {
    const char* const prefix = (i == 0) ? "=>" : "  ";
    AppendShadowByteText(
        prefix, (index_start + i * kShadowBytesPerLine), output, index);
  }
}

void Shadow::AppendShadowMemoryText(const void* addr, std::string* output) {
  base::StringAppendF(output, "Shadow bytes around the buggy address:\n");
  AppendShadowArrayText(addr, output);
  base::StringAppendF(output, "Shadow byte legend (one shadow byte represents "
                              "8 application bytes):\n");
  base::StringAppendF(output, "  Addressable:           00\n");
  base::StringAppendF(output, "  Partially addressable: 01 - 07\n");
  base::StringAppendF(output, "  Block start redzone:   %02x - %02x\n",
                      kHeapBlockStartMarker0, kHeapBlockStartMarker7);
  base::StringAppendF(output, "  Nested block start:    %02x - %02x\n",
                      kHeapNestedBlockStartMarker0,
                      kHeapNestedBlockStartMarker7);
  base::StringAppendF(output, "  Asan memory byte:      %02x\n",
                      kAsanMemoryMarker);
  base::StringAppendF(output, "  Invalid address:       %02x\n",
                      kInvalidAddressMarker);
  base::StringAppendF(output, "  User redzone:          %02x\n",
                      kUserRedzoneMarker);
  base::StringAppendF(output, "  Block end redzone:     %02x\n",
                      kHeapBlockEndMarker);
  base::StringAppendF(output, "  Nested block end:      %02x\n",
                      kHeapNestedBlockEndMarker);
  base::StringAppendF(output, "  Heap left redzone:     %02x\n",
                      kHeapLeftPaddingMarker);
  base::StringAppendF(output, "  Heap right redzone:    %02x\n",
                      kHeapRightPaddingMarker);
  base::StringAppendF(output, "  Asan reserved byte:    %02x\n",
                      kAsanReservedMarker);
  base::StringAppendF(output, "  Freed heap region:     %02x\n",
                      kHeapFreedMarker);
}

size_t Shadow::GetAllocSize(const uint8* mem) {
  BlockInfo block_info = {};
  if (!Shadow::BlockInfoFromShadow(mem, &block_info))
    return 0;
  return block_info.block_size;
}

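// Scans left through the shadow from |cursor| looking for the start marker
// of a block that brackets it, adjusting the nesting depth as intervening
// block start and end markers are crossed.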
bool Shadow::ScanLeftForBracketingBlockStart(
    size_t initial_nesting_depth, size_t cursor, size_t* location) {
  DCHECK_NE(static_cast<size_t*>(NULL), location);

  static const size_t kLowerBound = kAddressLowerBound / kShadowRatio;

  size_t left = cursor;
  int nesting_depth = static_cast<int>(initial_nesting_depth);
  if (ShadowMarkerHelper::IsBlockEnd(shadow_[left]))
    --nesting_depth;
  while (true) {
    if (ShadowMarkerHelper::IsBlockStart(shadow_[left])) {
      if (nesting_depth == 0) {
        *location = left;
        return true;
      }
      // If this is not a nested block then there's no hope of finding a
      // block containing the original cursor.
      if (!ShadowMarkerHelper::IsNestedBlockStart(shadow_[left]))
        return false;
      --nesting_depth;
    } else if (ShadowMarkerHelper::IsBlockEnd(shadow_[left])) {
      ++nesting_depth;

      // If we encounter the end of a non-nested block there's no way for
      // a block to bracket us.
      if (nesting_depth > 0 &&
          !ShadowMarkerHelper::IsNestedBlockEnd(shadow_[left])) {
        return false;
      }
    }
    if (left <= kLowerBound)
      return false;
    --left;
  }

  NOTREACHED();
}

namespace {

// This handles an unaligned input cursor. It can potentially read up to 7
// bytes past |pos|, but only up to an 8-byte boundary, thus the out of
// bounds access is safe.
inline const uint8* ScanRightForPotentialHeaderBytes(
    const uint8* pos, const uint8* end) {
  DCHECK(::common::IsAligned(end, 8));

  // Handle the first few bytes that aren't aligned. If pos == end then
  // it is already 8-byte aligned and we'll simply fall through to the end.
  // This is a kind of Duff's device that takes bytes 'as large as possible'
  // until we reach an 8-byte alignment.
  switch (reinterpret_cast<uintptr_t>(pos) & 0x7) {
    case 1:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
    case 2:
      if (*reinterpret_cast<const uint16*>(pos) != 0 &&
          *reinterpret_cast<const uint16*>(pos) != kFreedMarker16) {
        return pos;
      }
      pos += 2;
    case 4:
      if (*reinterpret_cast<const uint32*>(pos) != 0 &&
          *reinterpret_cast<const uint32*>(pos) != kFreedMarker32) {
        return pos;
      }
      pos += 4;
      break;

    case 3:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
      // Now have alignment of 4.
      if (*reinterpret_cast<const uint32*>(pos) != 0 &&
          *reinterpret_cast<const uint32*>(pos) != kFreedMarker32) {
        return pos;
      }
      pos += 4;
      break;

    case 5:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
    case 6:
      if (*reinterpret_cast<const uint16*>(pos) != 0 &&
          *reinterpret_cast<const uint16*>(pos) != kFreedMarker16) {
        return pos;
      }
      pos += 2;
      break;

    case 7:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;

    case 0:
    default:
      // Do nothing, as we're already 8 byte aligned.
      break;
  }

  // Handle the 8-byte aligned bytes as much as we can.
  while (pos < end) {
    if (*reinterpret_cast<const uint64*>(pos) != 0 &&
        *reinterpret_cast<const uint64*>(pos) != kFreedMarker64) {
      return pos;
    }
    pos += 8;
  }

  return pos;
}

}  // namespace

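// Scans right through the shadow from |cursor| looking for the end marker of
// a bracketing block, skipping runs of addressable and freed bytes a word at
// a time.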
bool Shadow::ScanRightForBracketingBlockEnd(
    size_t initial_nesting_depth, size_t cursor, size_t* location) {
  DCHECK_NE(static_cast<size_t*>(NULL), location);
  static const uint8* kShadowEnd = Shadow::shadow_ + Shadow::kShadowSize;

  const uint8* pos = shadow_ + cursor;
  int nesting_depth = static_cast<int>(initial_nesting_depth);
  if (ShadowMarkerHelper::IsBlockStart(*pos))
    --nesting_depth;
  while (pos < kShadowEnd) {
    // Skips past as many addressable and freed bytes as possible.
    pos = ScanRightForPotentialHeaderBytes(pos, kShadowEnd);
    if (pos == kShadowEnd)
      return false;

    // If the scan above stopped early then somewhere in the next 8 bytes
    // there's non-addressable data that isn't 'freed'. Look byte by byte to
    // see what's up.

    if (ShadowMarkerHelper::IsBlockEnd(*pos)) {
      if (nesting_depth == 0) {
        *location = pos - shadow_;
        return true;
      }
      if (!ShadowMarkerHelper::IsNestedBlockEnd(*pos))
        return false;
      --nesting_depth;
    } else if (ShadowMarkerHelper::IsBlockStart(*pos)) {
      ++nesting_depth;

      // If we encounter the beginning of a non-nested block then there's
      // clearly no way for any block to bracket us.
      if (nesting_depth > 0 &&
          !ShadowMarkerHelper::IsNestedBlockStart(*pos)) {
        return false;
      }
    }
    ++pos;
  }
  return false;
}

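// Reconstructs the extents of the block containing |addr| (block bounds,
// header, body and trailer sizes) purely from the contents of the shadow.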
bool Shadow::BlockInfoFromShadowImpl(
    size_t initial_nesting_depth, const void* addr, CompactBlockInfo* info) {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info);

  // Convert the address to an offset in the shadow memory.
  size_t left = reinterpret_cast<uintptr_t>(addr) / kShadowRatio;
  size_t right = left;

  if (!ScanLeftForBracketingBlockStart(initial_nesting_depth, left, &left))
    return false;
  if (!ScanRightForBracketingBlockEnd(initial_nesting_depth, right, &right))
    return false;
  ++right;

  info->block = reinterpret_cast<uint8*>(left * kShadowRatio);
  info->block_size = (right - left) * kShadowRatio;

  // Get the length of the body modulo the shadow ratio.
  size_t body_size_mod = ShadowMarkerHelper::GetBlockStartData(shadow_[left]);
  info->is_nested = ShadowMarkerHelper::IsNestedBlockStart(shadow_[left]);

  // Find the beginning of the body (end of the left redzone).
  ++left;
  while (left < right && shadow_[left] == kHeapLeftPaddingMarker)
    ++left;

  // Find the beginning of the right redzone (end of the body).
  --right;
  while (right > left && shadow_[right - 1] == kHeapRightPaddingMarker)
    --right;

  // Calculate the body location and size.
  uint8* body = reinterpret_cast<uint8*>(left * kShadowRatio);
  size_t body_size = (right - left) * kShadowRatio;
  if (body_size_mod > 0) {
    DCHECK_LE(8u, body_size);
    body_size = body_size - kShadowRatio + body_size_mod;
  }

  // Fill out header and trailer sizes.
  info->header_size = body - info->block;
  info->trailer_size = info->block_size - body_size - info->header_size;

  return true;
}

ShadowWalker::ShadowWalker(
    bool recursive, const void* lower_bound, const void* upper_bound)
    : recursive_(recursive), lower_bound_(0), upper_bound_(0), cursor_(0),
      nesting_depth_(0) {
  DCHECK_LE(Shadow::kAddressLowerBound, reinterpret_cast<size_t>(lower_bound));
  DCHECK_GE(Shadow::kAddressUpperBound, reinterpret_cast<size_t>(upper_bound));
  DCHECK_LE(lower_bound, upper_bound);

  lower_bound_ = ::common::AlignDown(
      reinterpret_cast<const uint8*>(lower_bound), kShadowRatio);
  upper_bound_ = ::common::AlignUp(
      reinterpret_cast<const uint8*>(upper_bound), kShadowRatio);
  Reset();
}

void ShadowWalker::Reset() {
  // Walk to the beginning of the first non-nested block, or to the end
  // of the range, whichever comes first.
  nesting_depth_ = -1;
  for (cursor_ = lower_bound_; cursor_ != upper_bound_;
       cursor_ += kShadowRatio) {
    uint8 marker = Shadow::GetShadowMarkerForAddress(cursor_);
    if (ShadowMarkerHelper::IsBlockStart(marker) &&
        !ShadowMarkerHelper::IsNestedBlockStart(marker)) {
      break;
    }
  }
}

bool ShadowWalker::Next(BlockInfo* info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), info);

  // Iterate until a reportable block is encountered, or the slab is exhausted.
  for (; cursor_ != upper_bound_; cursor_ += kShadowRatio) {
    uint8 marker = Shadow::GetShadowMarkerForAddress(cursor_);

    // Update the nesting depth when block end markers are encountered.
    if (ShadowMarkerHelper::IsBlockEnd(marker)) {
      DCHECK_LE(0, nesting_depth_);
      --nesting_depth_;
      continue;
    }

    // Look for a block start marker.
    if (ShadowMarkerHelper::IsBlockStart(marker)) {
      // Update the nesting depth when block start bytes are encountered.
      ++nesting_depth_;

      // Non-nested blocks should only be encountered at depth 0.
      bool is_nested = ShadowMarkerHelper::IsNestedBlockStart(marker);
      DCHECK(is_nested || nesting_depth_ == 0);

      // Determine if the block is to be reported.
      if (!is_nested || recursive_) {
        // This can only fail if the shadow memory is malformed.
        CHECK(Shadow::BlockInfoFromShadow(cursor_, info));

        // In a recursive descent we have to process body contents.
        if (recursive_) {
          cursor_ += kShadowRatio;
        } else {
          // Otherwise we can skip the body of the block we just reported.
          // We skip directly to the end marker (but not past it so that depth
          // bookkeeping works properly).
          cursor_ += info->block_size - kShadowRatio;
        }
        return true;
      }
      continue;
    }
  }

  return false;
}

}  // namespace asan
}  // namespace agent