// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/shadow.h"

#include <windows.h>
#include <algorithm>

#include "base/strings/stringprintf.h"
#include "base/win/pe_image.h"
#include "syzygy/common/align.h"

namespace agent {
namespace asan {

namespace {

static const size_t kPageSize = GetPageSize();

// Converts an address to a page index and bit mask.
inline void AddressToPageMask(const void* address,
                              size_t* index,
                              uint8* mask) {
  DCHECK_NE(static_cast<size_t*>(nullptr), index);
  DCHECK_NE(static_cast<uint8*>(nullptr), mask);

  size_t i = reinterpret_cast<uintptr_t>(address) / kPageSize;
  *index = i / 8;
  *mask = 1 << (i % 8);
}
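
// Worked example: assuming 4KB pages, the address 0x00012345 lies on page
// 0x12 (18), so AddressToPageMask yields *index == 18 / 8 == 2 and
// *mask == 1 << (18 % 8) == 0x04, i.e. bit 2 of page_bits_[2].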

}  // namespace

Shadow::Shadow() : own_memory_(false), shadow_(nullptr), length_(0) {
  Init(RequiredLength());
}

Shadow::Shadow(size_t length)
    : own_memory_(false), shadow_(nullptr), length_(0) {
  Init(length);
}

Shadow::Shadow(void* shadow, size_t length)
    : own_memory_(false), shadow_(nullptr), length_(0) {
  Init(false, shadow, length);
}

Shadow::~Shadow() {
  if (own_memory_)
    CHECK(::VirtualFree(shadow_, 0, MEM_RELEASE));
  own_memory_ = false;
  shadow_ = nullptr;
  length_ = 0;
}

// static
size_t Shadow::RequiredLength() {
  MEMORYSTATUSEX mem_status = {};
  mem_status.dwLength = sizeof(mem_status);
  CHECK(::GlobalMemoryStatusEx(&mem_status));

  // Because of the way the interceptors work we only support 2GB or 4GB
  // virtual memory sizes, even if the actual size is 3GB (32-bit Windows
  // with the LAA flag and the 4GT kernel option enabled).
  uint64 mem_size = ::common::AlignUp64(mem_status.ullTotalVirtual,
                                        2UL << 30);  // 2GB.

  return mem_size >> kShadowRatioLog;
}
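
// For example, with one shadow byte covering every 8 application bytes
// (kShadowRatioLog == 3), a 2GB virtual address space requires a 256MB
// shadow and a 4GB space requires a 512MB shadow.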

void Shadow::SetUp() {
  // Poison the shadow object itself.
  const void* self = nullptr;
  size_t self_size = 0;
  GetPointerAndSize(&self, &self_size);
  DCHECK(::common::IsAligned(self, kShadowRatio));
  DCHECK(::common::IsAligned(self_size, kShadowRatio));
  Poison(self, self_size, kAsanMemoryMarker);

  // Poison the shadow memory.
  Poison(shadow_, length_, kAsanMemoryMarker);
  // Poison the first 64k of the memory as it's not addressable.
  Poison(0, kAddressLowerBound, kInvalidAddressMarker);
  // Poison the protection bits array.
  Poison(page_bits_.data(), page_bits_.size(), kAsanMemoryMarker);
}

void Shadow::TearDown() {
  // Unpoison the shadow object itself.
  const void* self = nullptr;
  size_t self_size = 0;
  GetPointerAndSize(&self, &self_size);
  DCHECK(::common::IsAligned(self, kShadowRatio));
  DCHECK(::common::IsAligned(self_size, kShadowRatio));
  Unpoison(self, self_size);

  // Unpoison the shadow memory.
  Unpoison(shadow_, length_);
  // Unpoison the first 64k of the memory.
  Unpoison(0, kAddressLowerBound);
  // Unpoison the protection bits array.
  Unpoison(page_bits_.data(), page_bits_.size());
}

bool Shadow::IsClean() const {
  const size_t innac_end = kAddressLowerBound >> kShadowRatioLog;

  const size_t shadow_begin =
      reinterpret_cast<uintptr_t>(shadow_) >> kShadowRatioLog;
  const size_t shadow_end =
      reinterpret_cast<uintptr_t>(shadow_ + length_) >> kShadowRatioLog;

  const size_t page_bits_begin =
      reinterpret_cast<uintptr_t>(page_bits_.data()) >> kShadowRatioLog;
  const size_t page_bits_end =
      reinterpret_cast<uintptr_t>(page_bits_.data() + page_bits_.size()) >>
          kShadowRatioLog;

  const void* self = nullptr;
  size_t self_size = 0;
  GetPointerAndSize(&self, &self_size);
  const size_t this_begin =
      reinterpret_cast<uintptr_t>(self) >> kShadowRatioLog;
  const size_t this_end =
      (reinterpret_cast<uintptr_t>(self) + self_size + kShadowRatio - 1) >>
          kShadowRatioLog;

  size_t i = 0;
  for (; i < innac_end; ++i) {
    if (shadow_[i] != kInvalidAddressMarker)
      return false;
  }

  for (; i < length_; ++i) {
    if ((i >= shadow_begin && i < shadow_end) ||
        (i >= page_bits_begin && i < page_bits_end) ||
        (i >= this_begin && i < this_end)) {
      if (shadow_[i] != kAsanMemoryMarker)
        return false;
    } else {
      if (shadow_[i] != kHeapAddressableMarker)
        return false;
    }
  }

  return true;
}

void Shadow::SetShadowMemory(const void* address,
                             size_t length,
                             ShadowMarker marker) {
  // Default implementation does absolutely nothing.
  return;
}

void Shadow::GetPointerAndSize(void const** self, size_t* size) const {
  DCHECK_NE(static_cast<void**>(nullptr), self);
  DCHECK_NE(static_cast<size_t*>(nullptr), size);
  GetPointerAndSizeImpl(self, size);
  const uint8* begin = ::common::AlignDown(
      reinterpret_cast<const uint8*>(*self), kShadowRatio);
  const uint8* end = ::common::AlignUp(
      reinterpret_cast<const uint8*>(*self) + *size, kShadowRatio);
  *self = begin;
  *size = end - begin;
}

void Shadow::GetPointerAndSizeImpl(void const** self, size_t* size) const {
  DCHECK_NE(static_cast<void**>(nullptr), self);
  DCHECK_NE(static_cast<size_t*>(nullptr), size);
  *self = this;
  *size = sizeof(*this);
}

void Shadow::Init(size_t length) {
  DCHECK_LT(0u, length);

  // The allocation may fail and it needs to be handled gracefully.
  void* mem = ::VirtualAlloc(nullptr, length, MEM_COMMIT, PAGE_READWRITE);
  Init(true, mem, length);
}

void Shadow::Init(bool own_memory, void* shadow, size_t length) {
  // Handle the case of a failed allocation.
  if (shadow == nullptr) {
    own_memory_ = false;
    shadow_ = nullptr;
    length_ = 0;
    return;
  }

  DCHECK_LT(0u, length);
  DCHECK(::common::IsAligned(shadow, kShadowRatio));

  own_memory_ = own_memory;
  shadow_ = reinterpret_cast<uint8*>(shadow);
  length_ = length;

  // Initialize the page bits array.
  uint64 memory_size = static_cast<uint64>(length) << kShadowRatioLog;
  DCHECK_EQ(0u, memory_size % kPageSize);
  size_t page_count = memory_size / kPageSize;
  size_t page_bytes = page_count / 8;
  page_bits_.resize(page_bytes);

  // Zero the memory.
  Reset();
}

void Shadow::Reset() {
  ::memset(shadow_, 0, length_);
  ::memset(page_bits_.data(), 0, page_bits_.size());

  SetShadowMemory(0, kShadowRatio * length_, kHeapAddressableMarker);
}
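
// A minimal lifecycle sketch (illustrative only):
//
//   Shadow shadow;       // Sizes itself via RequiredLength().
//   shadow.SetUp();      // Marks the non-addressable and internal ranges.
//   ...                  // Poison/Unpoison/MarkAsFreed as blocks come and go.
//   shadow.TearDown();   // Undoes the poisoning performed by SetUp().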

void Shadow::Poison(const void* addr, size_t size, ShadowMarker shadow_val) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  uintptr_t start = index & (kShadowRatio - 1);
  DCHECK_EQ(0U, (index + size) & (kShadowRatio - 1));

  SetShadowMemory(addr, size, shadow_val);

  index >>= kShadowRatioLog;
  if (start)
    shadow_[index++] = start;

  size >>= kShadowRatioLog;
  DCHECK_GT(length_, index + size);
  ::memset(shadow_ + index, shadow_val, size);
}
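
// Worked example: Poison(0x1004, 0x0C, marker) covers 0x1004-0x100F and ends
// on an 8-byte boundary, as required. The shadow byte for 0x1000-0x1007 is
// set to 4 (its first four bytes remain addressable) and the shadow byte for
// 0x1008-0x100F is set to |marker|.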

void Shadow::Unpoison(const void* addr, size_t size) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  DCHECK_EQ(0u, index & (kShadowRatio - 1));

  SetShadowMemory(addr, size, kHeapAddressableMarker);

  uint8 remainder = size & (kShadowRatio - 1);
  index >>= kShadowRatioLog;
  size >>= kShadowRatioLog;
  DCHECK_GT(length_, index + size);
  ::memset(shadow_ + index, kHeapAddressableMarker, size);

  if (remainder != 0)
    shadow_[index + size] = remainder;
}
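
// Worked example: Unpoison(0x1000, 0x0C) zeroes the shadow byte for
// 0x1000-0x1007 (fully addressable) and writes 4 to the shadow byte for
// 0x1008-0x100F, marking only its first four bytes addressable.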

namespace {

// An array of kHeapFreedMarker bytes. This is used for constructing the
// uint16, uint32 and uint64 variants of kHeapFreedMarker.
static const uint8 kFreedMarkers[] = {
    kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker,
    kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker, kHeapFreedMarker };
static_assert(sizeof(kFreedMarkers) == sizeof(uint64),
              "Wrong number of freed markers.");
static const uint64& kFreedMarker64 =
    *reinterpret_cast<const uint64*>(kFreedMarkers);
static const uint32& kFreedMarker32 =
    *reinterpret_cast<const uint32*>(kFreedMarkers);
static const uint16& kFreedMarker16 =
    *reinterpret_cast<const uint16*>(kFreedMarkers);
static const uint8& kFreedMarker8 =
    *reinterpret_cast<const uint8*>(kFreedMarkers);

// Marks the given range of shadow bytes as freed, preserving left and right
// redzone bytes.
inline void MarkAsFreedImpl8(uint8* cursor, uint8* cursor_end) {
  for (; cursor != cursor_end; ++cursor) {
    // Preserve block beginnings/ends/redzones as they were originally.
    // This is necessary to preserve information about nested blocks.
    if (ShadowMarkerHelper::IsActiveLeftRedzone(*cursor) ||
        ShadowMarkerHelper::IsActiveRightRedzone(*cursor)) {
      continue;
    }

    // Anything else gets marked as freed.
    *cursor = kHeapFreedMarker;
  }
}

// Marks the given range of shadow bytes as freed, preserving left and right
// redzone bytes. |cursor| and |cursor_end| must be 8-byte aligned.
inline void MarkAsFreedImplAligned64(uint64* cursor, uint64* cursor_end) {
  DCHECK(::common::IsAligned(cursor, sizeof(uint64)));
  DCHECK(::common::IsAligned(cursor_end, sizeof(uint64)));

  for (; cursor != cursor_end; ++cursor) {
    // If the block of shadow memory is entirely green then mark as freed.
    // Otherwise go check its contents byte by byte.
    if (*cursor == 0) {
      *cursor = kFreedMarker64;
    } else {
      MarkAsFreedImpl8(reinterpret_cast<uint8*>(cursor),
                       reinterpret_cast<uint8*>(cursor + 1));
    }
  }
}

inline void MarkAsFreedImpl64(uint8* cursor, uint8* cursor_end) {
  if (cursor_end - cursor >= 2 * sizeof(uint64)) {
    uint8* cursor_aligned = ::common::AlignUp(cursor, sizeof(uint64));
    uint8* cursor_end_aligned = ::common::AlignDown(cursor_end,
                                                    sizeof(uint64));
    MarkAsFreedImpl8(cursor, cursor_aligned);
    MarkAsFreedImplAligned64(reinterpret_cast<uint64*>(cursor_aligned),
                             reinterpret_cast<uint64*>(cursor_end_aligned));
    MarkAsFreedImpl8(cursor_end_aligned, cursor_end);
  } else {
    MarkAsFreedImpl8(cursor, cursor_end);
  }
}
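
// For example, marking 21 shadow bytes starting at an address that is 3 mod 8
// touches 5 head bytes one at a time, then the middle 16 bytes as two uint64
// comparisons; the end is then 8-byte aligned, so there are no tail bytes.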

}  // namespace

void Shadow::MarkAsFreed(const void* addr, size_t size) {
  DCHECK_LE(kAddressLowerBound, reinterpret_cast<uintptr_t>(addr));
  DCHECK(::common::IsAligned(addr, kShadowRatio));

  SetShadowMemory(addr, size, kHeapFreedMarker);

  size_t index = reinterpret_cast<uintptr_t>(addr) / kShadowRatio;
  size_t length = (size + kShadowRatio - 1) / kShadowRatio;
  DCHECK_LE(index, length_);
  DCHECK_LE(index + length, length_);

  uint8* cursor = shadow_ + index;
  uint8* cursor_end = cursor + length;

  // This isn't as simple as a memset because we need to preserve left and
  // right redzone padding bytes that may be found in the range.
  MarkAsFreedImpl64(cursor, cursor_end);
}

bool Shadow::IsAccessible(const void* addr) const {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  uintptr_t start = index & 0x7;

  index >>= kShadowRatioLog;

  DCHECK_GT(length_, index);
  uint8 shadow = shadow_[index];
  if (shadow == 0)
    return true;

  if (ShadowMarkerHelper::IsRedzone(shadow))
    return false;

  return start < shadow;
}
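
// Example: for an address at offset 3 within its 8-byte group (start == 3),
// a shadow byte of 0 means fully accessible, a shadow byte of 5 (five leading
// addressable bytes) means accessible since 3 < 5, and a shadow byte of 2
// means inaccessible.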

bool Shadow::IsLeftRedzone(const void* address) const {
  return ShadowMarkerHelper::IsActiveLeftRedzone(
      GetShadowMarkerForAddress(address));
}

bool Shadow::IsRightRedzone(const void* address) const {
  uintptr_t index = reinterpret_cast<uintptr_t>(address);
  uintptr_t start = index & 0x7;

  index >>= kShadowRatioLog;

  DCHECK_GT(length_, index);
  uint8 marker = shadow_[index];

  // If the marker is for accessible memory then some addresses may be part
  // of a right redzone, assuming that the *next* marker in the shadow is for
  // a right redzone.
  if (marker == 0)
    return false;
  if (marker <= kHeapPartiallyAddressableByte7) {
    // Bail if there is no next shadow byte to inspect.
    if (index == length_ - 1)
      return false;
    if (!ShadowMarkerHelper::IsActiveRightRedzone(shadow_[index + 1]))
      return false;
    return start >= marker;
  }

  // Otherwise, check the marker directly.
  return ShadowMarkerHelper::IsActiveRightRedzone(marker);
}

bool Shadow::IsBlockStartByte(const void* address) const {
  uintptr_t index = reinterpret_cast<uintptr_t>(address);
  uintptr_t start = index & 0x7;

  index >>= kShadowRatioLog;

  DCHECK_GT(length_, index);
  uint8 marker = shadow_[index];

  if (start != 0)
    return false;
  if (!ShadowMarkerHelper::IsActiveBlockStart(marker))
    return false;

  return true;
}

const uint8* Shadow::GetShadowMemoryForAddress(const void* addr) const {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  index >>= kShadowRatioLog;
  DCHECK_GE(length_, index);
  return shadow_ + index;
}

ShadowMarker Shadow::GetShadowMarkerForAddress(const void* addr) const {
  return static_cast<ShadowMarker>(*GetShadowMemoryForAddress(addr));
}

void Shadow::PoisonAllocatedBlock(const BlockInfo& info) {
  static_assert((sizeof(BlockHeader) % kShadowRatio) == 0, "Bad header size.");
  DCHECK(info.header->state == ALLOCATED_BLOCK);

  // Translate the block address to an offset. Sanity check a whole bunch
  // of things that we require to be true for the shadow to have 100%
  // fidelity.
  uintptr_t index = reinterpret_cast<uintptr_t>(info.header);
  DCHECK(::common::IsAligned(index, kShadowRatio));
  DCHECK(::common::IsAligned(info.header_padding_size, kShadowRatio));
  DCHECK(::common::IsAligned(info.block_size, kShadowRatio));
  index /= kShadowRatio;

  // Determine the distribution of bytes in the shadow.
  size_t left_redzone_bytes = info.TotalHeaderSize() / kShadowRatio;
  size_t body_bytes = (info.body_size + kShadowRatio - 1) / kShadowRatio;
  size_t block_bytes = info.block_size / kShadowRatio;
  size_t right_redzone_bytes = block_bytes - left_redzone_bytes - body_bytes;

  // Determine the marker byte for the header. This encodes the length of the
  // body of the allocation modulo the shadow ratio, so that the exact length
  // can be inferred from inspecting the shadow memory.
  uint8 body_size_mod = info.body_size % kShadowRatio;
  uint8 header_marker = ShadowMarkerHelper::BuildBlockStart(
      true, info.header->is_nested, body_size_mod);

  // Determine the marker byte for the trailer.
  uint8 trailer_marker = ShadowMarkerHelper::BuildBlockEnd(
      true, info.header->is_nested);

  // Poison the header and left padding.
  uint8* cursor = shadow_ + index;
  ::memset(cursor, header_marker, 1);
  ::memset(cursor + 1, kHeapLeftPaddingMarker, left_redzone_bytes - 1);
  cursor += left_redzone_bytes;
  ::memset(cursor, kHeapAddressableMarker, body_bytes);
  cursor += body_bytes;

  // Poison the right padding and the trailer.
  if (body_size_mod > 0)
    cursor[-1] = body_size_mod;
  ::memset(cursor, kHeapRightPaddingMarker, right_redzone_bytes - 1);
  ::memset(cursor + right_redzone_bytes - 1, trailer_marker, 1);

  SetShadowMemory(info.header,
                  info.TotalHeaderSize(),
                  kHeapLeftPaddingMarker);
  SetShadowMemory(info.body, info.body_size, kHeapAddressableMarker);
  SetShadowMemory(info.trailer_padding,
                  info.TotalTrailerSize(),
                  kHeapRightPaddingMarker);
}
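
// Worked example (hypothetical sizes): a 64-byte block with a 16-byte total
// header and a 13-byte body occupies 8 shadow bytes: a block-start marker
// encoding body_size % 8 == 5, one kHeapLeftPaddingMarker byte, two body
// bytes (0, then 5 because the body ends 5 bytes into its last group), three
// kHeapRightPaddingMarker bytes and a block-end marker.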

bool Shadow::BlockIsNested(const BlockInfo& info) const {
  uint8 marker = GetShadowMarkerForAddress(info.header);
  DCHECK(ShadowMarkerHelper::IsActiveBlockStart(marker));
  return ShadowMarkerHelper::IsNestedBlockStart(marker);
}

bool Shadow::BlockInfoFromShadow(
    const void* addr, CompactBlockInfo* info) const {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info);
  return BlockInfoFromShadowImpl(0, addr, info);
}

bool Shadow::BlockInfoFromShadow(const void* addr, BlockInfo* info) const {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  DCHECK_NE(static_cast<BlockInfo*>(NULL), info);
  CompactBlockInfo compact = {};
  if (!BlockInfoFromShadow(addr, &compact))
    return false;
  ConvertBlockInfo(compact, info);
  return true;
}

bool Shadow::ParentBlockInfoFromShadow(const BlockInfo& nested,
                                       BlockInfo* info) const {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), info);
  if (!BlockIsNested(nested))
    return false;
  CompactBlockInfo compact = {};
  if (!BlockInfoFromShadowImpl(1, nested.header, &compact))
    return false;
  ConvertBlockInfo(compact, info);
  return true;
}

bool Shadow::IsBeginningOfBlockBody(const void* addr) const {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  // If the block has a non-zero body size then the beginning of the body will
  // be accessible or tagged as freed.
  // If the block has an empty body then the beginning of the body will be a
  // right redzone.
  if (IsAccessible(addr) || IsRightRedzone(addr) ||
      GetShadowMarkerForAddress(addr) == kHeapFreedMarker) {
    return IsLeftRedzone(reinterpret_cast<const uint8*>(addr) - 1);
  }
  return false;
}

bool Shadow::PageIsProtected(const void* addr) const {
  // Since the page bit is read very frequently this is not performed
  // under a lock. The values change quite rarely, so this will almost always
  // be correct. However, consumers of this knowledge have to be robust to
  // getting incorrect data.
  size_t index = 0;
  uint8 mask = 0;
  AddressToPageMask(addr, &index, &mask);
  return (page_bits_[index] & mask) == mask;
}

void Shadow::MarkPageProtected(const void* addr) {
  size_t index = 0;
  uint8 mask = 0;
  AddressToPageMask(addr, &index, &mask);

  base::AutoLock lock(page_bits_lock_);
  page_bits_[index] |= mask;
}

void Shadow::MarkPageUnprotected(const void* addr) {
  size_t index = 0;
  uint8 mask = 0;
  AddressToPageMask(addr, &index, &mask);
  mask = ~mask;

  base::AutoLock lock(page_bits_lock_);
  page_bits_[index] &= mask;
}

void Shadow::MarkPagesProtected(const void* addr, size_t size) {
  const uint8* page = reinterpret_cast<const uint8*>(addr);
  const uint8* page_end = page + size;
  size_t index = 0;
  uint8 mask = 0;

  base::AutoLock lock(page_bits_lock_);
  while (page < page_end) {
    AddressToPageMask(page, &index, &mask);
    page_bits_[index] |= mask;
    page += kPageSize;
  }
}

void Shadow::MarkPagesUnprotected(const void* addr, size_t size) {
  const uint8* page = reinterpret_cast<const uint8*>(addr);
  const uint8* page_end = page + size;
  size_t index = 0;
  uint8 mask = 0;

  base::AutoLock lock(page_bits_lock_);
  while (page < page_end) {
    AddressToPageMask(page, &index, &mask);
    page_bits_[index] &= ~mask;
    page += kPageSize;
  }
}

void Shadow::AppendShadowByteText(const char* prefix,
                                  uintptr_t index,
                                  std::string* output,
                                  size_t bug_index) const {
  base::StringAppendF(
      output,
      "%s0x%08x:",
      prefix,
      reinterpret_cast<void*>(index << kShadowRatioLog));
  char separator = ' ';
  for (uint32 i = 0; i < kShadowBytesPerLine; i++) {
    if (index + i == bug_index)
      separator = '[';
    uint8 shadow_value = shadow_[index + i];
    base::StringAppendF(
        output, "%c%x%x", separator, shadow_value >> 4, shadow_value & 15);
    if (separator == '[')
      separator = ']';
    else if (separator == ']')
      separator = ' ';
  }
  if (separator == ']')
    base::StringAppendF(output, "]");
  base::StringAppendF(output, "\n");
}
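
// An output line resembles the following (byte values hypothetical, the
// buggy shadow byte bracketed, assuming 8 shadow bytes per line):
//
//   =>0x00008000: 00 00 00[f8]f8 f8 f8 fb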

void Shadow::AppendShadowArrayText(
    const void* addr, std::string* output) const {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  index >>= kShadowRatioLog;
  size_t index_start = index;
  index_start /= kShadowBytesPerLine;
  index_start *= kShadowBytesPerLine;
  for (int i = -static_cast<int>(kShadowContextLines);
       i <= static_cast<int>(kShadowContextLines); i++) {
    const char* const prefix = (i == 0) ? "=>" : "  ";
    AppendShadowByteText(
        prefix, (index_start + i * kShadowBytesPerLine), output, index);
  }
}

void Shadow::AppendShadowMemoryText(
    const void* addr, std::string* output) const {
  base::StringAppendF(output, "Shadow bytes around the buggy address:\n");
  AppendShadowArrayText(addr, output);
  base::StringAppendF(output, "Shadow byte legend (one shadow byte represents "
                              "8 application bytes):\n");
  base::StringAppendF(output, "  Addressable:           00\n");
  base::StringAppendF(output, "  Partially addressable: 01 - 07\n");
  base::StringAppendF(output, "  Block start redzone:   %02x - %02x\n",
                      kHeapBlockStartMarker0, kHeapBlockStartMarker7);
  base::StringAppendF(output, "  Nested block start:    %02x - %02x\n",
                      kHeapNestedBlockStartMarker0,
                      kHeapNestedBlockStartMarker7);
  base::StringAppendF(output, "  Asan memory byte:      %02x\n",
                      kAsanMemoryMarker);
  base::StringAppendF(output, "  Invalid address:       %02x\n",
                      kInvalidAddressMarker);
  base::StringAppendF(output, "  User redzone:          %02x\n",
                      kUserRedzoneMarker);
  base::StringAppendF(output, "  Block end redzone:     %02x\n",
                      kHeapBlockEndMarker);
  base::StringAppendF(output, "  Nested block end:      %02x\n",
                      kHeapNestedBlockEndMarker);
  base::StringAppendF(output, "  Heap left redzone:     %02x\n",
                      kHeapLeftPaddingMarker);
  base::StringAppendF(output, "  Heap right redzone:    %02x\n",
                      kHeapRightPaddingMarker);
  base::StringAppendF(output, "  Asan reserved byte:    %02x\n",
                      kAsanReservedMarker);
  base::StringAppendF(output, "  Freed heap region:     %02x\n",
                      kHeapFreedMarker);
}

size_t Shadow::GetAllocSize(const uint8* mem) const {
  BlockInfo block_info = {};
  if (!Shadow::BlockInfoFromShadow(mem, &block_info))
    return 0;
  return block_info.block_size;
}

bool Shadow::ScanLeftForBracketingBlockStart(
    size_t initial_nesting_depth, size_t cursor, size_t* location) const {
  DCHECK_NE(static_cast<size_t*>(NULL), location);

  static const size_t kLowerBound = kAddressLowerBound / kShadowRatio;

  size_t left = cursor;
  int nesting_depth = static_cast<int>(initial_nesting_depth);
  if (ShadowMarkerHelper::IsBlockEnd(shadow_[left]))
    --nesting_depth;
  while (true) {
    if (ShadowMarkerHelper::IsBlockStart(shadow_[left])) {
      if (nesting_depth == 0) {
        *location = left;
        return true;
      }
      // If this is not a nested block then there's no hope of finding a
      // block containing the original cursor.
      if (!ShadowMarkerHelper::IsNestedBlockStart(shadow_[left]))
        return false;
      --nesting_depth;
    } else if (ShadowMarkerHelper::IsBlockEnd(shadow_[left])) {
      ++nesting_depth;

      // If we encounter the end of a non-nested block there's no way for
      // a block to bracket us.
      if (nesting_depth > 0 &&
          !ShadowMarkerHelper::IsNestedBlockEnd(shadow_[left])) {
        return false;
      }
    }
    if (left <= kLowerBound)
      return false;
    --left;
  }

  NOTREACHED();
}
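
// For example, scanning left from a cursor between a nested child block and
// its parent's trailer: the child's end marker bumps the depth to 1, the
// child's (nested) start marker drops it back to 0, and the next start
// marker found, the parent's, is returned at depth 0.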

namespace {

// This handles an unaligned input cursor. It can potentially read up to 7
// bytes past the end of the cursor, but only up to an 8-byte boundary. Thus
// this out-of-bounds access is safe.
inline const uint8* ScanRightForPotentialHeaderBytes(
    const uint8* pos, const uint8* end) {
  DCHECK(::common::IsAligned(end, 8));

  // Handle the first few bytes that aren't aligned. If pos == end then
  // it is already 8-byte aligned and we'll simply fall through to the end.
  // This is a kind of Duff's device that takes bytes 'as large as possible'
  // until we reach an 8-byte alignment.
  switch (reinterpret_cast<uintptr_t>(pos) & 0x7) {
    case 1:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
    case 2:
      if (*reinterpret_cast<const uint16*>(pos) != 0 &&
          *reinterpret_cast<const uint16*>(pos) != kFreedMarker16) {
        return pos;
      }
      pos += 2;
    case 4:
      if (*reinterpret_cast<const uint32*>(pos) != 0 &&
          *reinterpret_cast<const uint32*>(pos) != kFreedMarker32) {
        return pos;
      }
      pos += 4;
      break;

    case 3:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
      // Now have alignment of 4.
      if (*reinterpret_cast<const uint32*>(pos) != 0 &&
          *reinterpret_cast<const uint32*>(pos) != kFreedMarker32) {
        return pos;
      }
      pos += 4;
      break;

    case 5:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
    case 6:
      if (*reinterpret_cast<const uint16*>(pos) != 0 &&
          *reinterpret_cast<const uint16*>(pos) != kFreedMarker16) {
        return pos;
      }
      pos += 2;
      break;

    case 7:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;

    case 0:
    default:
      // Do nothing, as we're already 8 byte aligned.
      break;
  }

  // Handle the 8-byte aligned bytes as much as we can.
  while (pos < end) {
    if (*reinterpret_cast<const uint64*>(pos) != 0 &&
        *reinterpret_cast<const uint64*>(pos) != kFreedMarker64) {
      return pos;
    }
    pos += 8;
  }

  return pos;
}

}  // namespace

bool Shadow::ScanRightForBracketingBlockEnd(
    size_t initial_nesting_depth, size_t cursor, size_t* location) const {
  DCHECK_NE(static_cast<size_t*>(NULL), location);

  const uint8* shadow_end = shadow_ + length_;
  const uint8* pos = shadow_ + cursor;
  int nesting_depth = static_cast<int>(initial_nesting_depth);
  if (ShadowMarkerHelper::IsBlockStart(*pos))
    --nesting_depth;
  while (pos < shadow_end) {
    // Skip past as many addressable and freed bytes as possible.
    pos = ScanRightForPotentialHeaderBytes(pos, shadow_end);
    if (pos == shadow_end)
      return false;

    // When the above call exits early then somewhere in the next 8 bytes
    // there's non-addressable data that isn't 'freed'. Look byte by byte to
    // see what's up.

    if (ShadowMarkerHelper::IsBlockEnd(*pos)) {
      if (nesting_depth == 0) {
        *location = pos - shadow_;
        return true;
      }
      if (!ShadowMarkerHelper::IsNestedBlockEnd(*pos))
        return false;
      --nesting_depth;
    } else if (ShadowMarkerHelper::IsBlockStart(*pos)) {
      ++nesting_depth;

      // If we encounter the beginning of a non-nested block then there's
      // clearly no way for any block to bracket us.
      if (nesting_depth > 0 &&
          !ShadowMarkerHelper::IsNestedBlockStart(*pos)) {
        return false;
      }
    }
    ++pos;
  }
  return false;
}

bool Shadow::BlockInfoFromShadowImpl(
    size_t initial_nesting_depth,
    const void* addr,
    CompactBlockInfo* info) const {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info);

  // Convert the address to an offset in the shadow memory.
  size_t left = reinterpret_cast<uintptr_t>(addr) / kShadowRatio;
  size_t right = left;

  if (!ScanLeftForBracketingBlockStart(initial_nesting_depth, left, &left))
    return false;
  if (!ScanRightForBracketingBlockEnd(initial_nesting_depth, right, &right))
    return false;
  ++right;

  uint8* block = reinterpret_cast<uint8*>(left * kShadowRatio);
  info->header = reinterpret_cast<BlockHeader*>(block);
  info->block_size = (right - left) * kShadowRatio;

  // Get the length of the body modulo the shadow ratio.
  size_t body_size_mod = ShadowMarkerHelper::GetBlockStartData(shadow_[left]);
  info->is_nested = ShadowMarkerHelper::IsNestedBlockStart(shadow_[left]);

  // Find the beginning of the body (end of the left redzone).
  ++left;
  while (left < right && shadow_[left] == kHeapLeftPaddingMarker)
    ++left;

  // Find the beginning of the right redzone (end of the body).
  --right;
  while (right > left && shadow_[right - 1] == kHeapRightPaddingMarker)
    --right;

  // Calculate the body location and size.
  uint8* body = reinterpret_cast<uint8*>(left * kShadowRatio);
  size_t body_size = (right - left) * kShadowRatio;
  if (body_size_mod > 0) {
    DCHECK_LE(8u, body_size);
    body_size = body_size - kShadowRatio + body_size_mod;
  }

  // Fill out header and trailer sizes.
  info->header_size = body - block;
  info->trailer_size = info->block_size - body_size - info->header_size;

  return true;
}
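
// Continuing the worked example from PoisonAllocatedBlock: for any address
// inside that 64-byte block, the left scan finds the start marker and the
// right scan the end marker, recovering block_size == 64, header_size == 16,
// body_size == 2 * 8 - 8 + 5 == 13 and trailer_size == 64 - 16 - 13 == 35.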

ShadowWalker::ShadowWalker(const Shadow* shadow,
                           bool recursive,
                           const void* lower_bound,
                           const void* upper_bound)
    : shadow_(shadow), recursive_(recursive), lower_bound_(0), upper_bound_(0),
      cursor_(nullptr), shadow_cursor_(nullptr), nesting_depth_(0) {
  DCHECK_NE(static_cast<Shadow*>(nullptr), shadow);
  DCHECK_LE(Shadow::kAddressLowerBound, reinterpret_cast<size_t>(lower_bound));
  DCHECK_GE(shadow->memory_size(), reinterpret_cast<size_t>(upper_bound));
  DCHECK_LE(lower_bound, upper_bound);

  lower_bound_ = ::common::AlignDown(
      reinterpret_cast<const uint8*>(lower_bound), kShadowRatio);
  upper_bound_ = ::common::AlignUp(
      reinterpret_cast<const uint8*>(upper_bound), kShadowRatio);
  Reset();
}

void ShadowWalker::Reset() {
  // Walk to the beginning of the first non-nested block, or to the end
  // of the range, whichever comes first.
  nesting_depth_ = -1;
  for (cursor_ = lower_bound_; cursor_ != upper_bound_;
       cursor_ += kShadowRatio) {
    uint8 marker = shadow_->GetShadowMarkerForAddress(cursor_);
    if (ShadowMarkerHelper::IsBlockStart(marker) &&
        !ShadowMarkerHelper::IsNestedBlockStart(marker)) {
      break;
    }
  }

  shadow_cursor_ = shadow_->GetShadowMemoryForAddress(cursor_);
}

bool ShadowWalker::Next(BlockInfo* info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), info);

  // Iterate until a reportable block is encountered, or the slab is exhausted.
  for (; cursor_ != upper_bound_; cursor_ += kShadowRatio) {
    uint8 marker = shadow_->GetShadowMarkerForAddress(cursor_);

    // Update the nesting depth when block end markers are encountered.
    if (ShadowMarkerHelper::IsBlockEnd(marker)) {
      DCHECK_LE(0, nesting_depth_);
      --nesting_depth_;
      continue;
    }

    // Look for a block start marker.
    if (ShadowMarkerHelper::IsBlockStart(marker)) {
      // Update the nesting depth when block start bytes are encountered.
      ++nesting_depth_;

      // Non-nested blocks should only be encountered at depth 0.
      bool is_nested = ShadowMarkerHelper::IsNestedBlockStart(marker);
      DCHECK(is_nested || nesting_depth_ == 0);

      // Determine if the block is to be reported.
      if (!is_nested || recursive_) {
        // This can only fail if the shadow memory is malformed.
        CHECK(shadow_->BlockInfoFromShadow(cursor_, info));

        // In a recursive descent we have to process body contents.
        if (recursive_) {
          cursor_ += kShadowRatio;
        } else {
          // Otherwise we can skip the body of the block we just reported.
          // We skip directly to the end marker (but not past it so that depth
          // bookkeeping works properly).
          cursor_ += info->block_size - kShadowRatio;
        }

        shadow_cursor_ = shadow_->GetShadowMemoryForAddress(cursor_);
        return true;
      }
      continue;
    }
  }

  return false;
}
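
// A minimal usage sketch (illustrative; |shadow|, |lower| and |upper| are
// assumed to be a valid Shadow and in-range bounds):
//
//   ShadowWalker walker(&shadow, false, lower, upper);
//   BlockInfo info = {};
//   while (walker.Next(&info)) {
//     // |info| describes the next non-nested block in [lower, upper).
//   }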

}  // namespace asan
}  // namespace agent