1 : // Copyright 2012 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/agent/asan/shadow.h"
16 :
17 : #include <windows.h>
18 : #include <algorithm>
19 :
20 : #include "base/strings/stringprintf.h"
21 : #include "base/win/pe_image.h"
22 : #include "syzygy/common/align.h"
23 :
24 : namespace agent {
25 : namespace asan {
26 :
27 : namespace {
28 :
29 E : static const size_t kPageSize = GetPageSize();
30 :
31 : // Converts an address to a page index and bit mask.
32 : inline void AddressToPageMask(const void* address,
33 : size_t* index,
34 E : uint8_t* mask) {
35 E : DCHECK_NE(static_cast<size_t*>(nullptr), index);
36 E : DCHECK_NE(static_cast<uint8_t*>(nullptr), mask);
37 :
38 E : size_t i = reinterpret_cast<uintptr_t>(address) / kPageSize;
39 E : *index = i / 8;
40 E : *mask = 1 << (i % 8);
41 E : }
42 :
43 : } // namespace
44 :
// Default constructor: sizes the shadow to cover the entire virtual address
// space (see RequiredLength()) and allocates it internally.
Shadow::Shadow() : own_memory_(false), shadow_(nullptr), length_(0) {
  Init(RequiredLength());
}
48 :
// Constructs a shadow of the given |length| in bytes, allocated internally.
Shadow::Shadow(size_t length)
    : own_memory_(false), shadow_(nullptr), length_(0) {
  Init(length);
}
53 :
// Constructs a shadow over caller-provided memory. The caller retains
// ownership; the destructor will not free |shadow|.
Shadow::Shadow(void* shadow, size_t length)
    : own_memory_(false), shadow_(nullptr), length_(0) {
  Init(false, shadow, length);
}
58 :
Shadow::~Shadow() {
  // Only release the backing memory if this object allocated it (see
  // Init(bool, void*, size_t)).
  if (own_memory_)
    CHECK(::VirtualFree(shadow_, 0, MEM_RELEASE));
  own_memory_ = false;
  shadow_ = nullptr;
  length_ = 0;
}
66 :
67 : // static
68 E : size_t Shadow::RequiredLength() {
69 E : MEMORYSTATUSEX mem_status = {};
70 E : mem_status.dwLength = sizeof(mem_status);
71 E : CHECK(::GlobalMemoryStatusEx(&mem_status));
72 :
73 : // Because of the way the interceptors work we only support 2GB or 4GB
74 : // virtual memory sizes, even if the actual is 3GB (32-bit windows, LAA,
75 : // and 4GT kernel option enabled).
76 E : uint64_t mem_size = ::common::AlignUp64(mem_status.ullTotalVirtual,
77 : 2UL << 30); // 2GB.
78 :
79 E : return mem_size >> kShadowRatioLog;
80 E : }
81 :
// Marks the shadow's own bookkeeping memory as poisoned so that instrumented
// code touching it is reported, and marks the unaddressable low 64KB.
void Shadow::SetUp() {
  // Poison the shadow object itself.
  const void* self = nullptr;
  size_t self_size = 0;
  GetPointerAndSize(&self, &self_size);
  DCHECK(::common::IsAligned(self, kShadowRatio));
  DCHECK(::common::IsAligned(self_size, kShadowRatio));
  Poison(self, self_size, kAsanMemoryMarker);

  // Poison the shadow memory.
  Poison(shadow_, length_, kAsanMemoryMarker);
  // Poison the first 64k of the memory as they're not addressable.
  Poison(0, kAddressLowerBound, kInvalidAddressMarker);
  // Poison the protection bits array.
  Poison(page_bits_.data(), page_bits_.size(), kAsanMemoryMarker);
}
98 :
// Exact inverse of SetUp(): unpoisons all of the regions SetUp() poisoned.
void Shadow::TearDown() {
  // Unpoison the shadow object itself.
  const void* self = nullptr;
  size_t self_size = 0;
  GetPointerAndSize(&self, &self_size);
  DCHECK(::common::IsAligned(self, kShadowRatio));
  DCHECK(::common::IsAligned(self_size, kShadowRatio));
  Unpoison(self, self_size);

  // Unpoison the shadow memory.
  Unpoison(shadow_, length_);
  // Unpoison the first 64k of the memory.
  Unpoison(0, kAddressLowerBound);
  // Unpoison the protection bits array.
  Unpoison(page_bits_.data(), page_bits_.size());
}
115 :
// Returns true if the shadow is in its pristine post-SetUp() state: the low
// 64KB marked invalid, the shadow's own bookkeeping regions (this object,
// the shadow array and the page-bits array) marked as ASan memory, and
// everything else fully addressable.
bool Shadow::IsClean() const {
  // Shadow index one past the unaddressable low-memory region.
  const size_t innac_end = kAddressLowerBound >> kShadowRatioLog;

  // Shadow index ranges covered by the shadow array itself...
  const size_t shadow_begin =
      reinterpret_cast<uintptr_t>(shadow_) >> kShadowRatioLog;
  const size_t shadow_end =
      reinterpret_cast<uintptr_t>(shadow_ + length_) >> kShadowRatioLog;

  // ...by the page protection bits...
  const size_t page_bits_begin =
      reinterpret_cast<uintptr_t>(page_bits_.data()) >> kShadowRatioLog;
  const size_t page_bits_end =
      reinterpret_cast<uintptr_t>(page_bits_.data() + page_bits_.size()) >>
          kShadowRatioLog;

  // ...and by this object (rounded up to a whole shadow byte).
  void const* self = nullptr;
  size_t self_size = 0;
  GetPointerAndSize(&self, &self_size);
  const size_t this_begin =
      reinterpret_cast<uintptr_t>(self) >> kShadowRatioLog;
  const size_t this_end =
      (reinterpret_cast<uintptr_t>(self) + self_size + kShadowRatio - 1) >>
          kShadowRatioLog;

  // The low 64KB must be marked invalid.
  size_t i = 0;
  for (; i < innac_end; ++i) {
    if (shadow_[i] != kInvalidAddressMarker)
      return false;
  }

  // Everything else must be green except the bookkeeping regions, which
  // must carry the ASan-memory marker.
  for (; i < length_; ++i) {
    if ((i >= shadow_begin && i < shadow_end) ||
        (i >= page_bits_begin && i < page_bits_end) ||
        (i >= this_begin && i < this_end)) {
      if (shadow_[i] != kAsanMemoryMarker)
        return false;
    } else {
      if (shadow_[i] != kHeapAddressableMarker)
        return false;
    }
  }

  return true;
}
159 :
160 : void Shadow::SetShadowMemory(const void* address,
161 : size_t length,
162 E : ShadowMarker marker) {
163 : // Default implementation does absolutely nothing.
164 : return;
165 E : }
166 :
167 E : void Shadow::GetPointerAndSize(void const** self, size_t* size) const {
168 E : DCHECK_NE(static_cast<void**>(nullptr), self);
169 E : DCHECK_NE(static_cast<size_t*>(nullptr), size);
170 E : GetPointerAndSizeImpl(self, size);
171 E : const uint8_t* begin = ::common::AlignDown(
172 : reinterpret_cast<const uint8_t*>(*self), kShadowRatio);
173 E : const uint8_t* end = ::common::AlignUp(
174 : reinterpret_cast<const uint8_t*>(*self) + *size, kShadowRatio);
175 E : *self = begin;
176 E : *size = end - begin;
177 E : }
178 :
// Reports the raw (unaligned) extents of this object; GetPointerAndSize()
// rounds them to shadow-ratio boundaries.
void Shadow::GetPointerAndSizeImpl(void const** self, size_t* size) const {
  DCHECK_NE(static_cast<void**>(nullptr), self);
  DCHECK_NE(static_cast<size_t*>(nullptr), size);
  *self = this;
  *size = sizeof(*this);
}
185 :
// Allocates |length| bytes of zeroed memory for the shadow and delegates to
// the three-argument Init, which also handles allocation failure.
void Shadow::Init(size_t length) {
  DCHECK_LT(0u, length);

  // The allocation may fail and it needs to be handled gracefully.
  void* mem = ::VirtualAlloc(nullptr, length, MEM_COMMIT, PAGE_READWRITE);
  Init(true, mem, length);
}
193 :
194 E : void Shadow::Init(bool own_memory, void* shadow, size_t length) {
195 : // Handle the case of a failed allocation.
196 E : if (shadow == nullptr) {
197 i : own_memory_ = false;
198 i : shadow_ = nullptr;
199 i : length = 0;
200 i : return;
201 : }
202 :
203 E : DCHECK_LT(0u, length);
204 E : DCHECK(::common::IsAligned(shadow, kShadowRatio));
205 :
206 E : own_memory_ = own_memory;
207 E : shadow_ = reinterpret_cast<uint8_t*>(shadow);
208 E : length_ = length;
209 :
210 : // Initialize the page bits array.
211 E : uint64_t memory_size = static_cast<uint64_t>(length) << kShadowRatioLog;
212 E : DCHECK_EQ(0u, memory_size % kPageSize);
213 E : size_t page_count = memory_size / kPageSize;
214 E : size_t page_bytes = page_count / 8;
215 E : page_bits_.resize(page_bytes);
216 :
217 : // Zero the memory.
218 E : Reset();
219 E : }
220 :
// Returns the shadow to its all-addressable state and clears every page
// protection bit.
void Shadow::Reset() {
  ::memset(shadow_, 0, length_);
  ::memset(page_bits_.data(), 0, page_bits_.size());

  // Notify any observing subclass that the whole range is now green.
  SetShadowMemory(0, kShadowRatio * length_, kHeapAddressableMarker);
}
227 :
// Poisons [addr, addr + size) with |shadow_val|. The start may be unaligned
// (the leading partial group is encoded as a partially-addressable byte),
// but the end must fall on a shadow-ratio boundary.
void Shadow::Poison(const void* addr, size_t size, ShadowMarker shadow_val) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  // Offset of |addr| within its 8-byte group; nonzero means a partial group.
  uintptr_t start = index & (kShadowRatio - 1);
  DCHECK_EQ(0U, (index + size) & (kShadowRatio - 1));

  SetShadowMemory(addr, size, shadow_val);

  index >>= kShadowRatioLog;
  // Encode the leading partial group: the shadow byte holds the count of
  // addressable leading bytes (1-7).
  if (start)
    shadow_[index++] = start;

  size >>= kShadowRatioLog;
  DCHECK_GT(length_, index + size);
  ::memset(shadow_ + index, shadow_val, size);
}
243 :
// Unpoisons [addr, addr + size). The start must be aligned to the shadow
// ratio; a trailing partial group is encoded as a partially-addressable
// shadow byte holding the count of valid leading bytes.
void Shadow::Unpoison(const void* addr, size_t size) {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  DCHECK_EQ(0u, index & (kShadowRatio - 1));

  SetShadowMemory(addr, size, kHeapAddressableMarker);

  // Number of bytes in the trailing partial group (0 if size is aligned).
  uint8_t remainder = size & (kShadowRatio - 1);
  index >>= kShadowRatioLog;
  size >>= kShadowRatioLog;
  DCHECK_GT(length_, index + size);
  ::memset(shadow_ + index, kHeapAddressableMarker, size);

  if (remainder != 0)
    shadow_[index + size] = remainder;
}
259 :
260 : namespace {
261 :
// The freed-heap marker replicated to 8-, 16-, 32- and 64-bit wide patterns
// so that runs of shadow bytes can be marked freed a word at a time.
static const uint8_t kFreedMarker8 = kHeapFreedMarker;
static const uint16_t kFreedMarker16 =
    (static_cast<const uint16_t>(kFreedMarker8) << 8) | kFreedMarker8;
static const uint32_t kFreedMarker32 =
    (static_cast<const uint32_t>(kFreedMarker16) << 16) | kFreedMarker16;
static const uint64_t kFreedMarker64 =
    (static_cast<const uint64_t>(kFreedMarker32) << 32) | kFreedMarker32;
269 :
270 : // Marks the given range of shadow bytes as freed, preserving left and right
271 : // redzone bytes.
272 E : inline void MarkAsFreedImpl8(uint8_t* cursor, uint8_t* cursor_end) {
273 E : for (; cursor != cursor_end; ++cursor) {
274 : // Preserve block beginnings/ends/redzones as they were originally.
275 : // This is necessary to preserve information about nested blocks.
276 E : if (ShadowMarkerHelper::IsActiveLeftRedzone(*cursor) ||
277 : ShadowMarkerHelper::IsActiveRightRedzone(*cursor)) {
278 E : continue;
279 : }
280 :
281 : // Anything else gets marked as freed.
282 E : *cursor = kHeapFreedMarker;
283 E : }
284 E : }
285 :
286 : // Marks the given range of shadow bytes as freed, preserving left and right
287 : // redzone bytes. |cursor| and |cursor_end| must be 8-byte aligned.
288 E : inline void MarkAsFreedImplAligned64(uint64_t* cursor, uint64_t* cursor_end) {
289 E : DCHECK(::common::IsAligned(cursor, sizeof(uint64_t)));
290 E : DCHECK(::common::IsAligned(cursor_end, sizeof(uint64_t)));
291 :
292 E : for (; cursor != cursor_end; ++cursor) {
293 : // If the block of shadow memory is entirely green then mark as freed.
294 : // Otherwise go check its contents byte by byte.
295 E : if (*cursor == 0) {
296 E : *cursor = kFreedMarker64;
297 E : } else {
298 E : MarkAsFreedImpl8(reinterpret_cast<uint8_t*>(cursor),
299 : reinterpret_cast<uint8_t*>(cursor + 1));
300 : }
301 E : }
302 E : }
303 :
// Marks [cursor, cursor_end) as freed, using the fast aligned qword pass
// over the middle of the range and byte-wise passes for the unaligned
// edges. Small ranges are handled entirely byte-wise.
inline void MarkAsFreedImpl64(uint8_t* cursor, uint8_t* cursor_end) {
  // Only worth aligning if the range spans at least two qwords.
  if (cursor_end - cursor >= 2 * sizeof(uint64_t)) {
    uint8_t* cursor_aligned = ::common::AlignUp(cursor, sizeof(uint64_t));
    uint8_t* cursor_end_aligned =
        ::common::AlignDown(cursor_end, sizeof(uint64_t));
    MarkAsFreedImpl8(cursor, cursor_aligned);
    MarkAsFreedImplAligned64(reinterpret_cast<uint64_t*>(cursor_aligned),
                             reinterpret_cast<uint64_t*>(cursor_end_aligned));
    MarkAsFreedImpl8(cursor_end_aligned, cursor_end);
  } else {
    MarkAsFreedImpl8(cursor, cursor_end);
  }
}
317 :
318 : } // namespace
319 :
320 E : void Shadow::MarkAsFreed(const void* addr, size_t size) {
321 E : DCHECK_LE(kAddressLowerBound, reinterpret_cast<uintptr_t>(addr));
322 E : DCHECK(::common::IsAligned(addr, kShadowRatio));
323 :
324 E : SetShadowMemory(addr, size, kHeapFreedMarker);
325 :
326 E : size_t index = reinterpret_cast<uintptr_t>(addr) / kShadowRatio;
327 E : size_t length = (size + kShadowRatio - 1) / kShadowRatio;
328 E : DCHECK_LE(index, length_);
329 E : DCHECK_LE(index + length, length_);
330 :
331 E : uint8_t* cursor = shadow_ + index;
332 E : uint8_t* cursor_end = static_cast<uint8_t*>(cursor) + length;
333 :
334 : // This isn't as simple as a memset because we need to preserve left and
335 : // right redzone padding bytes that may be found in the range.
336 E : MarkAsFreedImpl64(cursor, cursor_end);
337 E : }
338 :
339 E : bool Shadow::IsAccessible(const void* addr) const {
340 E : uintptr_t index = reinterpret_cast<uintptr_t>(addr);
341 E : uintptr_t start = index & 0x7;
342 :
343 E : index >>= kShadowRatioLog;
344 E : if (index > length_)
345 i : return false;
346 :
347 E : DCHECK_GT(length_, index);
348 E : uint8_t shadow = shadow_[index];
349 E : if (shadow == 0)
350 E : return true;
351 :
352 E : if (ShadowMarkerHelper::IsRedzone(shadow))
353 E : return false;
354 :
355 E : return start < shadow;
356 E : }
357 :
// Returns true if every byte in [addr, addr + size) is addressable.
bool Shadow::IsRangeAccessible(const void* addr, size_t size) const {
  DCHECK_NE(static_cast<const void*>(nullptr), addr);

  // A zero byte access is always valid.
  if (size == 0U)
    return true;

  uintptr_t start_addr = reinterpret_cast<uintptr_t>(addr);
  uintptr_t start = start_addr;
  start >>= kShadowRatioLog;

  DCHECK_EQ(reinterpret_cast<uintptr_t>(addr),
            (start << kShadowRatioLog) + (start_addr & (kShadowRatio - 1)));
  if (start > length_)
    return false;

  // Validate that the start point is accessible.
  uint8_t shadow = shadow_[start];
  if (shadow != 0U) {
    if (ShadowMarkerHelper::IsRedzone(shadow))
      return false;
    // NOTE(review): |start| here is the shadow *index*, not the intra-group
    // offset, so this comparison against a marker value (<= 7) can only
    // trigger for addresses below the 64K lower bound (already invalid).
    // The zero-scan and the end-offset test below are what actually enforce
    // accessibility; compare with FindFirstPoisonedByte's |start_offs| —
    // confirm intent before changing.
    if (start < shadow)
      return false;
  }

  uintptr_t end = reinterpret_cast<uintptr_t>(addr) + size;
  // Overflow on addr + size.
  if (start_addr > end)
    return false;

  // Number of bytes needed in the final (partial) group, if any.
  size_t end_offs = end & (kShadowRatio - 1);
  end >>= kShadowRatioLog;
  if (end > length_)
    return false;

  // Now run over the shadow bytes from start to end, which all need to be
  // zero.
  if (!internal::IsZeroBufferImpl<uint64_t>(&shadow_[start], &shadow_[end]))
    return false;

  // Finally test the end point if there's a tail offset.
  if (end_offs == 0U)
    return true;

  shadow = shadow_[end];
  if (shadow == 0U)
    return true;

  if (ShadowMarkerHelper::IsRedzone(shadow))
    return false;
  // Partially addressable tail group: needs at least |end_offs| valid bytes.
  if (end_offs > shadow)
    return false;

  return true;
}
413 :
// Returns a pointer to the first poisoned byte in [addr, addr + size), or
// nullptr if the entire range is accessible.
const void* Shadow::FindFirstPoisonedByte(const void* addr, size_t size) const {
  DCHECK_NE(static_cast<const void*>(nullptr), addr);

  // A zero byte access is always valid.
  if (size == 0U)
    return nullptr;

  // |out_addr| tracks the application address of the group currently being
  // inspected; advanced by kShadowRatio per shadow byte in the loop below.
  const uint8_t* out_addr = reinterpret_cast<const uint8_t*>(addr);
  uintptr_t start_addr = reinterpret_cast<uintptr_t>(addr);
  uintptr_t start = start_addr;
  size_t start_offs = start & (kShadowRatio - 1);
  start >>= kShadowRatioLog;

  DCHECK_EQ(reinterpret_cast<uintptr_t>(addr),
            (start << kShadowRatioLog) + (start_offs));
  if (start > length_)
    return out_addr;

  // Validate that the start point is accessible.
  uint8_t shadow = shadow_[start];
  if (shadow != 0U) {
    if (ShadowMarkerHelper::IsRedzone(shadow))
      return out_addr;
    if (start_offs > shadow)
      return out_addr;
  }

  uintptr_t end = reinterpret_cast<uintptr_t>(addr) + size;
  // Overflow on addr + size.
  if (start_addr > end)
    return out_addr;

  // Number of bytes required in the final (partial) group, if any.
  size_t end_offs = end & (kShadowRatio - 1);
  end >>= kShadowRatioLog;
  if (end > length_)
    return out_addr;

  // Scan the whole groups; a nonzero shadow byte locates the first poisoned
  // byte within that group (offset |shadow| for partially-addressable).
  for (size_t curr = start; curr < end; ++curr, out_addr += kShadowRatio) {
    shadow = shadow_[curr];
    if (ShadowMarkerHelper::IsRedzone(shadow))
      return out_addr;
    if (shadow != 0)
      return out_addr + shadow;
  }

  // Finally test the end point if there's a tail offset.
  if (end_offs == 0U)
    return nullptr;

  shadow = shadow_[end];
  if (shadow == 0U)
    return nullptr;

  if (ShadowMarkerHelper::IsRedzone(shadow))
    return out_addr;
  if (end_offs > shadow)
    return out_addr + shadow;

  return nullptr;
}
474 :
475 E : bool Shadow::IsLeftRedzone(const void* address) const {
476 E : return ShadowMarkerHelper::IsActiveLeftRedzone(
477 : GetShadowMarkerForAddress(address));
478 E : }
479 :
480 E : bool Shadow::IsRightRedzone(const void* address) const {
481 E : uintptr_t index = reinterpret_cast<uintptr_t>(address);
482 E : uintptr_t start = index & 0x7;
483 :
484 E : index >>= kShadowRatioLog;
485 :
486 E : DCHECK_GT(length_, index);
487 E : uint8_t marker = shadow_[index];
488 :
489 : // If the marker is for accessible memory then some addresses may be part
490 : // of a right redzone, assuming that the *next* marker in the shadow is for
491 : // a right redzone.
492 E : if (marker == 0)
493 E : return false;
494 E : if (marker <= kHeapPartiallyAddressableByte7) {
495 E : if (index == length_)
496 i : return false;
497 E : if (!ShadowMarkerHelper::IsActiveRightRedzone(shadow_[index + 1]))
498 i : return false;
499 E : return start >= marker;
500 : }
501 :
502 : // Otherwise, check the marker directly.
503 E : return ShadowMarkerHelper::IsActiveRightRedzone(marker);
504 E : }
505 :
506 : bool Shadow::IsBlockStartByte(const void* address) const {
507 : uintptr_t index = reinterpret_cast<uintptr_t>(address);
508 : uintptr_t start = index & 0x7;
509 :
510 : index >>= kShadowRatioLog;
511 :
512 : DCHECK_GT(length_, index);
513 : uint8_t marker = shadow_[index];
514 :
515 : if (start != 0)
516 : return false;
517 : if (!ShadowMarkerHelper::IsActiveBlockStart(marker))
518 : return false;
519 :
520 : return true;
521 : }
522 :
523 E : const uint8_t* Shadow::GetShadowMemoryForAddress(const void* addr) const {
524 E : uintptr_t index = reinterpret_cast<uintptr_t>(addr);
525 E : index >>= kShadowRatioLog;
526 E : DCHECK_GE(length_, index);
527 E : return shadow_ + index;
528 E : }
529 :
530 E : ShadowMarker Shadow::GetShadowMarkerForAddress(const void* addr) const {
531 E : return static_cast<ShadowMarker>(*GetShadowMemoryForAddress(addr));
532 E : }
533 :
// Writes the full shadow pattern for a live block: block-start marker,
// left-redzone bytes (header + padding), addressable body (with a partial
// trailing byte if the body isn't a multiple of the shadow ratio), right
// redzone, and a block-end marker.
void Shadow::PoisonAllocatedBlock(const BlockInfo& info) {
  static_assert((sizeof(BlockHeader) % kShadowRatio) == 0, "Bad header size.");
  DCHECK(info.header->state == ALLOCATED_BLOCK);

  // Translate the block address to an offset. Sanity check a whole bunch
  // of things that we require to be true for the shadow to have 100%
  // fidelity.
  uintptr_t index = reinterpret_cast<uintptr_t>(info.header);
  DCHECK(::common::IsAligned(index, kShadowRatio));
  DCHECK(::common::IsAligned(info.header_padding_size, kShadowRatio));
  DCHECK(::common::IsAligned(info.block_size, kShadowRatio));
  index /= kShadowRatio;

  // Determine the distribution of bytes in the shadow.
  size_t left_redzone_bytes = info.TotalHeaderSize() / kShadowRatio;
  size_t body_bytes = (info.body_size + kShadowRatio - 1) / kShadowRatio;
  size_t block_bytes = info.block_size / kShadowRatio;
  size_t right_redzone_bytes = block_bytes - left_redzone_bytes - body_bytes;

  // Determine the marker byte for the header. This encodes the length of the
  // body of the allocation modulo the shadow ratio, so that the exact length
  // can be inferred from inspecting the shadow memory.
  uint8_t body_size_mod = info.body_size % kShadowRatio;
  uint8_t header_marker = ShadowMarkerHelper::BuildBlockStart(
      true, info.header->is_nested, body_size_mod);

  // Determine the marker byte for the trailer.
  uint8_t trailer_marker =
      ShadowMarkerHelper::BuildBlockEnd(true, info.header->is_nested);

  // Poison the header and left padding.
  uint8_t* cursor = shadow_ + index;
  ::memset(cursor, header_marker, 1);
  ::memset(cursor + 1, kHeapLeftPaddingMarker, left_redzone_bytes - 1);
  cursor += left_redzone_bytes;
  ::memset(cursor, kHeapAddressableMarker, body_bytes);
  cursor += body_bytes;

  // Poison the right padding and the trailer. A partial final body group is
  // marked with the count of its addressable bytes.
  if (body_size_mod > 0)
    cursor[-1] = body_size_mod;
  ::memset(cursor, kHeapRightPaddingMarker, right_redzone_bytes - 1);
  ::memset(cursor + right_redzone_bytes - 1, trailer_marker, 1);

  // Mirror the updates for observing subclasses.
  SetShadowMemory(info.header,
                  info.TotalHeaderSize(),
                  kHeapLeftPaddingMarker);
  SetShadowMemory(info.body, info.body_size, kHeapAddressableMarker);
  SetShadowMemory(info.trailer_padding,
                  info.TotalTrailerSize(),
                  kHeapRightPaddingMarker);
}
586 :
// Returns true if the given (live) block is marked as nested in the shadow.
bool Shadow::BlockIsNested(const BlockInfo& info) const {
  uint8_t marker = GetShadowMarkerForAddress(info.header);
  // Callers must only pass blocks whose header carries a block-start marker.
  DCHECK(ShadowMarkerHelper::IsActiveBlockStart(marker));
  return ShadowMarkerHelper::IsNestedBlockStart(marker);
}
592 :
593 : bool Shadow::BlockInfoFromShadow(
594 E : const void* addr, CompactBlockInfo* info) const {
595 E : DCHECK_NE(static_cast<void*>(NULL), addr);
596 E : DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info);
597 E : if (!BlockInfoFromShadowImpl(0, addr, info))
598 E : return false;
599 E : return true;
600 E : }
601 :
602 E : bool Shadow::BlockInfoFromShadow(const void* addr, BlockInfo* info) const {
603 E : DCHECK_NE(static_cast<void*>(NULL), addr);
604 E : DCHECK_NE(static_cast<BlockInfo*>(NULL), info);
605 E : CompactBlockInfo compact = {};
606 E : if (!BlockInfoFromShadow(addr, &compact))
607 E : return false;
608 E : ConvertBlockInfo(compact, info);
609 E : return true;
610 E : }
611 :
// Infers the extents of the block that encloses the nested block |nested|.
// Returns false if |nested| is not actually nested.
bool Shadow::ParentBlockInfoFromShadow(const BlockInfo& nested,
                                       BlockInfo* info) const {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), info);
  if (!BlockIsNested(nested))
    return false;
  CompactBlockInfo compact = {};
  // Depth 1: skip over |nested| itself and find its bracketing parent.
  if (!BlockInfoFromShadowImpl(1, nested.header, &compact))
    return false;
  ConvertBlockInfo(compact, info);
  return true;
}
623 :
// Returns true if |addr| is the first byte of some block's body.
bool Shadow::IsBeginningOfBlockBody(const void* addr) const {
  DCHECK_NE(static_cast<void*>(NULL), addr);
  // If the block has a non-zero body size then the beginning of the body will
  // be accessible or tagged as freed.
  // If the block has an empty body then the beginning of the body will be a
  // right redzone.
  if (IsAccessible(addr) || IsRightRedzone(addr) ||
      GetShadowMarkerForAddress(addr) == kHeapFreedMarker) {
    // Body start is confirmed by the byte before it being left redzone.
    return IsLeftRedzone(reinterpret_cast<const uint8_t*>(addr) - 1);
  }
  return false;
}
636 :
637 E : bool Shadow::PageIsProtected(const void* addr) const {
638 : // Since the page bit is read very frequently this is not performed
639 : // under a lock. The values change quite rarely, so this will almost always
640 : // be correct. However, consumers of this knowledge have to be robust to
641 : // getting incorrect data.
642 E : size_t index = 0;
643 E : uint8_t mask = 0;
644 E : AddressToPageMask(addr, &index, &mask);
645 E : return (page_bits_[index] & mask) == mask;
646 E : }
647 :
648 E : void Shadow::MarkPageProtected(const void* addr) {
649 E : size_t index = 0;
650 E : uint8_t mask = 0;
651 E : AddressToPageMask(addr, &index, &mask);
652 :
653 E : base::AutoLock lock(page_bits_lock_);
654 E : page_bits_[index] |= mask;
655 E : }
656 :
657 E : void Shadow::MarkPageUnprotected(const void* addr) {
658 E : size_t index = 0;
659 E : uint8_t mask = 0;
660 E : AddressToPageMask(addr, &index, &mask);
661 E : mask = ~mask;
662 :
663 E : base::AutoLock lock(page_bits_lock_);
664 E : page_bits_[index] &= mask;
665 E : }
666 :
667 E : void Shadow::MarkPagesProtected(const void* addr, size_t size) {
668 E : const uint8_t* page = reinterpret_cast<const uint8_t*>(addr);
669 E : const uint8_t* page_end = page + size;
670 E : size_t index = 0;
671 E : uint8_t mask = 0;
672 :
673 E : base::AutoLock lock(page_bits_lock_);
674 E : while (page < page_end) {
675 E : AddressToPageMask(page, &index, &mask);
676 E : page_bits_[index] |= mask;
677 E : page += kPageSize;
678 E : }
679 E : }
680 :
681 E : void Shadow::MarkPagesUnprotected(const void* addr, size_t size) {
682 E : const uint8_t* page = reinterpret_cast<const uint8_t*>(addr);
683 E : const uint8_t* page_end = page + size;
684 E : size_t index = 0;
685 E : uint8_t mask = 0;
686 :
687 E : base::AutoLock lock(page_bits_lock_);
688 E : while (page < page_end) {
689 E : AddressToPageMask(page, &index, &mask);
690 E : page_bits_[index] &= ~mask;
691 E : page += kPageSize;
692 E : }
693 E : }
694 :
// Appends one line of the shadow dump: the application address covered by
// shadow byte |index|, followed by kShadowBytesPerLine shadow byte values
// in hex. The byte at |bug_index| is wrapped in [brackets].
void Shadow::AppendShadowByteText(const char *prefix,
                                  uintptr_t index,
                                  std::string* output,
                                  size_t bug_index) const {
  // NOTE(review): passes a void* for the %08x specifier; this relies on
  // 32-bit pointers — confirm behavior on 64-bit builds.
  base::StringAppendF(
      output,
      "%s0x%08x:",
      prefix,
      reinterpret_cast<void*>(index << kShadowRatioLog));
  char separator = ' ';
  for (uint32_t i = 0; i < kShadowBytesPerLine; i++) {
    if (index + i == bug_index)
      separator = '[';
    uint8_t shadow_value = shadow_[index + i];
    // Print the two nibbles separately to get a fixed two-digit field.
    base::StringAppendF(
        output, "%c%x%x", separator, shadow_value >> 4, shadow_value & 15);
    // The closing bracket takes the place of the next separator; revert to
    // a space afterwards.
    if (separator == '[')
      separator = ']';
    else if (separator == ']')
      separator = ' ';
  }
  // Close the bracket if the bug byte was the last one on the line.
  if (separator == ']')
    base::StringAppendF(output, "]");
  base::StringAppendF(output, "\n");
}
720 :
// Appends a window of shadow dump lines centered (by line) on |addr|:
// kShadowContextLines lines before and after, with the line containing
// |addr| prefixed by "=>".
void Shadow::AppendShadowArrayText(
    const void* addr, std::string* output) const {
  uintptr_t index = reinterpret_cast<uintptr_t>(addr);
  index >>= kShadowRatioLog;
  // Round the starting shadow index down to a whole line.
  size_t index_start = index;
  index_start /= kShadowBytesPerLine;
  index_start *= kShadowBytesPerLine;
  // NOTE(review): for |addr| within the first few lines of the shadow the
  // negative offset can wrap below zero — presumably callers only pass
  // heap addresses well above that; confirm.
  for (int i = -static_cast<int>(kShadowContextLines);
       i <= static_cast<int>(kShadowContextLines); i++) {
    const char * const prefix = (i == 0) ? "=>" : "  ";
    AppendShadowByteText(
        prefix, (index_start + i * kShadowBytesPerLine), output, index);
  }
}
735 :
// Appends the shadow dump around |addr| plus a legend mapping each marker
// value to its meaning. The output text is part of error reports; do not
// alter the strings.
void Shadow::AppendShadowMemoryText(
    const void* addr, std::string* output) const {
  base::StringAppendF(output, "Shadow bytes around the buggy address:\n");
  AppendShadowArrayText(addr, output);
  base::StringAppendF(output, "Shadow byte legend (one shadow byte represents "
                              "8 application bytes):\n");
  base::StringAppendF(output, "  Addressable:           00\n");
  base::StringAppendF(output, "  Partially addressable: 01 - 07\n");
  base::StringAppendF(output, "  Block start redzone:   %02x - %02x\n",
                      kHeapBlockStartMarker0, kHeapBlockStartMarker7);
  base::StringAppendF(output, "  Nested block start:    %02x - %02x\n",
                      kHeapNestedBlockStartMarker0,
                      kHeapNestedBlockStartMarker7);
  base::StringAppendF(output, "  Asan memory byte:      %02x\n",
                      kAsanMemoryMarker);
  base::StringAppendF(output, "  Invalid address:       %02x\n",
                      kInvalidAddressMarker);
  base::StringAppendF(output, "  User redzone:          %02x\n",
                      kUserRedzoneMarker);
  base::StringAppendF(output, "  Block end redzone:     %02x\n",
                      kHeapBlockEndMarker);
  base::StringAppendF(output, "  Nested block end:      %02x\n",
                      kHeapNestedBlockEndMarker);
  base::StringAppendF(output, "  Heap left redzone:     %02x\n",
                      kHeapLeftPaddingMarker);
  base::StringAppendF(output, "  Heap right redzone:    %02x\n",
                      kHeapRightPaddingMarker);
  base::StringAppendF(output, "  Asan reserved byte:    %02x\n",
                      kAsanReservedMarker);
  base::StringAppendF(output, "  Freed heap region:     %02x\n",
                      kHeapFreedMarker);
}
768 :
769 : size_t Shadow::GetAllocSize(const uint8_t* mem) const {
770 : BlockInfo block_info = {};
771 : if (!Shadow::BlockInfoFromShadow(mem, &block_info))
772 : return 0;
773 : return block_info.block_size;
774 : }
775 :
// Walks the shadow leftwards from |cursor| looking for the start marker of
// the block bracketing it, tracking block nesting as it goes.
// |initial_nesting_depth| is 0 to find the innermost bracketing block, 1 to
// skip over the block starting at |cursor| and find its parent. On success
// *|location| receives the shadow index of the block-start byte.
bool Shadow::ScanLeftForBracketingBlockStart(
    size_t initial_nesting_depth, size_t cursor, size_t* location) const {
  DCHECK_NE(static_cast<size_t*>(NULL), location);

  // Never scan below the unaddressable 64K region.
  static const size_t kLowerBound = kAddressLowerBound / kShadowRatio;

  size_t left = cursor;
  int nesting_depth = static_cast<int>(initial_nesting_depth);
  // Starting on a block-end byte means we're already one level out.
  if (ShadowMarkerHelper::IsBlockEnd(shadow_[left]))
    --nesting_depth;
  while (true) {
    if (ShadowMarkerHelper::IsBlockStart(shadow_[left])) {
      // Depth zero means this start byte belongs to the bracketing block.
      if (nesting_depth == 0) {
        *location = left;
        return true;
      }
      // If this is not a nested block then there's no hope of finding a
      // block containing the original cursor.
      if (!ShadowMarkerHelper::IsNestedBlockStart(shadow_[left]))
        return false;
      --nesting_depth;
    } else if (ShadowMarkerHelper::IsBlockEnd(shadow_[left])) {
      ++nesting_depth;

      // If we encounter the end of a non-nested block there's no way for
      // a block to bracket us.
      if (nesting_depth > 0 &&
          !ShadowMarkerHelper::IsNestedBlockEnd(shadow_[left])) {
        return false;
      }
    }
    if (left <= kLowerBound)
      return false;
    --left;
  }

  NOTREACHED();
}
814 :
815 : namespace {
816 :
817 : // This handles an unaligned input cursor. It can potentially read up to 7
818 : // bytes past the end of the cursor, but only up to an 8 byte boundary. Thus
819 : // this out of bounds access is safe.
// Skips rightwards over shadow bytes that are zero (addressable) or freed
// markers, returning the first position that may hold header/redzone data,
// or |end| if none was found. Handles an unaligned input cursor; it can
// potentially read up to 7 bytes past the returned position, but only up to
// an 8-byte boundary, so the out-of-bounds access is safe.
inline const uint8_t* ScanRightForPotentialHeaderBytes(const uint8_t* pos,
                                                       const uint8_t* end) {
  DCHECK(::common::IsAligned(end, 8));

  // Handle the first few bytes that aren't aligned. If pos == end then
  // it is already 8-byte aligned and we'll simply fall through to the end.
  // This is a kind of Duffy's device that takes bytes 'as large as possible'
  // until we reach an 8 byte alignment. The case labels intentionally fall
  // through: 1 -> 2 -> 4, 5 -> 6, 7 -> 0.
  switch (reinterpret_cast<uintptr_t>(pos) & 0x7) {
    case 1:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
      // Fall through: now 2-byte aligned.
    case 2:
      if (*reinterpret_cast<const uint16_t*>(pos) != 0 &&
          *reinterpret_cast<const uint16_t*>(pos) != kFreedMarker16) {
        return pos;
      }
      pos += 2;
      // Fall through: now 4-byte aligned.
    case 4:
      if (*reinterpret_cast<const uint32_t*>(pos) != 0 &&
          *reinterpret_cast<const uint32_t*>(pos) != kFreedMarker32) {
        return pos;
      }
      pos += 4;
      break;

    case 3:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
      // Now have alignment of 4.
      if (*reinterpret_cast<const uint32_t*>(pos) != 0 &&
          *reinterpret_cast<const uint32_t*>(pos) != kFreedMarker32) {
        return pos;
      }
      pos += 4;
      break;

    case 5:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
      // Fall through: now 6 -> 2-byte aligned within the qword.
    case 6:
      if (*reinterpret_cast<const uint16_t*>(pos) != 0 &&
          *reinterpret_cast<const uint16_t*>(pos) != kFreedMarker16) {
        return pos;
      }
      pos += 2;
      break;

    case 7:
      if (*pos != 0 && *pos != kFreedMarker8)
        return pos;
      pos += 1;
      // Fall through: now 8-byte aligned.

    case 0:
    default:
      // Do nothing, as we're already 8 byte aligned.
      break;
  }

  // Handle the 8-byte aligned bytes as much as we can.
  while (pos < end) {
    if (*reinterpret_cast<const uint64_t*>(pos) != 0 &&
        *reinterpret_cast<const uint64_t*>(pos) != kFreedMarker64) {
      return pos;
    }
    pos += 8;
  }

  return pos;
}
893 :
894 : } // namespace
895 :
// Walks the shadow rightwards from |cursor| looking for the end marker of
// the block bracketing it, tracking block nesting as it goes.
// |initial_nesting_depth| mirrors ScanLeftForBracketingBlockStart. On
// success *|location| receives the shadow index of the block-end byte.
bool Shadow::ScanRightForBracketingBlockEnd(
    size_t initial_nesting_depth, size_t cursor, size_t* location) const {
  DCHECK_NE(static_cast<size_t*>(NULL), location);

  const uint8_t* shadow_end = shadow_ + length_;
  const uint8_t* pos = shadow_ + cursor;
  int nesting_depth = static_cast<int>(initial_nesting_depth);
  // Starting on a block-start byte means we're already one level in.
  if (ShadowMarkerHelper::IsBlockStart(*pos))
    --nesting_depth;
  while (pos < shadow_end) {
    // Skips past as many addressable and freed bytes as possible.
    pos = ScanRightForPotentialHeaderBytes(pos, shadow_end);
    if (pos == shadow_end)
      return false;

    // When the above loop exits early then somewhere in the next 8 bytes
    // there's non-addressable data that isn't 'freed'. Look byte by byte to
    // see what's up.

    if (ShadowMarkerHelper::IsBlockEnd(*pos)) {
      // Depth zero means this end byte belongs to the bracketing block.
      if (nesting_depth == 0) {
        *location = pos - shadow_;
        return true;
      }
      if (!ShadowMarkerHelper::IsNestedBlockEnd(*pos))
        return false;
      --nesting_depth;
    } else if (ShadowMarkerHelper::IsBlockStart(*pos)) {
      ++nesting_depth;

      // If we encounter the beginning of a non-nested block then there's
      // clearly no way for any block to bracket us.
      if (nesting_depth > 0 &&
          !ShadowMarkerHelper::IsNestedBlockStart(*pos)) {
        return false;
      }
    }
    ++pos;
  }
  return false;
}
937 :
938 : bool Shadow::BlockInfoFromShadowImpl(
939 : size_t initial_nesting_depth,
940 : const void* addr,
941 E : CompactBlockInfo* info) const {
942 E : DCHECK_NE(static_cast<void*>(NULL), addr);
943 E : DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info);
944 :
945 : // Convert the address to an offset in the shadow memory.
946 E : size_t left = reinterpret_cast<uintptr_t>(addr) / kShadowRatio;
947 E : size_t right = left;
948 :
949 E : if (!ScanLeftForBracketingBlockStart(initial_nesting_depth, left, &left))
950 E : return false;
951 E : if (!ScanRightForBracketingBlockEnd(initial_nesting_depth, right, &right))
952 i : return false;
953 E : ++right;
954 :
955 E : uint8_t* block = reinterpret_cast<uint8_t*>(left * kShadowRatio);
956 E : info->header = reinterpret_cast<BlockHeader*>(block);
957 E : info->block_size = static_cast<uint32_t>((right - left) * kShadowRatio);
958 :
959 : // Get the length of the body modulo the shadow ratio.
960 E : size_t body_size_mod = ShadowMarkerHelper::GetBlockStartData(shadow_[left]);
961 E : info->is_nested = ShadowMarkerHelper::IsNestedBlockStart(shadow_[left]);
962 :
963 : // Find the beginning of the body (end of the left redzone).
964 E : ++left;
965 E : while (left < right && shadow_[left] == kHeapLeftPaddingMarker)
966 E : ++left;
967 :
968 : // Find the beginning of the right redzone (end of the body).
969 E : --right;
970 E : while (right > left && shadow_[right - 1] == kHeapRightPaddingMarker)
971 E : --right;
972 :
973 : // Calculate the body location and size.
974 E : uint8_t* body = reinterpret_cast<uint8_t*>(left * kShadowRatio);
975 E : size_t body_size = (right - left) * kShadowRatio;
976 E : if (body_size_mod > 0) {
977 E : DCHECK_LE(8u, body_size);
978 E : body_size = body_size - kShadowRatio + body_size_mod;
979 : }
980 :
981 : // Fill out header and trailer sizes.
982 E : info->header_size = body - block;
983 E : info->trailer_size = info->block_size - body_size - info->header_size;
984 :
985 E : return true;
986 E : }
987 :
988 : ShadowWalker::ShadowWalker(const Shadow* shadow,
989 : bool recursive,
990 : const void* lower_bound,
991 : const void* upper_bound)
992 E : : shadow_(shadow), recursive_(recursive), lower_bound_(0), upper_bound_(0),
993 E : cursor_(nullptr), shadow_cursor_(nullptr), nesting_depth_(0) {
994 E : DCHECK_NE(static_cast<Shadow*>(nullptr), shadow);
995 E : DCHECK_LE(Shadow::kAddressLowerBound, reinterpret_cast<size_t>(lower_bound));
996 E : DCHECK_GE(shadow->memory_size(), reinterpret_cast<size_t>(upper_bound));
997 E : DCHECK_LE(lower_bound, upper_bound);
998 :
999 E : lower_bound_ = ::common::AlignDown(
1000 : reinterpret_cast<const uint8_t*>(lower_bound), kShadowRatio);
1001 E : upper_bound_ = ::common::AlignUp(
1002 : reinterpret_cast<const uint8_t*>(upper_bound), kShadowRatio);
1003 E : Reset();
1004 E : }
1005 :
1006 E : void ShadowWalker::Reset() {
1007 : // Walk to the beginning of the first non-nested block, or to the end
1008 : // of the range, whichever comes first.
1009 E : nesting_depth_ = -1;
1010 E : for (cursor_ = lower_bound_; cursor_ != upper_bound_;
1011 E : cursor_ += kShadowRatio) {
1012 E : uint8_t marker = shadow_->GetShadowMarkerForAddress(cursor_);
1013 E : if (ShadowMarkerHelper::IsBlockStart(marker) &&
1014 : !ShadowMarkerHelper::IsNestedBlockStart(marker)) {
1015 E : break;
1016 : }
1017 E : }
1018 :
1019 E : shadow_cursor_ = shadow_->GetShadowMemoryForAddress(cursor_);
1020 E : }
1021 :
1022 E : bool ShadowWalker::Next(BlockInfo* info) {
1023 E : DCHECK_NE(static_cast<BlockInfo*>(NULL), info);
1024 :
1025 : // Iterate until a reportable block is encountered, or the slab is exhausted.
1026 E : for (; cursor_ != upper_bound_; cursor_ += kShadowRatio) {
1027 E : uint8_t marker = shadow_->GetShadowMarkerForAddress(cursor_);
1028 :
1029 : // Update the nesting depth when block end markers are encountered.
1030 E : if (ShadowMarkerHelper::IsBlockEnd(marker)) {
1031 E : DCHECK_LE(0, nesting_depth_);
1032 E : --nesting_depth_;
1033 E : continue;
1034 : }
1035 :
1036 : // Look for a block start marker.
1037 E : if (ShadowMarkerHelper::IsBlockStart(marker)) {
1038 : // Update the nesting depth when block start bytes are encountered.
1039 E : ++nesting_depth_;
1040 :
1041 : // Non-nested blocks should only be encountered at depth 0.
1042 E : bool is_nested = ShadowMarkerHelper::IsNestedBlockStart(marker);
1043 E : DCHECK(is_nested || nesting_depth_ == 0);
1044 :
1045 : // Determine if the block is to be reported.
1046 E : if (!is_nested || recursive_) {
1047 : // This can only fail if the shadow memory is malformed.
1048 E : CHECK(shadow_->BlockInfoFromShadow(cursor_, info));
1049 :
1050 : // In a recursive descent we have to process body contents.
1051 E : if (recursive_) {
1052 E : cursor_ += kShadowRatio;
1053 E : } else {
1054 : // Otherwise we can skip the body of the block we just reported.
1055 : // We skip directly to the end marker (but not past it so that depth
1056 : // bookkeeping works properly).
1057 E : cursor_ += info->block_size - kShadowRatio;
1058 : }
1059 :
1060 E : shadow_cursor_ = shadow_->GetShadowMemoryForAddress(cursor_);
1061 E : return true;
1062 : }
1063 i : continue;
1064 : }
1065 E : }
1066 :
1067 E : return false;
1068 E : }
1069 :
1070 : } // namespace asan
1071 : } // namespace agent
|