1 : // Copyright 2012 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/agent/asan/asan_heap.h"
16 :
17 : #include <algorithm>
18 :
19 : #include "base/logging.h"
20 : #include "base/string_util.h"
21 : #include "base/stringprintf.h"
22 : #include "base/time.h"
23 : #include "base/debug/alias.h"
24 : #include "base/debug/stack_trace.h"
25 : #include "base/strings/sys_string_conversions.h"
26 : #include "syzygy/agent/asan/asan_runtime.h"
27 : #include "syzygy/agent/asan/asan_shadow.h"
28 : #include "syzygy/common/align.h"
29 : #include "syzygy/trace/common/clock.h"
30 :
31 : namespace agent {
32 : namespace asan {
33 : namespace {
34 :
35 : typedef StackCapture::StackId StackId;
36 :
37 : // The first 64 KB of memory is not addressable.
38 : const uint8* kAddressLowerLimit = reinterpret_cast<uint8*>(0x10000);
39 :
40 : // Utility class which implements an auto lock for a HeapProxy.
41 : class HeapLocker {
42 : public:
43 : explicit HeapLocker(HeapProxy* const heap) : heap_(heap) {
44 : DCHECK(heap != NULL);
45 : if (!heap->Lock()) {
46 : LOG(ERROR) << "Unable to lock the heap.";
47 : }
48 : }
49 :
50 : ~HeapLocker() {
51 : DCHECK(heap_ != NULL);
52 : if (!heap_->Unlock()) {
53 : LOG(ERROR) << "Unable to unlock the heap.";
54 : }
55 : }
56 :
57 : private:
58 : HeapProxy* const heap_;
59 :
60 : DISALLOW_COPY_AND_ASSIGN(HeapLocker);
61 : };
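// A minimal usage sketch for HeapLocker (purely illustrative; |proxy| is a
// hypothetical HeapProxy*): the lock is acquired in the constructor and
// released when the locker goes out of scope, following the usual RAII
// pattern.
//
//   {
//     HeapLocker locker(proxy);
//     // ... operations that must run under the heap lock ...
//   }  // |proxy| is unlocked here.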
62 :
63 : // Returns the number of CPU cycles per microsecond.
64 E : double GetCpuCyclesPerUs() {
65 E : trace::common::TimerInfo tsc_info = {};
66 E : trace::common::GetTscTimerInfo(&tsc_info);
67 :
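// If the OS reports a TSC frequency, use it directly; otherwise approximate
// it by measuring how many cycles elapse over a short sleep.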
68 E : if (tsc_info.frequency != 0) {
69 : return (tsc_info.frequency /
70 E : static_cast<double>(base::Time::kMicrosecondsPerSecond));
71 i : } else {
72 i : uint64 cycle_start = trace::common::GetTsc();
73 i : ::Sleep(HeapProxy::kSleepTimeForApproximatingCPUFrequency);
74 : return (trace::common::GetTsc() - cycle_start) /
75 : (HeapProxy::kSleepTimeForApproximatingCPUFrequency *
76 i : static_cast<double>(base::Time::kMicrosecondsPerSecond));
77 : }
78 E : }
79 :
80 : // Verify that the memory range [mem, mem + len) is accessible.
81 E : bool MemoryRangeIsAccessible(uint8* mem, size_t len) {
82 E : for (size_t i = 0; i < len; ++i) {
83 E : if (!Shadow::IsAccessible(mem + i))
84 i : return false;
85 E : }
86 E : return true;
87 E : }
88 :
89 : } // namespace
90 :
91 : StackCaptureCache* HeapProxy::stack_cache_ = NULL;
92 : double HeapProxy::cpu_cycles_per_us_ = 0.0;
93 : // The default maximum quarantine size for a new Heap.
94 : size_t HeapProxy::default_quarantine_max_size_ = kDefaultQuarantineMaxSize;
95 : size_t HeapProxy::trailer_padding_size_ = kDefaultTrailerPaddingSize;
96 : const char* HeapProxy::kHeapUseAfterFree = "heap-use-after-free";
97 : const char* HeapProxy::kHeapBufferUnderFlow = "heap-buffer-underflow";
98 : const char* HeapProxy::kHeapBufferOverFlow = "heap-buffer-overflow";
99 : const char* HeapProxy::kAttemptingDoubleFree = "attempting double-free";
100 : const char* HeapProxy::kInvalidAddress = "invalid address";
101 : const char* HeapProxy::kWildAccess = "wild access";
102 : const char* HeapProxy::kHeapUnknownError = "heap-unknown-error";
103 :
104 : HeapProxy::HeapProxy()
105 : : heap_(NULL),
106 : head_(NULL),
107 : tail_(NULL),
108 : quarantine_size_(0),
109 E : quarantine_max_size_(0) {
110 E : }
111 :
112 E : HeapProxy::~HeapProxy() {
113 E : if (heap_ != NULL)
114 i : Destroy();
115 :
116 E : DCHECK(heap_ == NULL);
117 E : }
118 :
119 E : void HeapProxy::Init(StackCaptureCache* cache) {
120 E : DCHECK(cache != NULL);
121 E : default_quarantine_max_size_ = kDefaultQuarantineMaxSize;
122 E : trailer_padding_size_ = kDefaultTrailerPaddingSize;
123 E : stack_cache_ = cache;
124 E : }
125 :
126 E : HANDLE HeapProxy::ToHandle(HeapProxy* proxy) {
127 E : DCHECK(proxy != NULL);
128 E : return proxy;
129 E : }
130 :
131 E : HeapProxy* HeapProxy::FromHandle(HANDLE heap) {
132 E : DCHECK(heap != NULL);
133 E : return reinterpret_cast<HeapProxy*>(heap);
134 E : }
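// A minimal lifecycle sketch (purely illustrative; |stack_cache| is a
// hypothetical StackCaptureCache* and error handling is omitted). A HeapProxy
// wraps a Win32 heap, so usage mirrors the usual
// HeapCreate/HeapAlloc/HeapFree/HeapDestroy sequence.
//
//   HeapProxy proxy;
//   proxy.Init(stack_cache);        // Sets up the shared stack capture cache.
//   proxy.Create(0, 0, 0);          // A growable heap, as with ::HeapCreate.
//   void* p = proxy.Alloc(0, 128);
//   proxy.Free(0, p);               // The block is quarantined, not released.
//   proxy.Destroy();                // Flushes the quarantine.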
135 :
136 : bool HeapProxy::Create(DWORD options,
137 : size_t initial_size,
138 E : size_t maximum_size) {
139 E : DCHECK(heap_ == NULL);
140 :
141 E : SetQuarantineMaxSize(default_quarantine_max_size_);
142 :
143 E : HANDLE heap_new = ::HeapCreate(options, initial_size, maximum_size);
144 E : if (heap_new == NULL)
145 E : return false;
146 :
147 E : heap_ = heap_new;
148 :
149 E : return true;
150 E : }
151 :
152 E : bool HeapProxy::Destroy() {
153 E : DCHECK(heap_ != NULL);
154 :
155 : // Flush the quarantine.
156 E : SetQuarantineMaxSize(0);
157 :
158 E : if (!::HeapDestroy(heap_))
159 i : return false;
160 :
161 E : heap_ = NULL;
162 E : return true;
163 E : }
164 :
165 E : void* HeapProxy::Alloc(DWORD flags, size_t bytes) {
166 E : DCHECK(heap_ != NULL);
167 :
168 E : size_t alloc_size = GetAllocSize(bytes, kDefaultAllocGranularity);
169 :
170 : // GetAllocSize can return a value smaller than |bytes| if the computation
171 : // overflows, i.e. when the requested size is close to 0xffffffff.
172 E : if (alloc_size < bytes)
173 i : return NULL;
174 :
175 : uint8* block_mem = reinterpret_cast<uint8*>(
176 E : ::HeapAlloc(heap_, flags, alloc_size));
177 :
178 : // Capture the current stack. InitFromStack is inlined to preserve the
179 : // greatest number of stack frames.
180 E : StackCapture stack;
181 E : stack.InitFromStack();
182 :
183 : return InitializeAsanBlock(block_mem,
184 : bytes,
185 : alloc_size,
186 : kDefaultAllocGranularityLog,
187 E : stack);
188 E : }
189 :
190 : void* HeapProxy::InitializeAsanBlock(uint8* asan_pointer,
191 : size_t user_size,
192 : size_t asan_size,
193 : size_t alloc_granularity_log,
194 E : const StackCapture& stack) {
195 E : if (asan_pointer == NULL)
196 i : return NULL;
197 :
198 : // Here's the layout of an ASan block:
199 : //
200 : // +----------------------+--------------+-------+---------------+---------+
201 : // | Block Header Padding | Block Header | Block | Block Trailer | Padding |
202 : // +----------------------+--------------+-------+---------------+---------+
203 : //
204 : // Block Header padding (optional): This is only present when the block has an
205 : // alignment bigger than the size of the BlockHeader structure. In this
206 : // case the size of the padding will be stored at the beginning of this
207 : // zone. This zone is poisoned.
208 : // Block Header: A structure containing some of the metadata about this block.
209 : // This zone is poisoned.
210 : // Block: The user space. This zone isn't poisoned.
211 : // Block Trailer: A structure containing some of the metadata about this
212 : // block. This zone is poisoned.
213 : // Padding (optional): Some padding to align the size of the whole structure
214 : // to the block alignment. This zone is poisoned.
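//
// As a purely illustrative example (the real sizes depend on the BlockHeader
// and BlockTrailer layouts): with a 16-byte allocation granularity, a
// hypothetical 12-byte BlockHeader and a 20-byte user allocation, the header
// zone is AlignUp(12, 16) = 16 bytes, i.e. 4 bytes of padding (holding the
// padding size) followed by the 12-byte header; the user data then starts at
// offset 16, and everything from offset 36 up to the end of the ASan
// allocation is poisoned as the trailer and trailing padding.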
215 :
216 E : if (alloc_granularity_log < Shadow::kShadowGranularityLog)
217 i : alloc_granularity_log = Shadow::kShadowGranularityLog;
218 :
219 : size_t header_size = common::AlignUp(sizeof(BlockHeader),
220 E : 1 << alloc_granularity_log);
221 :
222 : BlockHeader* block_header = reinterpret_cast<BlockHeader*>(
223 E : asan_pointer + header_size - sizeof(BlockHeader));
224 :
225 : // Check whether we need to store the block header padding size at the
226 : // beginning of the block.
227 E : if (header_size > sizeof(BlockHeader)) {
228 E : size_t padding_size = header_size - sizeof(BlockHeader);
229 E : DCHECK_GE(padding_size, sizeof(padding_size));
230 E : *(reinterpret_cast<size_t*>(asan_pointer)) = padding_size;
231 : }
232 :
233 : // Poison the block header.
234 E : size_t trailer_size = asan_size - user_size - header_size;
235 E : Shadow::Poison(asan_pointer, header_size, Shadow::kHeapLeftRedzone);
236 :
237 : // Initialize the block fields.
238 E : block_header->magic_number = kBlockHeaderSignature;
239 E : block_header->block_size = user_size;
240 E : block_header->state = ALLOCATED;
241 E : block_header->alloc_stack = stack_cache_->SaveStackTrace(stack);
242 E : block_header->free_stack = NULL;
243 E : block_header->alignment_log = alloc_granularity_log;
244 :
245 E : BlockTrailer* block_trailer = BlockHeaderToBlockTrailer(block_header);
246 E : block_trailer->free_tid = 0;
247 E : block_trailer->next_free_block = NULL;
248 E : block_trailer->alloc_tid = ::GetCurrentThreadId();
249 :
250 E : uint8* block_alloc = BlockHeaderToUserPointer(block_header);
251 E : DCHECK(MemoryRangeIsAccessible(block_alloc, user_size));
252 :
253 : // Poison the block trailer.
254 : Shadow::Poison(block_alloc + user_size,
255 : trailer_size,
256 E : Shadow::kHeapRightRedzone);
257 :
258 E : return block_alloc;
259 E : }
260 :
261 E : void* HeapProxy::ReAlloc(DWORD flags, void* mem, size_t bytes) {
262 E : DCHECK(heap_ != NULL);
263 :
264 : // Always fail in-place reallocation requests.
265 E : if ((flags & HEAP_REALLOC_IN_PLACE_ONLY) != 0)
266 E : return NULL;
267 :
268 E : void *new_mem = Alloc(flags, bytes);
269 : // Bail early if the new allocation didn't succeed
270 : // and avoid freeing the existing allocation.
271 E : if (new_mem == NULL)
272 i : return NULL;
273 :
274 E : if (mem != NULL) {
275 E : memcpy(new_mem, mem, std::min(bytes, Size(0, mem)));
276 E : Free(flags, mem);
277 : }
278 :
279 E : return new_mem;
280 E : }
281 :
282 E : bool HeapProxy::Free(DWORD flags, void* mem) {
283 E : DCHECK(heap_ != NULL);
284 E : BlockHeader* block = UserPointerToBlockHeader(mem);
285 : // The standard allows calling free on a null pointer. UserPointerToBlockHeader
286 : // returns NULL when given a null pointer, so we simply return true here.
287 E : if (block == NULL)
288 i : return true;
289 :
290 E : DCHECK(BlockHeaderToUserPointer(block) == mem);
291 :
292 : // Capture the current stack.
293 E : StackCapture stack;
294 E : stack.InitFromStack();
295 :
296 : // Mark the block as quarantined.
297 E : if (!MarkBlockAsQuarantined(block, stack))
298 E : return false;
299 :
300 E : QuarantineBlock(block);
301 :
302 E : return true;
303 E : }
304 :
305 : void HeapProxy::MarkBlockAsQuarantined(void* asan_pointer,
306 E : const StackCapture& stack) {
307 E : DCHECK_NE(reinterpret_cast<void*>(NULL), asan_pointer);
308 E : BlockHeader* block_header = AsanPointerToBlockHeader(asan_pointer);
309 :
310 E : MarkBlockAsQuarantined(block_header, stack);
311 E : }
312 :
313 : bool HeapProxy::MarkBlockAsQuarantined(BlockHeader* block_header,
314 E : const StackCapture& stack) {
315 :
316 E : if (block_header->state != ALLOCATED) {
317 : // We're not supposed to see any other kind of block here: the FREED state
318 : // is only applied to blocks after their magic number has been invalidated
319 : // and they have been freed.
320 E : DCHECK(block_header->state == QUARANTINED);
321 E : return false;
322 : }
323 :
324 E : block_header->free_stack = stack_cache_->SaveStackTrace(stack);
325 :
326 E : BlockTrailer* trailer = BlockHeaderToBlockTrailer(block_header);
327 E : trailer->free_timestamp = trace::common::GetTsc();
328 E : trailer->free_tid = ::GetCurrentThreadId();
329 :
330 E : block_header->state = QUARANTINED;
331 :
332 : // Poison the released alloc (marked as freed) and quarantine the block.
333 : // Note that the original data is left intact. This may make it easier
334 : // to debug a crash report/dump on access to a quarantined block.
335 E : uint8* mem = BlockHeaderToUserPointer(block_header);
336 E : Shadow::MarkAsFreed(mem, block_header->block_size);
337 :
338 E : return true;
339 E : }
340 :
341 E : size_t HeapProxy::Size(DWORD flags, const void* mem) {
342 E : DCHECK(heap_ != NULL);
343 E : BlockHeader* block = UserPointerToBlockHeader(mem);
344 E : if (block == NULL)
345 i : return -1;
346 :
347 E : return block->block_size;
348 E : }
349 :
350 E : bool HeapProxy::Validate(DWORD flags, const void* mem) {
351 E : DCHECK(heap_ != NULL);
352 E : return ::HeapValidate(heap_, flags, UserPointerToBlockHeader(mem)) == TRUE;
353 E : }
354 :
355 E : size_t HeapProxy::Compact(DWORD flags) {
356 E : DCHECK(heap_ != NULL);
357 E : return ::HeapCompact(heap_, flags);
358 E : }
359 :
360 E : bool HeapProxy::Lock() {
361 E : DCHECK(heap_ != NULL);
362 E : return ::HeapLock(heap_) == TRUE;
363 E : }
364 :
365 E : bool HeapProxy::Unlock() {
366 E : DCHECK(heap_ != NULL);
367 E : return ::HeapUnlock(heap_) == TRUE;
368 E : }
369 :
370 E : bool HeapProxy::Walk(PROCESS_HEAP_ENTRY* entry) {
371 E : DCHECK(heap_ != NULL);
372 E : return ::HeapWalk(heap_, entry) == TRUE;
373 E : }
374 :
375 : bool HeapProxy::SetInformation(HEAP_INFORMATION_CLASS info_class,
376 : void* info,
377 E : size_t info_length) {
378 E : DCHECK(heap_ != NULL);
379 E : return ::HeapSetInformation(heap_, info_class, info, info_length) == TRUE;
380 E : }
381 :
382 : bool HeapProxy::QueryInformation(HEAP_INFORMATION_CLASS info_class,
383 : void* info,
384 : size_t info_length,
385 E : unsigned long* return_length) {
386 E : DCHECK(heap_ != NULL);
387 : return ::HeapQueryInformation(heap_,
388 : info_class,
389 : info,
390 : info_length,
391 E : return_length) == TRUE;
392 E : }
393 :
394 E : void HeapProxy::SetQuarantineMaxSize(size_t quarantine_max_size) {
395 : {
396 E : base::AutoLock lock(lock_);
397 E : quarantine_max_size_ = quarantine_max_size;
398 E : }
399 :
400 E : TrimQuarantine();
401 E : }
402 :
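// The quarantine is kept as a FIFO singly-linked list: blocks are appended at
// |tail_| by QuarantineBlock and evicted from |head_| here once
// |quarantine_size_| exceeds |quarantine_max_size_|. The links live in the
// per-block trailers (|next_free_block|), so the list needs no extra storage.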
403 E : void HeapProxy::TrimQuarantine() {
404 E : while (true) {
405 E : BlockHeader* free_block = NULL;
406 E : BlockTrailer* trailer = NULL;
407 E : size_t alloc_size = 0;
408 :
409 : // This code runs under a critical lock. Try to keep as much work out of
410 : // this scope as possible!
411 : {
412 E : base::AutoLock lock(lock_);
413 E : if (quarantine_size_ <= quarantine_max_size_)
414 E : return;
415 :
416 E : DCHECK(head_ != NULL);
417 E : DCHECK(tail_ != NULL);
418 :
419 E : free_block = head_;
420 E : trailer = BlockHeaderToBlockTrailer(free_block);
421 E : DCHECK(trailer != NULL);
422 :
423 E : head_ = trailer->next_free_block;
424 E : if (head_ == NULL)
425 E : tail_ = NULL;
426 :
427 : alloc_size = GetAllocSize(free_block->block_size,
428 E : kDefaultAllocGranularity);
429 :
430 E : DCHECK_GE(quarantine_size_, alloc_size);
431 E : quarantine_size_ -= alloc_size;
432 E : }
433 :
434 : // Clean up the block's metadata. We do this outside of the heap lock to
435 : // reduce contention.
436 E : ReleaseASanBlock(free_block, trailer);
437 :
438 E : Shadow::Unpoison(free_block, alloc_size);
439 E : ::HeapFree(heap_, 0, free_block);
440 E : }
441 E : }
442 :
443 E : void HeapProxy::DestroyAsanBlock(void* asan_pointer) {
444 E : DCHECK_NE(reinterpret_cast<void*>(NULL), asan_pointer);
445 :
446 E : BlockHeader* block_header = AsanPointerToBlockHeader(asan_pointer);
447 E : DCHECK_NE(reinterpret_cast<void*>(NULL), block_header);
448 E : BlockTrailer* block_trailer = BlockHeaderToBlockTrailer(block_header);
449 E : DCHECK_NE(reinterpret_cast<void*>(NULL), block_trailer);
450 :
451 E : ReleaseASanBlock(block_header, block_trailer);
452 E : }
453 :
454 : void HeapProxy::ReleaseASanBlock(BlockHeader* block_header,
455 E : BlockTrailer* block_trailer) {
456 E : DCHECK_NE(reinterpret_cast<void*>(NULL), block_header);
457 E : DCHECK_NE(reinterpret_cast<void*>(NULL), block_trailer);
458 :
459 : // Return pointers to the stacks for reference counting purposes.
460 E : if (block_header->alloc_stack != NULL) {
461 E : stack_cache_->ReleaseStackTrace(block_header->alloc_stack);
462 E : block_header->alloc_stack = NULL;
463 : }
464 E : if (block_header->free_stack != NULL) {
465 E : stack_cache_->ReleaseStackTrace(block_header->free_stack);
466 E : block_header->free_stack = NULL;
467 : }
468 :
469 E : block_header->state = FREED;
470 E : }
471 :
472 : void HeapProxy::CloneObject(const void* src_asan_pointer,
473 E : void* dst_asan_pointer) {
474 E : DCHECK_NE(reinterpret_cast<void*>(NULL), src_asan_pointer);
475 E : DCHECK_NE(reinterpret_cast<void*>(NULL), dst_asan_pointer);
476 :
477 : BlockHeader* block_header = AsanPointerToBlockHeader(
478 E : const_cast<void*>(src_asan_pointer));
479 E : DCHECK_NE(reinterpret_cast<void*>(NULL), block_header);
480 :
481 E : DCHECK_NE(reinterpret_cast<StackCapture*>(NULL), block_header->alloc_stack);
482 E : const_cast<StackCapture*>(block_header->alloc_stack)->AddRef();
483 E : if (block_header->free_stack != NULL)
484 E : const_cast<StackCapture*>(block_header->free_stack)->AddRef();
485 :
486 : size_t alloc_size = GetAllocSize(block_header->block_size,
487 E : 1 << block_header->alignment_log);
488 :
489 E : memcpy(dst_asan_pointer, src_asan_pointer, alloc_size);
490 :
491 E : Shadow::CloneShadowRange(src_asan_pointer, dst_asan_pointer, alloc_size);
492 E : }
493 :
494 E : void HeapProxy::QuarantineBlock(BlockHeader* block) {
495 E : DCHECK(block != NULL);
496 :
497 E : DCHECK(BlockHeaderToBlockTrailer(block)->next_free_block == NULL);
498 : size_t alloc_size = GetAllocSize(block->block_size,
499 E : kDefaultAllocGranularity);
500 :
501 : {
502 E : base::AutoLock lock(lock_);
503 :
504 E : quarantine_size_ += alloc_size;
505 E : if (tail_ != NULL) {
506 E : BlockHeaderToBlockTrailer(tail_)->next_free_block = block;
507 E : } else {
508 E : DCHECK(head_ == NULL);
509 E : head_ = block;
510 : }
511 E : tail_ = block;
512 E : }
513 :
514 E : TrimQuarantine();
515 E : }
516 :
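// A worked example of the computation below, with purely illustrative sizes
// (the real values depend on the BlockHeader/BlockTrailer layouts and on
// |trailer_padding_size_|): for bytes == 10, alignment == 8,
// sizeof(BlockHeader) == 12, sizeof(BlockTrailer) == 20, no trailer padding
// and an 8-byte shadow granularity, the result is
// AlignUp(10 + max(12, 8) + 20 + 0, 8) == AlignUp(42, 8) == 48.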
517 E : size_t HeapProxy::GetAllocSize(size_t bytes, size_t alignment) {
518 E : bytes += std::max(sizeof(BlockHeader), alignment);
519 E : bytes += sizeof(BlockTrailer);
520 E : bytes += trailer_padding_size_;
521 E : return common::AlignUp(bytes, Shadow::kShadowGranularity);
522 E : }
523 :
524 : HeapProxy::BlockHeader* HeapProxy::UserPointerToBlockHeader(
525 E : const void* user_pointer) {
526 E : if (user_pointer == NULL)
527 i : return NULL;
528 :
529 E : const uint8* mem = static_cast<const uint8*>(user_pointer);
530 E : const BlockHeader* header = reinterpret_cast<const BlockHeader*>(mem) - 1;
531 E : if (header->magic_number != kBlockHeaderSignature)
532 i : return NULL;
533 :
534 E : return const_cast<BlockHeader*>(header);
535 E : }
536 :
537 : HeapProxy::BlockHeader* HeapProxy::AsanPointerToBlockHeader(
538 E : void* asan_pointer) {
539 E : DCHECK_NE(reinterpret_cast<void*>(NULL), asan_pointer);
540 :
541 E : void* user_pointer = AsanPointerToUserPointer(asan_pointer);
542 E : DCHECK_NE(reinterpret_cast<void*>(NULL), user_pointer);
543 :
544 E : return UserPointerToBlockHeader(user_pointer);
545 E : }
546 :
547 E : uint8* HeapProxy::AsanPointerToUserPointer(void* asan_pointer) {
548 E : if (asan_pointer == NULL)
549 i : return NULL;
550 :
551 : // Check if the ASan pointer is also pointing to the block header.
552 E : BlockHeader* header = reinterpret_cast<BlockHeader*>(asan_pointer);
553 E : if (header->magic_number == kBlockHeaderSignature)
554 E : return BlockHeaderToUserPointer(const_cast<BlockHeader*>(header));
555 :
556 : // There's an offset between the ASan pointer and the block header; use it to
557 : // get the user pointer.
558 E : size_t offset = *(reinterpret_cast<const size_t*>(asan_pointer));
559 : header = reinterpret_cast<BlockHeader*>(
560 E : reinterpret_cast<uint8*>(asan_pointer) + offset);
561 :
562 : // No need to check the signature of the header here; this is already done
563 : // in BlockHeaderToUserPointer.
564 E : return BlockHeaderToUserPointer(header);
565 E : }
566 :
567 E : uint8* HeapProxy::BlockHeaderToAsanPointer(const BlockHeader* header) {
568 E : DCHECK(header != NULL);
569 E : DCHECK_EQ(kBlockHeaderSignature, header->magic_number);
570 :
571 : return reinterpret_cast<uint8*>(
572 : common::AlignDown(reinterpret_cast<size_t>(header),
573 E : 1 << header->alignment_log));
574 E : }
575 :
576 E : uint8* HeapProxy::UserPointerToAsanPointer(const void* user_pointer) {
577 E : if (user_pointer == NULL)
578 i : return NULL;
579 :
580 : const BlockHeader* header =
581 E : reinterpret_cast<const BlockHeader*>(user_pointer) - 1;
582 :
583 E : return BlockHeaderToAsanPointer(header);
584 E : }
585 :
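// As a purely illustrative example (sizes are hypothetical): with a 12-byte
// BlockHeader and a 25-byte user block, the trailer computed below starts at
// offset AlignUp(12 + 25, 4) == 40 from the header, i.e. 3 bytes past the end
// of the user data, so that the trailer itself is 4-byte aligned.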
586 : HeapProxy::BlockTrailer* HeapProxy::BlockHeaderToBlockTrailer(
587 E : const BlockHeader* header) {
588 E : DCHECK(header != NULL);
589 E : DCHECK_EQ(kBlockHeaderSignature, header->magic_number);
590 : // We want the block trailers to be 4-byte aligned after the end of a block.
591 E : const size_t kBlockTrailerAlignment = 4;
592 :
593 E : uint8* mem = reinterpret_cast<uint8*>(const_cast<BlockHeader*>(header));
594 : size_t aligned_size =
595 : common::AlignUp(sizeof(BlockHeader) + header->block_size,
596 E : kBlockTrailerAlignment);
597 :
598 E : return reinterpret_cast<BlockTrailer*>(mem + aligned_size);
599 E : }
600 :
601 E : uint8* HeapProxy::BlockHeaderToUserPointer(BlockHeader* header) {
602 E : DCHECK(header != NULL);
603 E : DCHECK_EQ(kBlockHeaderSignature, header->magic_number);
604 E : DCHECK(header->state == ALLOCATED || header->state == QUARANTINED);
605 :
606 E : return reinterpret_cast<uint8*>(header + 1);
607 E : }
608 :
609 : HeapProxy::BadAccessKind HeapProxy::GetBadAccessKind(const void* addr,
610 E : BlockHeader* header) {
611 E : DCHECK(addr != NULL);
612 E : DCHECK(header != NULL);
613 :
614 E : BadAccessKind bad_access_kind = UNKNOWN_BAD_ACCESS;
615 :
616 E : if (header->state == QUARANTINED) {
617 E : bad_access_kind = USE_AFTER_FREE;
618 E : } else {
619 E : if (addr < (BlockHeaderToUserPointer(header)))
620 E : bad_access_kind = HEAP_BUFFER_UNDERFLOW;
621 E : else if (addr >= (BlockHeaderToUserPointer(header) + header->block_size))
622 E : bad_access_kind = HEAP_BUFFER_OVERFLOW;
623 E : else if (Shadow::GetShadowMarkerForAddress(addr) ==
624 i : Shadow::kHeapFreedByte) {
625 : // This is a use after free on a block managed by a nested heap.
626 i : bad_access_kind = USE_AFTER_FREE;
627 : }
628 : }
629 E : return bad_access_kind;
630 E : }
631 :
632 E : HeapProxy::BlockHeader* HeapProxy::FindBlockContainingAddress(uint8* addr) {
633 E : DCHECK(addr != NULL);
634 :
635 E : uint8* original_addr = addr;
636 : addr = reinterpret_cast<uint8*>(
637 : common::AlignDown(reinterpret_cast<size_t>(addr),
638 E : Shadow::kShadowGranularity));
639 :
640 : // Here's what the memory will look like for a block nested into another one:
641 : // +-----------------------------+--------------+---------------+
642 : // Shadow: | Left redzone | | Right redzone |
643 : // +------+---------------+------+--------------+-------+-------+
644 : // Memory: | | BH_2 Padding | BH_2 | Data Block 2 | BT_2 | |
645 : // | BH_1 +---------------+------+--------------+-------+ BT_1 |
646 : // | | Data Block 1 | |
647 : // +------+---------------------------------------------+-------+
648 : // Legend:
649 : // - BH_X: Block header of the block X.
650 : // - BT_X: Block trailer of the block X.
651 : //
652 : // In this example block 2 has an alignment different from block 1, hence the
653 : // padding in front of its header.
654 : //
655 : // There can be several scenarios:
656 : //
657 : // 1) |addr| doesn't point to an address corresponding to a left redzone in
658 : // the shadow. In this case we should walk the shadow until we find the
659 : // header of a block containing |addr|.
660 : //
661 : // 2) |addr| points into a left redzone. This can be due to several kinds
662 : // of bad accesses: an underflow on block 1, an underflow on block 2 or a
663 : // use-after-free on block 1. In this case we need to scan the shadow to
664 : // the left and to the right until we find a block header, or until we
665 : // get out of the shadow. This gives us two pointers bounding the left
666 : // redzone; at least one of them must point to a block header, otherwise
667 : // the heap is corrupted. If exactly one of them points to a header then
668 : // that's our block header; if that block doesn't contain |addr| then the
669 : // heap is corrupted. If both of them point to a block header then |addr|
670 : // should fall within exactly one of these blocks; if it falls within
671 : // both or neither, the heap is corrupted.
672 :
673 : // This corresponds to the first case.
674 : if (Shadow::GetShadowMarkerForAddress(reinterpret_cast<void*>(addr)) !=
675 E : Shadow::kHeapLeftRedzone) {
676 E : while (addr >= kAddressLowerLimit) {
677 E : addr -= Shadow::kShadowGranularity;
678 : if (Shadow::GetShadowMarkerForAddress(reinterpret_cast<void*>(addr)) ==
679 E : Shadow::kHeapLeftRedzone) {
680 : BlockHeader* header = reinterpret_cast<BlockHeader*>(addr -
681 E : (sizeof(BlockHeader) - Shadow::kShadowGranularity));
682 E : if (header->magic_number != kBlockHeaderSignature)
683 E : continue;
684 : size_t block_size = GetAllocSize(header->block_size,
685 E : 1 << header->alignment_log);
686 E : uint8* asan_ptr = BlockHeaderToAsanPointer(header);
687 E : if (reinterpret_cast<uint8*>(original_addr) < asan_ptr + block_size)
688 E : return header;
689 : }
690 E : }
691 :
692 : // This address doesn't belong to any block.
693 E : return NULL;
694 : }
695 :
696 : // The bad access occurred in a left redzone, this corresponds to the second
697 : // case.
698 :
699 : // Start by looking for the left bound.
700 E : uint8* left_bound = reinterpret_cast<uint8*>(addr);
701 E : BlockHeader* left_header = NULL;
702 E : size_t left_block_size = 0;
703 : while (Shadow::GetShadowMarkerForAddress(left_bound) ==
704 E : Shadow::kHeapLeftRedzone) {
705 E : BlockHeader* temp_header = reinterpret_cast<BlockHeader*>(left_bound);
706 E : if (temp_header->magic_number == kBlockHeaderSignature) {
707 E : left_header = temp_header;
708 : left_block_size = GetAllocSize(left_header->block_size,
709 E : 1 << left_header->alignment_log);
710 E : break;
711 : }
712 E : left_bound -= Shadow::kShadowGranularity;
713 E : }
714 :
715 : // Look for the right bound.
716 E : uint8* right_bound = reinterpret_cast<uint8*>(addr);
717 E : BlockHeader* right_header = NULL;
718 E : size_t right_block_size = 0;
719 : while (Shadow::GetShadowMarkerForAddress(right_bound +
720 E : Shadow::kShadowGranularity) == Shadow::kHeapLeftRedzone) {
721 E : right_bound += Shadow::kShadowGranularity;
722 E : BlockHeader* temp_header = reinterpret_cast<BlockHeader*>(right_bound);
723 E : if (temp_header->magic_number == kBlockHeaderSignature) {
724 E : right_header = temp_header;
725 : right_block_size = GetAllocSize(right_header->block_size,
726 E : 1 << right_header->alignment_log);
727 E : break;
728 : }
729 E : }
730 :
731 E : CHECK(left_header != NULL || right_header != NULL);
732 :
733 : // If only one of the bounds corresponds to a block header then we return it.
734 :
735 E : if (left_header != NULL && right_header == NULL) {
736 : if (original_addr >= reinterpret_cast<uint8*>(left_header) +
737 E : left_block_size) {
738 : // TODO(sebmarchand): Report that the heap is corrupted.
739 i : return NULL;
740 : }
741 E : return left_header;
742 : }
743 :
744 E : if (right_header != NULL && left_header == NULL) {
745 : if (original_addr < BlockHeaderToAsanPointer(right_header) ||
746 : original_addr >= BlockHeaderToAsanPointer(right_header) +
747 i : right_block_size) {
748 : // TODO(sebmarchand): Report that the heap is corrupted.
749 i : return NULL;
750 : }
751 i : return right_header;
752 : }
753 :
754 E : DCHECK(left_header != NULL && right_header != NULL);
755 :
756 : // Otherwise, start by checking whether |addr| is contained in the rightmost
757 : // block.
758 : uint8* right_block_left_bound = reinterpret_cast<uint8*>(
759 E : BlockHeaderToAsanPointer(right_header));
760 : if (original_addr >= right_block_left_bound &&
761 E : original_addr < right_block_left_bound + right_block_size) {
762 i : return right_header;
763 : }
764 :
765 : uint8* left_block_left_bound = reinterpret_cast<uint8*>(
766 E : BlockHeaderToAsanPointer(left_header));
767 : if (original_addr < left_block_left_bound ||
768 E : original_addr >= left_block_left_bound + left_block_size) {
769 : // TODO(sebmarchand): Report that the heap is corrupted.
770 i : return NULL;
771 : }
772 :
773 E : return left_header;
774 E : }
775 :
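// Walks up the chain of containing blocks until a quarantined (freed) ancestor
// is found, or until there is no containing block left. This is used when
// reporting a use-after-free on a block owned by a nested heap.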
776 : HeapProxy::BlockHeader* HeapProxy::FindContainingFreedBlock(
777 E : BlockHeader* inner_block) {
778 E : DCHECK_NE(reinterpret_cast<BlockHeader*>(NULL), inner_block);
779 E : BlockHeader* containing_block = NULL;
780 : do {
781 E : containing_block = FindContainingBlock(inner_block);
782 E : inner_block = containing_block;
783 : } while (containing_block != NULL &&
784 E : containing_block->state != QUARANTINED);
785 E : return containing_block;
786 E : }
787 :
788 : HeapProxy::BlockHeader* HeapProxy::FindContainingBlock(
789 E : BlockHeader* inner_block) {
790 E : DCHECK_NE(reinterpret_cast<BlockHeader*>(NULL), inner_block);
791 E : DCHECK_EQ(kBlockHeaderSignature, inner_block->magic_number);
792 :
793 E : size_t addr = reinterpret_cast<size_t>(inner_block);
794 : addr = common::AlignDown(addr - Shadow::kShadowGranularity,
795 E : Shadow::kShadowGranularity);
796 :
797 : // Try to find a block header containing this block.
798 E : while (addr >= reinterpret_cast<size_t>(kAddressLowerLimit)) {
799 : // Only look at addresses tagged as the left redzone of a block.
800 : if (Shadow::GetShadowMarkerForAddress(reinterpret_cast<void*>(addr)) ==
801 E : Shadow::kHeapLeftRedzone) {
802 E : BlockHeader* temp_header = reinterpret_cast<BlockHeader*>(addr);
803 E : if (temp_header->magic_number == kBlockHeaderSignature) {
804 : size_t block_size = GetAllocSize(temp_header->block_size,
805 E : 1 << temp_header->alignment_log);
806 : // Make sure that the inner block is contained in this block.
807 : if (reinterpret_cast<size_t>(inner_block) <
808 E : reinterpret_cast<size_t>(temp_header) + block_size) {
809 E : return temp_header;
810 : }
811 : }
812 : }
813 E : addr -= Shadow::kShadowGranularity;
814 E : }
815 :
816 E : return NULL;
817 E : }
818 :
819 E : bool HeapProxy::GetBadAccessInformation(AsanErrorInfo* bad_access_info) {
820 E : DCHECK(bad_access_info != NULL);
821 : BlockHeader* header = FindBlockContainingAddress(
822 E : reinterpret_cast<uint8*>(bad_access_info->location));
823 :
824 E : if (header == NULL)
825 E : return false;
826 :
827 E : BlockTrailer* trailer = BlockHeaderToBlockTrailer(header);
828 E : DCHECK(trailer != NULL);
829 :
830 E : if (bad_access_info->error_type != DOUBLE_FREE) {
831 : bad_access_info->error_type = GetBadAccessKind(bad_access_info->location,
832 E : header);
833 : }
834 :
835 : // Check if there's a containing block, in the case of a use-after-free on a
836 : // block owned by a nested heap.
837 E : BlockHeader* containing_block = NULL;
838 : if (bad_access_info->error_type == USE_AFTER_FREE &&
839 E : header->state != QUARANTINED) {
840 i : containing_block = FindContainingFreedBlock(header);
841 : }
842 :
843 : // Get the bad access description if we've been able to determine its kind.
844 E : if (bad_access_info->error_type != UNKNOWN_BAD_ACCESS) {
845 E : bad_access_info->microseconds_since_free = GetTimeSinceFree(header);
846 :
847 E : DCHECK(header->alloc_stack != NULL);
848 : memcpy(bad_access_info->alloc_stack,
849 : header->alloc_stack->frames(),
850 E : header->alloc_stack->num_frames() * sizeof(void*));
851 E : bad_access_info->alloc_stack_size = header->alloc_stack->num_frames();
852 E : bad_access_info->alloc_tid = trailer->alloc_tid;
853 :
854 E : if (header->state != ALLOCATED) {
855 E : const StackCapture* free_stack = header->free_stack;
856 E : BlockTrailer* free_stack_trailer = trailer;
857 : // Use the free metadata of the containing block if there's one.
858 E : if (containing_block != NULL) {
859 i : free_stack = containing_block->free_stack;
860 i : free_stack_trailer = BlockHeaderToBlockTrailer(containing_block);
861 : }
862 : memcpy(bad_access_info->free_stack,
863 : free_stack->frames(),
864 E : free_stack->num_frames() * sizeof(void*));
865 E : bad_access_info->free_stack_size = free_stack->num_frames();
866 E : bad_access_info->free_tid = free_stack_trailer->free_tid;
867 : }
868 E : GetAddressInformation(header, bad_access_info);
869 E : return true;
870 : }
871 :
872 i : return false;
873 E : }
874 :
875 : void HeapProxy::GetAddressInformation(BlockHeader* header,
876 E : AsanErrorInfo* bad_access_info) {
877 E : DCHECK(header != NULL);
878 E : DCHECK(bad_access_info != NULL);
879 E : DCHECK(bad_access_info->location != NULL);
883 :
884 E : uint8* block_alloc = BlockHeaderToUserPointer(header);
885 E : int offset = 0;
886 E : const char* offset_relativity = "";
887 E : switch (bad_access_info->error_type) {
888 : case HEAP_BUFFER_OVERFLOW:
889 : offset = static_cast<const uint8*>(bad_access_info->location)
890 E : - block_alloc - header->block_size;
891 E : offset_relativity = "beyond";
892 E : break;
893 : case HEAP_BUFFER_UNDERFLOW:
894 : offset = block_alloc -
895 E : static_cast<const uint8*>(bad_access_info->location);
896 E : offset_relativity = "before";
897 E : break;
898 : case USE_AFTER_FREE:
899 : offset = static_cast<const uint8*>(bad_access_info->location)
900 E : - block_alloc;
901 E : offset_relativity = "inside";
902 E : break;
903 : case WILD_ACCESS:
904 : case DOUBLE_FREE:
905 : case UNKNOWN_BAD_ACCESS:
906 E : return;
907 : default:
908 i : NOTREACHED() << "Error trying to dump address information.";
909 : }
910 :
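// Build a one-line, human-readable summary of the bad access. With
// hypothetical values this looks like:
//   "DEADBEEC is 4 bytes beyond 32-byte block [DEADBEC8,DEADBEE8)"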
911 : size_t shadow_info_bytes = base::snprintf(
912 : bad_access_info->shadow_info,
913 : arraysize(bad_access_info->shadow_info) - 1,
914 : "%08X is %d bytes %s %d-byte block [%08X,%08X)\n",
915 : bad_access_info->location,
916 : offset,
917 : offset_relativity,
918 : header->block_size,
919 : block_alloc,
920 E : block_alloc + header->block_size);
921 :
922 E : std::string shadow_memory;
923 E : Shadow::AppendShadowArrayText(bad_access_info->location, &shadow_memory);
924 : size_t shadow_mem_bytes = base::snprintf(
925 : bad_access_info->shadow_memory,
926 : arraysize(bad_access_info->shadow_memory) - 1,
927 : "%s",
928 E : shadow_memory.c_str());
929 :
930 : // Ensure that we had enough space to store the full shadow information.
931 E : DCHECK_LE(shadow_info_bytes, arraysize(bad_access_info->shadow_info) - 1);
932 E : DCHECK_LE(shadow_mem_bytes, arraysize(bad_access_info->shadow_memory) - 1);
933 E : }
934 :
935 E : const char* HeapProxy::AccessTypeToStr(BadAccessKind bad_access_kind) {
936 E : switch (bad_access_kind) {
937 : case USE_AFTER_FREE:
938 E : return kHeapUseAfterFree;
939 : case HEAP_BUFFER_UNDERFLOW:
940 E : return kHeapBufferUnderFlow;
941 : case HEAP_BUFFER_OVERFLOW:
942 E : return kHeapBufferOverFlow;
943 : case WILD_ACCESS:
944 E : return kWildAccess;
945 : case INVALID_ADDRESS:
946 E : return kInvalidAddress;
947 : case DOUBLE_FREE:
948 E : return kAttemptingDoubleFree;
949 : case UNKNOWN_BAD_ACCESS:
950 E : return kHeapUnknownError;
951 : default:
952 i : NOTREACHED() << "Unexpected bad access kind.";
953 i : return NULL;
954 : }
955 E : }
956 :
957 E : LIST_ENTRY* HeapProxy::ToListEntry(HeapProxy* proxy) {
958 E : DCHECK(proxy != NULL);
959 E : return &proxy->list_entry_;
960 E : }
961 :
962 : HeapProxy* HeapProxy::FromListEntry(LIST_ENTRY* list_entry) {
963 : DCHECK(list_entry != NULL);
964 : return CONTAINING_RECORD(list_entry, HeapProxy, list_entry_);
965 : }
966 :
967 E : uint64 HeapProxy::GetTimeSinceFree(const BlockHeader* header) {
968 E : DCHECK(header != NULL);
969 :
970 E : if (header->state == ALLOCATED)
971 E : return 0;
972 :
973 E : BlockTrailer* trailer = BlockHeaderToBlockTrailer(header);
974 E : DCHECK(trailer != NULL);
975 :
976 E : uint64 cycles_since_free = trace::common::GetTsc() - trailer->free_timestamp;
977 :
978 : // On x86/64, as long as cpu_cycles_per_us_ is 64-bit aligned, the write is
979 : // atomic, which means we don't care about multiple writers since it's not an
980 : // update based on the previous value.
981 E : if (cpu_cycles_per_us_ == 0.0)
982 E : cpu_cycles_per_us_ = GetCpuCyclesPerUs();
983 E : DCHECK_NE(0.0, cpu_cycles_per_us_);
984 :
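// Convert the elapsed cycle count to microseconds:
// microseconds = cycles / (cycles per microsecond).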
985 E : return cycles_since_free / cpu_cycles_per_us_;
986 E : }
987 :
988 : void HeapProxy::GetAsanExtent(const void* user_pointer,
989 : void** asan_pointer,
990 E : size_t* size) {
991 E : DCHECK_NE(reinterpret_cast<void*>(NULL), user_pointer);
992 E : DCHECK_NE(reinterpret_cast<void*>(NULL), asan_pointer);
993 E : DCHECK_NE(reinterpret_cast<void*>(NULL), size);
994 :
995 E : *asan_pointer = UserPointerToAsanPointer(user_pointer);
996 E : BlockHeader* block_header = UserPointerToBlockHeader(user_pointer);
997 :
998 E : DCHECK_NE(reinterpret_cast<void*>(NULL), block_header);
999 : *size = GetAllocSize(block_header->block_size,
1000 E : 1 << block_header->alignment_log);
1001 E : }
1002 :
1003 : void HeapProxy::GetUserExtent(const void* asan_pointer,
1004 : void** user_pointer,
1005 E : size_t* size) {
1006 E : DCHECK_NE(reinterpret_cast<void*>(NULL), asan_pointer);
1007 E : DCHECK_NE(reinterpret_cast<void*>(NULL), user_pointer);
1008 E : DCHECK_NE(reinterpret_cast<void*>(NULL), size);
1009 :
1010 E : *user_pointer = AsanPointerToUserPointer(const_cast<void*>(asan_pointer));
1011 E : BlockHeader* block_header = UserPointerToBlockHeader(*user_pointer);
1012 :
1013 E : DCHECK_NE(reinterpret_cast<void*>(NULL), block_header);
1014 E : *size = block_header->block_size;
1015 E : }
1016 :
1017 : } // namespace asan
1018 : } // namespace agent
|