// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/heap_managers/block_heap_manager.h"

#include <algorithm>
#include <utility>

#include "base/bind.h"
#include "base/rand_util.h"
#include "syzygy/agent/asan/page_protection_helpers.h"
#include "syzygy/agent/asan/runtime.h"
#include "syzygy/agent/asan/shadow.h"
#include "syzygy/agent/asan/timed_try.h"
#include "syzygy/agent/asan/heaps/internal_heap.h"
#include "syzygy/agent/asan/heaps/large_block_heap.h"
#include "syzygy/agent/asan/heaps/simple_block_heap.h"
#include "syzygy/agent/asan/heaps/win_heap.h"
#include "syzygy/agent/asan/heaps/zebra_block_heap.h"
#include "syzygy/common/asan_parameters.h"

namespace agent {
namespace asan {
namespace heap_managers {

namespace {

typedef HeapManagerInterface::HeapId HeapId;
using heaps::LargeBlockHeap;
using heaps::ZebraBlockHeap;

// Returns the position of the most significant bit in a 32-bit unsigned
// value.
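// For example, GetMSBIndex(1) == 0, GetMSBIndex(0x100) == 8 and
// GetMSBIndex(0xFFFFFFFF) == 31.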
size_t GetMSBIndex(size_t n) {
  // Algorithm taken from
  // http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
  size_t r = 0;
  size_t shift = 0;
  r = (n > 0xFFFF) << 4;
  n >>= r;
  shift = (n > 0xFF) << 3;
  n >>= shift;
  r |= shift;
  shift = (n > 0xF) << 2;
  n >>= shift;
  r |= shift;
  shift = (n > 0x3) << 1;
  n >>= shift;
  r |= shift;
  r |= (n >> 1);
  return r;
}

}  // namespace

BlockHeapManager::BlockHeapManager(Shadow* shadow,
                                   StackCaptureCache* stack_cache,
                                   MemoryNotifierInterface* memory_notifier)
    : shadow_(shadow),
      stack_cache_(stack_cache),
      memory_notifier_(memory_notifier),
      initialized_(false),
      process_heap_(nullptr),
      process_heap_underlying_heap_(nullptr),
      process_heap_id_(0),
      zebra_block_heap_(nullptr),
      zebra_block_heap_id_(0),
      large_block_heap_id_(0),
      locked_heaps_(nullptr),
      enable_page_protections_(true),
      corrupt_block_registry_cache_(L"SyzyAsanCorruptBlocks") {
  DCHECK_NE(static_cast<Shadow*>(nullptr), shadow);
  DCHECK_NE(static_cast<StackCaptureCache*>(nullptr), stack_cache);
  DCHECK_NE(static_cast<MemoryNotifierInterface*>(nullptr), memory_notifier);
  SetDefaultAsanParameters(&parameters_);

  // Initialize the allocation-filter flag (using Thread Local Storage).
  allocation_filter_flag_tls_ = ::TlsAlloc();
  CHECK_NE(TLS_OUT_OF_INDEXES, allocation_filter_flag_tls_);
  // And disable it by default.
  set_allocation_filter_flag(false);
}

BlockHeapManager::~BlockHeapManager() {
  TearDownHeapManager();
}

void BlockHeapManager::Init() {
  DCHECK(!initialized_);

  {
    base::AutoLock lock(lock_);
    InitInternalHeap();
    corrupt_block_registry_cache_.Init();
  }

  // This takes care of its own locking, as it's reentrant.
  PropagateParameters();

  {
    base::AutoLock lock(lock_);
    InitProcessHeap();
    initialized_ = true;
  }
}

HeapId BlockHeapManager::CreateHeap() {
  DCHECK(initialized_);

  // Creates the underlying heap used by this heap.
  // TODO(chrisha): We should be using the internal allocator for these
  // heap allocations!
  HeapInterface* underlying_heap = new heaps::WinHeap();
  // Creates the heap.
  BlockHeapInterface* heap = new heaps::SimpleBlockHeap(underlying_heap);

  base::AutoLock lock(lock_);
  underlying_heaps_map_.insert(std::make_pair(heap, underlying_heap));
  HeapMetadata metadata = { &shared_quarantine_, false };
  auto result = heaps_.insert(std::make_pair(heap, metadata));
  return GetHeapId(result);
}

bool BlockHeapManager::DestroyHeap(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  BlockHeapInterface* heap = GetHeapFromId(heap_id);
  BlockQuarantineInterface* quarantine = GetQuarantineFromId(heap_id);

  {
    // Move the heap from the active to the dying list. This prevents it from
    // being used while it's being torn down.
    base::AutoLock lock(lock_);
    auto iter = heaps_.find(heap);
    iter->second.is_dying = true;
  }

  // Destroy the heap and flush its quarantine. This is done outside of the
  // lock to both reduce contention and to ensure that we can re-enter the
  // block heap manager if corruption is found during the heap tear down.
  DestroyHeapContents(heap, quarantine);

  // Free up any resources associated with the heap. This modifies block
  // heap manager internals, so must be called under a lock.
  {
    base::AutoLock lock(lock_);
    DestroyHeapResourcesUnlocked(heap, quarantine);
    heaps_.erase(heaps_.find(heap));
  }

  return true;
}

void* BlockHeapManager::Allocate(HeapId heap_id, size_t bytes) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));

  // Some allocations can pass through without instrumentation.
  if (parameters_.allocation_guard_rate < 1.0 &&
      base::RandDouble() >= parameters_.allocation_guard_rate) {
    BlockHeapInterface* heap = GetHeapFromId(heap_id);
    void* alloc = heap->Allocate(bytes);
    if ((heap->GetHeapFeatures() &
        HeapInterface::kHeapReportsReservations) != 0) {
      shadow_->Unpoison(alloc, bytes);
    }
    return alloc;
  }

  // Capture the current stack. InitFromStack is inlined to preserve the
  // greatest number of stack frames.
  common::StackCapture stack;
  stack.InitFromStack();

  // Build the set of heaps that will be used to satisfy the allocation. This
  // is a stack of heaps: they will be tried in the reverse order in which
  // they were inserted.

  // We can always use the heap that was passed in.
  HeapId heaps[3] = { heap_id, 0, 0 };
  size_t heap_count = 1;
  if (MayUseLargeBlockHeap(bytes)) {
    DCHECK_LT(heap_count, arraysize(heaps));
    heaps[heap_count++] = large_block_heap_id_;
  }

  if (MayUseZebraBlockHeap(bytes)) {
    DCHECK_LT(heap_count, arraysize(heaps));
    heaps[heap_count++] = zebra_block_heap_id_;
  }

  // Use the selected heaps to try to satisfy the allocation.
  void* alloc = nullptr;
  BlockLayout block_layout = {};
  for (int i = static_cast<int>(heap_count) - 1; i >= 0; --i) {
    BlockHeapInterface* heap = GetHeapFromId(heaps[i]);
    alloc = heap->AllocateBlock(
        bytes,
        0,
        parameters_.trailer_padding_size + sizeof(BlockTrailer),
        &block_layout);
    if (alloc != nullptr) {
      heap_id = heaps[i];
      break;
    }
  }

  // The allocation can fail if we're out of memory.
  if (alloc == nullptr)
    return nullptr;

  DCHECK_NE(static_cast<void*>(nullptr), alloc);
  DCHECK_EQ(0u, reinterpret_cast<size_t>(alloc) % kShadowRatio);
  BlockInfo block = {};
  BlockInitialize(block_layout, alloc, false, &block);

  // Poison the redzones in the shadow memory as early as possible.
  shadow_->PoisonAllocatedBlock(block);

  block.header->alloc_stack = stack_cache_->SaveStackTrace(stack);
  block.header->free_stack = nullptr;
  block.header->state = ALLOCATED_BLOCK;

  block.trailer->heap_id = heap_id;

  BlockSetChecksum(block);
  if (enable_page_protections_)
    BlockProtectRedzones(block, shadow_);

  return block.body;
}

bool BlockHeapManager::Free(HeapId heap_id, void* alloc) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));

  // The standard allows calling free on a null pointer.
  if (alloc == nullptr)
    return true;

  BlockInfo block_info = {};
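  // If |alloc| doesn't point at the body of a live guarded block, fall back
  // to the unguarded free path.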
  if (!shadow_->IsBeginningOfBlockBody(alloc) ||
      !GetBlockInfo(shadow_, reinterpret_cast<BlockBody*>(alloc),
                    &block_info)) {
    return FreeUnguardedAlloc(heap_id, alloc);
  }

  if (enable_page_protections_) {
    // Precondition: A valid guarded allocation.
    BlockProtectNone(block_info, shadow_);
  }

  if (!BlockChecksumIsValid(block_info)) {
    // The free stack hasn't yet been set, but may have been filled with junk.
    // Reset it.
    block_info.header->free_stack = nullptr;
    if (ShouldReportCorruptBlock(&block_info))
      ReportHeapError(alloc, CORRUPT_BLOCK);
    return FreeCorruptBlock(&block_info);
  }

  if (block_info.header->state == QUARANTINED_BLOCK ||
      block_info.header->state == QUARANTINED_FLOODED_BLOCK) {
    ReportHeapError(alloc, DOUBLE_FREE);
    return false;
  }

  // |heap_id| is just a hint; the block trailer contains the heap used for
  // the allocation.
  heap_id = block_info.trailer->heap_id;
  BlockQuarantineInterface* quarantine = GetQuarantineFromId(heap_id);

  // Poison the released alloc (marked as freed) and quarantine the block.
  // Note that the original data is left intact. This may make it easier
  // to debug a crash report/dump on access to a quarantined block.
  shadow_->MarkAsFreed(block_info.body, block_info.body_size);

  // We need to update the block's metadata before pushing it into the
  // quarantine, otherwise a concurrent thread might try to pop it while it's
  // in an invalid state.
  common::StackCapture stack;
  stack.InitFromStack();
  block_info.header->free_stack =
      stack_cache_->SaveStackTrace(stack);
  block_info.trailer->free_ticks = ::GetTickCount();
  block_info.trailer->free_tid = ::GetCurrentThreadId();

  // Flip a coin and sometimes flood the block. When flooded, overwrites are
  // clearly visible; when not flooded, the original contents are left visible.
  bool flood = parameters_.quarantine_flood_fill_rate > 0.0 &&
      base::RandDouble() <= parameters_.quarantine_flood_fill_rate;
  if (flood) {
    block_info.header->state = QUARANTINED_FLOODED_BLOCK;
    ::memset(block_info.body, kBlockFloodFillByte, block_info.body_size);
  } else {
    block_info.header->state = QUARANTINED_BLOCK;
  }

  // Update the block checksum.
  BlockSetChecksum(block_info);

  CompactBlockInfo compact = {};
  ConvertBlockInfo(block_info, &compact);

  {
    BlockQuarantineInterface::AutoQuarantineLock quarantine_lock(
        quarantine, compact);
    if (!quarantine->Push(compact))
      return FreePristineBlock(&block_info);

    if (enable_page_protections_) {
      // The recently pushed block can be popped out in TrimQuarantine if the
      // quarantine size is 0; in that case TrimQuarantine takes care of
      // properly unprotecting and freeing the block. If the protection is set
      // blindly after TrimQuarantine we could end up protecting a free (not
      // quarantined, not allocated) block.
      BlockProtectAll(block_info, shadow_);
    }
  }
  TrimQuarantine(quarantine);
  return true;
}

size_t BlockHeapManager::Size(HeapId heap_id, const void* alloc) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));

  if (shadow_->IsBeginningOfBlockBody(alloc)) {
    BlockInfo block_info = {};
    if (!GetBlockInfo(shadow_, reinterpret_cast<const BlockBody*>(alloc),
                      &block_info)) {
      return 0;
    }
    return block_info.body_size;
  }

  BlockHeapInterface* heap = GetHeapFromId(heap_id);
  if ((heap->GetHeapFeatures() &
      HeapInterface::kHeapSupportsGetAllocationSize) != 0) {
    return heap->GetAllocationSize(alloc);
  } else {
    return 0;
  }
}

void BlockHeapManager::Lock(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  GetHeapFromId(heap_id)->Lock();
}

void BlockHeapManager::Unlock(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  GetHeapFromId(heap_id)->Unlock();
}

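// Note: BestEffortLockAll acquires |lock_| and leaves it held on return; each
// call must therefore be paired with a subsequent call to UnlockAll, which
// releases it.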
void BlockHeapManager::BestEffortLockAll() {
  DCHECK(initialized_);
  static const base::TimeDelta kTryTime(base::TimeDelta::FromMilliseconds(50));
  lock_.Acquire();

  // Create room to store the list of locked heaps. This must use the internal
  // heap as any other heap may be involved in a crash and locked right now.
  DCHECK_EQ(static_cast<HeapInterface**>(nullptr), locked_heaps_);
  size_t alloc_size = sizeof(HeapInterface*) * (heaps_.size() + 1);
  locked_heaps_ = reinterpret_cast<HeapInterface**>(internal_heap_->Allocate(
      alloc_size));
  DCHECK_NE(static_cast<HeapInterface**>(nullptr), locked_heaps_);
  ::memset(locked_heaps_, 0, alloc_size);

  size_t index = 0;
  for (auto& heap_quarantine_pair : heaps_) {
    HeapInterface* heap = heap_quarantine_pair.first;
    if (TimedTry(kTryTime, heap)) {
      locked_heaps_[index] = heap;
      ++index;
    }
  }
}

void BlockHeapManager::UnlockAll() {
  DCHECK(initialized_);
  lock_.AssertAcquired();
  DCHECK_NE(static_cast<HeapInterface**>(nullptr), locked_heaps_);
  for (HeapInterface** heap = locked_heaps_; *heap != nullptr; ++heap)
    (*heap)->Unlock();
  internal_heap_->Free(locked_heaps_);
  locked_heaps_ = nullptr;
  lock_.Release();
}

void BlockHeapManager::set_parameters(
    const ::common::AsanParameters& parameters) {
  {
    base::AutoLock lock(lock_);
    parameters_ = parameters;
  }

  // Releases the lock before propagating the parameters.
  if (initialized_)
    PropagateParameters();
}

void BlockHeapManager::TearDownHeapManager() {
  base::AutoLock lock(lock_);

  // This would indicate that we have outstanding heap locks being
  // held. This shouldn't happen as |locked_heaps_| is only non-null
  // under |lock_|.
  DCHECK_EQ(static_cast<HeapInterface**>(nullptr), locked_heaps_);

  // Delete all the heaps. This must be done manually to ensure that
  // all references to internal_heap_ have been cleaned up.
  HeapQuarantineMap::iterator iter_heaps = heaps_.begin();
  for (; iter_heaps != heaps_.end(); ++iter_heaps) {
    DCHECK(!iter_heaps->second.is_dying);
    iter_heaps->second.is_dying = true;
    DestroyHeapContents(iter_heaps->first, iter_heaps->second.quarantine);
    DestroyHeapResourcesUnlocked(iter_heaps->first,
                                 iter_heaps->second.quarantine);
  }
  // Clear the active heap list.
  heaps_.clear();

  // Clear the specialized heap references since they were deleted.
  process_heap_ = nullptr;
  process_heap_underlying_heap_ = nullptr;
  process_heap_id_ = 0;
  zebra_block_heap_ = nullptr;
  zebra_block_heap_id_ = 0;
  large_block_heap_id_ = 0;

  // Free the allocation-filter flag (TLS).
  if (allocation_filter_flag_tls_ != TLS_OUT_OF_INDEXES) {
    ::TlsFree(allocation_filter_flag_tls_);
    allocation_filter_flag_tls_ = TLS_OUT_OF_INDEXES;
  }
}

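// Note: a HeapId is simply the address of the heap's HeapQuarantinePair entry
// in |heaps_|; it therefore remains valid only as long as that entry is alive
// at a stable address.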
HeapId BlockHeapManager::GetHeapId(
    HeapQuarantineMap::iterator iterator) const {
  HeapQuarantinePair* hq_pair = &(*iterator);
  return reinterpret_cast<HeapId>(hq_pair);
}

HeapId BlockHeapManager::GetHeapId(
    const std::pair<HeapQuarantineMap::iterator, bool>& insert_result) const {
  return GetHeapId(insert_result.first);
}

bool BlockHeapManager::IsValidHeapIdUnsafe(HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
    return false;
  base::AutoLock auto_lock(lock_);
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnsafeUnlocked(
    HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
    return false;
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapId(HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnlockedImpl1(hq))
    return false;
  base::AutoLock auto_lock(lock_);
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnlocked(HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnlockedImpl1(hq))
    return false;
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnsafeUnlockedImpl1(
    HeapQuarantinePair* hq) {
  // First check to see if it looks like it has the right shape. This could
  // cause an invalid access if the heap_id is a completely wild value.
  if (hq == nullptr)
    return false;
  if (hq->first == nullptr || hq->second.quarantine == nullptr)
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnlockedImpl1(
    HeapQuarantinePair* hq) {
  // Run this in an exception handler, as if it's a really invalid heap id
  // we could end up reading from inaccessible memory.
  __try {
    if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
      return false;
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    return false;
  }
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnlockedImpl2(HeapQuarantinePair* hq,
                                                  bool allow_dying) {
  // Look in the list of live heaps first.
  auto it = heaps_.find(hq->first);
  if (it != heaps_.end()) {
    HeapId heap_id = GetHeapId(it);
    if (heap_id == reinterpret_cast<HeapId>(hq))
      return !it->second.is_dying || allow_dying;
  }

  return false;
}

BlockHeapInterface* BlockHeapManager::GetHeapFromId(HeapId heap_id) {
  DCHECK_NE(reinterpret_cast<HeapId>(nullptr), heap_id);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  DCHECK_NE(static_cast<BlockHeapInterface*>(nullptr), hq->first);
  return hq->first;
}

BlockQuarantineInterface* BlockHeapManager::GetQuarantineFromId(
    HeapId heap_id) {
  DCHECK_NE(reinterpret_cast<HeapId>(nullptr), heap_id);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr),
            hq->second.quarantine);
  return hq->second.quarantine;
}

void BlockHeapManager::PropagateParameters() {
  // The internal heap should already be set up.
  DCHECK_NE(static_cast<HeapInterface*>(nullptr), internal_heap_.get());

  size_t quarantine_size = shared_quarantine_.max_quarantine_size();
  shared_quarantine_.set_max_quarantine_size(parameters_.quarantine_size);
  shared_quarantine_.set_max_object_size(parameters_.quarantine_block_size);

  // Trim the quarantine if its maximum size has decreased.
  if (initialized_ && quarantine_size > parameters_.quarantine_size)
    TrimQuarantine(&shared_quarantine_);

  if (parameters_.enable_zebra_block_heap && zebra_block_heap_ == nullptr) {
    // Initialize the zebra heap only if it isn't already initialized.
    // The zebra heap cannot be resized once created.
    base::AutoLock lock(lock_);
    zebra_block_heap_ = new ZebraBlockHeap(parameters_.zebra_block_heap_size,
                                           memory_notifier_,
                                           internal_heap_.get());
    // The zebra block heap is its own quarantine.
    HeapMetadata heap_metadata = { zebra_block_heap_, false };
    auto result = heaps_.insert(std::make_pair(zebra_block_heap_,
                                               heap_metadata));
    zebra_block_heap_id_ = GetHeapId(result);
  }

  if (zebra_block_heap_ != nullptr) {
    zebra_block_heap_->set_quarantine_ratio(
        parameters_.zebra_block_heap_quarantine_ratio);
    if (initialized_)
      TrimQuarantine(zebra_block_heap_);
  }

  // Create the LargeBlockHeap if need be.
  if (parameters_.enable_large_block_heap && large_block_heap_id_ == 0) {
    base::AutoLock lock(lock_);
    BlockHeapInterface* heap = new LargeBlockHeap(
        memory_notifier_, internal_heap_.get());
    HeapMetadata metadata = { &shared_quarantine_, false };
    auto result = heaps_.insert(std::make_pair(heap, metadata));
    large_block_heap_id_ = GetHeapId(result);
  }

  // TODO(chrisha|sebmarchand): Clean up existing blocks that exceed the
  // maximum block size? This will require an entirely new TrimQuarantine
  // function. Since this is never changed at runtime except in our
  // unittests, this is not clearly useful.
}

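// Note: the allocation-filter flag lives in a TLS slot and is thus tracked
// per-thread; a slot that has never been set on a thread reads back as null,
// i.e. the flag defaults to false.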
bool BlockHeapManager::allocation_filter_flag() const {
  return ::TlsGetValue(allocation_filter_flag_tls_) != nullptr;
}

void BlockHeapManager::set_allocation_filter_flag(bool value) {
  ::TlsSetValue(allocation_filter_flag_tls_, reinterpret_cast<void*>(value));
}

HeapType BlockHeapManager::GetHeapTypeUnlocked(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapIdUnlocked(heap_id, true));
  BlockHeapInterface* heap = GetHeapFromId(heap_id);
  return heap->GetHeapType();
}

bool BlockHeapManager::DestroyHeapContents(
    BlockHeapInterface* heap,
    BlockQuarantineInterface* quarantine) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockHeapInterface*>(nullptr), heap);
  DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr), quarantine);

  // Start by removing all the blocks belonging to this heap from the
  // quarantine.
  BlockQuarantineInterface::ObjectVector blocks_vec;
  BlockQuarantineInterface::ObjectVector blocks_to_free;

  // We'll keep the blocks that don't belong to this heap in a temporary list.
  // While this isn't optimal in terms of performance, destroying a heap isn't
  // a common operation.
  // TODO(sebmarchand): Add a version of the ShardedBlockQuarantine::Empty
  // method that accepts a functor to filter the blocks to remove.
  BlockQuarantineInterface::ObjectVector blocks_to_reinsert;
  quarantine->Empty(&blocks_vec);

  for (const auto& iter_block : blocks_vec) {
    BlockInfo expanded = {};
    ConvertBlockInfo(iter_block, &expanded);

    if (enable_page_protections_) {
      // Remove protection to enable access to the block header.
      BlockProtectNone(expanded, shadow_);
    }

    BlockHeapInterface* block_heap = GetHeapFromId(expanded.trailer->heap_id);

    if (block_heap == heap) {
      blocks_to_free.push_back(iter_block);
    } else {
      blocks_to_reinsert.push_back(iter_block);
    }
  }

  // Restore the blocks that don't belong to this heap.
  for (const auto& iter_block : blocks_to_reinsert) {
    BlockInfo expanded = {};
    ConvertBlockInfo(iter_block, &expanded);

    BlockQuarantineInterface::AutoQuarantineLock quarantine_lock(quarantine,
                                                                 iter_block);
    if (quarantine->Push(iter_block)) {
      if (enable_page_protections_) {
        // Restore protection to quarantined block.
        BlockProtectAll(expanded, shadow_);
      }
    } else {
      // Avoid memory leak.
      blocks_to_free.push_back(iter_block);
    }
  }

  FreeBlockVector(blocks_to_free);

  return true;
}

void BlockHeapManager::DestroyHeapResourcesUnlocked(
    BlockHeapInterface* heap,
    BlockQuarantineInterface* quarantine) {
  // If the heap has an underlying heap then free it as well.
  {
    auto iter = underlying_heaps_map_.find(heap);
    if (iter != underlying_heaps_map_.end()) {
      DCHECK_NE(static_cast<HeapInterface*>(nullptr), iter->second);
      delete iter->second;
      underlying_heaps_map_.erase(iter);
    }
  }

  delete heap;
}

void BlockHeapManager::TrimQuarantine(BlockQuarantineInterface* quarantine) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr), quarantine);

  BlockQuarantineInterface::ObjectVector blocks_to_free;

  // Trim the quarantine to the new maximum size.
  if (parameters_.quarantine_size == 0) {
    quarantine->Empty(&blocks_to_free);
  } else {
    CompactBlockInfo compact = {};
    while (quarantine->Pop(&compact))
      blocks_to_free.push_back(compact);
  }

  FreeBlockVector(blocks_to_free);
}

void BlockHeapManager::FreeBlockVector(
    BlockQuarantineInterface::ObjectVector& vec) {
  for (const auto& iter_block : vec) {
    BlockInfo expanded = {};
    ConvertBlockInfo(iter_block, &expanded);
    CHECK(FreePotentiallyCorruptBlock(&expanded));
  }
}

namespace {

// A tiny helper function that checks if a quarantined flooded block has a
// valid body. If the block is not of that type this simply returns true.
bool BlockBodyIsValid(const BlockInfo& block_info) {
  if (block_info.header->state != QUARANTINED_FLOODED_BLOCK)
    return true;
  if (BlockBodyIsFloodFilled(block_info))
    return true;
  return false;
}

}  // namespace

bool BlockHeapManager::FreePotentiallyCorruptBlock(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);

  if (enable_page_protections_)
    BlockProtectNone(*block_info, shadow_);

  if (block_info->header->magic != kBlockHeaderMagic ||
      !BlockChecksumIsValid(*block_info) ||
      !BlockBodyIsValid(*block_info)) {
    if (ShouldReportCorruptBlock(block_info))
      ReportHeapError(block_info->header, CORRUPT_BLOCK);
    return FreeCorruptBlock(block_info);
  } else {
    return FreePristineBlock(block_info);
  }
}

bool BlockHeapManager::FreeCorruptBlock(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  ClearCorruptBlockMetadata(block_info);
  return FreePristineBlock(block_info);
}

bool BlockHeapManager::FreePristineBlock(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  BlockHeapInterface* heap = GetHeapFromId(block_info->trailer->heap_id);

  if (enable_page_protections_) {
    // Remove block protections so the redzones may be modified.
    BlockProtectNone(*block_info, shadow_);
  }

  // Return pointers to the stacks for reference counting purposes.
  if (block_info->header->alloc_stack != nullptr) {
    stack_cache_->ReleaseStackTrace(block_info->header->alloc_stack);
    block_info->header->alloc_stack = nullptr;
  }
  if (block_info->header->free_stack != nullptr) {
    stack_cache_->ReleaseStackTrace(block_info->header->free_stack);
    block_info->header->free_stack = nullptr;
  }

  block_info->header->state = FREED_BLOCK;

  if ((heap->GetHeapFeatures() &
      HeapInterface::kHeapReportsReservations) != 0) {
    shadow_->Poison(block_info->header,
                    block_info->block_size,
                    kAsanReservedMarker);
  } else {
    shadow_->Unpoison(block_info->header, block_info->block_size);
  }
  return heap->FreeBlock(*block_info);
}

bool BlockHeapManager::FreeUnguardedAlloc(HeapId heap_id, void* alloc) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  BlockHeapInterface* heap = GetHeapFromId(heap_id);

  // Check if the allocation comes from the process heap.
  if (heap == process_heap_) {
    // The shadow memory associated with this allocation is already green, so
    // no need to modify it.
    return ::HeapFree(::GetProcessHeap(), 0, alloc) == TRUE;
  }

  // If the heap carves greenzones out of redzones, then color the allocation
  // red again. Otherwise, simply leave it green.
  if ((heap->GetHeapFeatures() &
      HeapInterface::kHeapReportsReservations) != 0) {
    DCHECK_NE(0U, heap->GetHeapFeatures() &
        HeapInterface::kHeapSupportsGetAllocationSize);
    shadow_->Poison(alloc, Size(heap_id, alloc), kAsanReservedMarker);
  }

  return heap->Free(alloc);
}

void BlockHeapManager::ClearCorruptBlockMetadata(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), block_info->header);

  // Set the invalid stack captures to nullptr.
  if (!stack_cache_->StackCapturePointerIsValid(
          block_info->header->alloc_stack)) {
    block_info->header->alloc_stack = nullptr;
  }
  if (!stack_cache_->StackCapturePointerIsValid(
          block_info->header->free_stack)) {
    block_info->header->free_stack = nullptr;
  }
}

void BlockHeapManager::ReportHeapError(void* address, BadAccessKind kind) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<void*>(nullptr), address);

  // Collect information about the error.
  AsanErrorInfo error_info = {};
  ::RtlCaptureContext(&error_info.context);
  error_info.access_mode = agent::asan::ASAN_UNKNOWN_ACCESS;
  error_info.location = address;
  error_info.error_type = kind;
  ErrorInfoGetBadAccessInformation(shadow_, stack_cache_, &error_info);
  agent::common::StackCapture stack;
  stack.InitFromStack();
  error_info.crash_stack_id = stack.relative_stack_id();

  // We expect a callback to be set.
  DCHECK(!heap_error_callback_.is_null());
  heap_error_callback_.Run(&error_info);
}

void BlockHeapManager::InitInternalHeap() {
  DCHECK_EQ(static_cast<HeapInterface*>(nullptr), internal_heap_.get());
  DCHECK_EQ(static_cast<HeapInterface*>(nullptr),
            internal_win_heap_.get());

  internal_win_heap_.reset(new heaps::WinHeap);
  internal_heap_.reset(
      new heaps::InternalHeap(memory_notifier_, internal_win_heap_.get()));
}

void BlockHeapManager::InitProcessHeap() {
  DCHECK_EQ(static_cast<BlockHeapInterface*>(nullptr), process_heap_);
  process_heap_underlying_heap_ = new heaps::WinHeap(::GetProcessHeap());
  process_heap_ = new heaps::SimpleBlockHeap(process_heap_underlying_heap_);
  underlying_heaps_map_.insert(std::make_pair(process_heap_,
                                              process_heap_underlying_heap_));
  HeapMetadata heap_metadata = { &shared_quarantine_, false };
  auto result = heaps_.insert(std::make_pair(process_heap_, heap_metadata));
  process_heap_id_ = GetHeapId(result);
}

bool BlockHeapManager::MayUseLargeBlockHeap(size_t bytes) const {
  DCHECK(initialized_);
  if (!parameters_.enable_large_block_heap)
    return false;
  if (bytes >= parameters_.large_allocation_threshold)
    return true;

  // If we get here we're dealing with a small allocation. If the allocation
  // filter is in effect and the flag is set, then allow it.
  if (parameters_.enable_allocation_filter && allocation_filter_flag())
    return true;

  return false;
}

bool BlockHeapManager::MayUseZebraBlockHeap(size_t bytes) const {
  DCHECK(initialized_);
  if (!parameters_.enable_zebra_block_heap)
    return false;
  if (bytes > ZebraBlockHeap::kMaximumBlockAllocationSize)
    return false;

  // If the allocation filter is in effect only allow filtered allocations
  // into the zebra heap.
  if (parameters_.enable_allocation_filter)
    return allocation_filter_flag();

  // Otherwise, allow everything through.
  return true;
}

bool BlockHeapManager::ShouldReportCorruptBlock(const BlockInfo* block_info) {
  DCHECK_NE(static_cast<const BlockInfo*>(nullptr), block_info);

  if (!parameters_.prevent_duplicate_corruption_crashes)
    return true;

  const common::StackCapture* alloc_stack = block_info->header->alloc_stack;
  StackId relative_alloc_stack_id = alloc_stack->relative_stack_id();

  // Look at the registry cache to see if an error has already been reported
  // for this allocation stack trace; if so, don't report another one.
  if (corrupt_block_registry_cache_.DoesIdExist(relative_alloc_stack_id))
    return false;

  // Update the corrupt block registry cache so that we don't crash again if
  // we encounter a corrupt block with the same allocation stack trace.
  corrupt_block_registry_cache_.AddOrUpdateStackId(relative_alloc_stack_id);
  return true;
}

}  // namespace heap_managers
}  // namespace asan
}  // namespace agent
|