// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/heap_managers/block_heap_manager.h"

#include <algorithm>
#include <utility>

#include "base/bind.h"
#include "base/rand_util.h"
#include "syzygy/agent/asan/page_protection_helpers.h"
#include "syzygy/agent/asan/runtime.h"
#include "syzygy/agent/asan/shadow.h"
#include "syzygy/agent/asan/timed_try.h"
#include "syzygy/agent/asan/heaps/internal_heap.h"
#include "syzygy/agent/asan/heaps/large_block_heap.h"
#include "syzygy/agent/asan/heaps/simple_block_heap.h"
#include "syzygy/agent/asan/heaps/win_heap.h"
#include "syzygy/agent/asan/heaps/zebra_block_heap.h"
#include "syzygy/common/asan_parameters.h"

namespace agent {
namespace asan {
namespace heap_managers {

namespace {

typedef HeapManagerInterface::HeapId HeapId;
using heaps::LargeBlockHeap;
using heaps::ZebraBlockHeap;

// For now, the overbudget size is always set to 20% of the size of the
// quarantine.
// TODO(georgesak): allow this to be changed through the parameters.
enum : uint32_t { kOverbudgetSizePercentage = 20 };
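
// Illustrative arithmetic (the quarantine size used here is hypothetical,
// not a default read from the parameters): with a 16 MB quarantine budget
// the overbudget allowance is 16 MB * 20 / 100 = ~3.2 MB. See
// EnableDeferredFreeThreadWithCallback for where this percentage is applied.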

// Returns the position of the most significant set bit in a 32-bit unsigned
// value.
size_t GetMSBIndex(size_t n) {
  // Algorithm taken from
  // http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
  size_t r = 0;
  size_t shift = 0;
  r = (n > 0xFFFF) << 4;
  n >>= r;
  shift = (n > 0xFF) << 3;
  n >>= shift;
  r |= shift;
  shift = (n > 0xF) << 2;
  n >>= shift;
  r |= shift;
  shift = (n > 0x3) << 1;
  n >>= shift;
  r |= shift;
  r |= (n >> 1);
  return r;
}
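
// Worked examples of the above (it computes floor(log2(n))):
//   GetMSBIndex(1) == 0, GetMSBIndex(0x1000) == 12,
//   GetMSBIndex(0x80000000) == 31.
// Note that GetMSBIndex(0) also returns 0, so callers must treat n == 0
// specially if the distinction matters.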

}  // namespace

BlockHeapManager::BlockHeapManager(Shadow* shadow,
                                   StackCaptureCache* stack_cache,
                                   MemoryNotifierInterface* memory_notifier)
    : shadow_(shadow),
      stack_cache_(stack_cache),
      memory_notifier_(memory_notifier),
      initialized_(false),
      process_heap_(nullptr),
      process_heap_underlying_heap_(nullptr),
      process_heap_id_(0),
      zebra_block_heap_(nullptr),
      zebra_block_heap_id_(0),
      large_block_heap_id_(0),
      locked_heaps_(nullptr),
      enable_page_protections_(true),
      corrupt_block_registry_cache_(L"SyzyAsanCorruptBlocks") {
  DCHECK_NE(static_cast<Shadow*>(nullptr), shadow);
  DCHECK_NE(static_cast<StackCaptureCache*>(nullptr), stack_cache);
  DCHECK_NE(static_cast<MemoryNotifierInterface*>(nullptr), memory_notifier);
  SetDefaultAsanParameters(&parameters_);

  // Initialize the allocation-filter flag (using Thread Local Storage).
  allocation_filter_flag_tls_ = ::TlsAlloc();
  CHECK_NE(TLS_OUT_OF_INDEXES, allocation_filter_flag_tls_);
  // And disable it by default.
  set_allocation_filter_flag(false);
}

BlockHeapManager::~BlockHeapManager() {
  TearDownHeapManager();
}

void BlockHeapManager::Init() {
  DCHECK(!initialized_);

  {
    base::AutoLock lock(lock_);
    InitInternalHeap();
    corrupt_block_registry_cache_.Init();
  }

  // This takes care of its own locking, as it's reentrant.
  PropagateParameters();

  {
    base::AutoLock lock(lock_);
    InitProcessHeap();
    initialized_ = true;
  }
}

HeapId BlockHeapManager::CreateHeap() {
  DCHECK(initialized_);

  // Creates the underlying heap used by this heap.
  // TODO(chrisha): We should be using the internal allocator for these
  // heap allocations!
  HeapInterface* underlying_heap = new heaps::WinHeap();
  // Creates the heap.
  BlockHeapInterface* heap = new heaps::SimpleBlockHeap(underlying_heap);

  base::AutoLock lock(lock_);
  underlying_heaps_map_.insert(std::make_pair(heap, underlying_heap));
  HeapMetadata metadata = { &shared_quarantine_, false };
  auto result = heaps_.insert(std::make_pair(heap, metadata));
  return GetHeapId(result);
}

bool BlockHeapManager::DestroyHeap(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  BlockHeapInterface* heap = GetHeapFromId(heap_id);
  BlockQuarantineInterface* quarantine = GetQuarantineFromId(heap_id);

  {
    // Move the heap from the active to the dying list. This prevents it from
    // being used while it's being torn down.
    base::AutoLock lock(lock_);
    auto iter = heaps_.find(heap);
    iter->second.is_dying = true;
  }

  // Destroy the heap and flush its quarantine. This is done outside of the
  // lock to both reduce contention and to ensure that we can re-enter the
  // block heap manager if corruption is found during the heap tear down.
  DestroyHeapContents(heap, quarantine);

  // Free up any resources associated with the heap. This modifies block
  // heap manager internals, so must be called under a lock.
  {
    base::AutoLock lock(lock_);
    DestroyHeapResourcesUnlocked(heap, quarantine);
    heaps_.erase(heaps_.find(heap));
  }

  return true;
}

void* BlockHeapManager::Allocate(HeapId heap_id, uint32_t bytes) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));

  // Some allocations can pass through without instrumentation.
  if (parameters_.allocation_guard_rate < 1.0 &&
      base::RandDouble() >= parameters_.allocation_guard_rate) {
    BlockHeapInterface* heap = GetHeapFromId(heap_id);
    void* alloc = heap->Allocate(bytes);
    if ((heap->GetHeapFeatures() &
        HeapInterface::kHeapReportsReservations) != 0) {
      shadow_->Unpoison(alloc, bytes);
    }
    return alloc;
  }

  // Capture the current stack. InitFromStack is inlined to preserve the
  // greatest number of stack frames.
  common::StackCapture stack;
  stack.InitFromStack();

  // Build the set of heaps that will be used to satisfy the allocation. This
  // is a stack of heaps, and they will be tried in the reverse order they are
  // inserted.

  // We can always use the heap that was passed in.
  HeapId heaps[3] = { heap_id, 0, 0 };
  size_t heap_count = 1;
  if (MayUseLargeBlockHeap(bytes)) {
    DCHECK_LT(heap_count, arraysize(heaps));
    heaps[heap_count++] = large_block_heap_id_;
  }

  if (MayUseZebraBlockHeap(bytes)) {
    DCHECK_LT(heap_count, arraysize(heaps));
    heaps[heap_count++] = zebra_block_heap_id_;
  }

  // Use the selected heaps to try to satisfy the allocation.
  void* alloc = nullptr;
  BlockLayout block_layout = {};
  for (int i = static_cast<int>(heap_count) - 1; i >= 0; --i) {
    BlockHeapInterface* heap = GetHeapFromId(heaps[i]);
    alloc = heap->AllocateBlock(
        bytes,
        0,
        parameters_.trailer_padding_size + sizeof(BlockTrailer),
        &block_layout);
    if (alloc != nullptr) {
      heap_id = heaps[i];
      break;
    }
  }

  // The allocation can fail if we're out of memory.
  if (alloc == nullptr)
    return nullptr;

  DCHECK_NE(static_cast<void*>(nullptr), alloc);
  DCHECK_EQ(0u, reinterpret_cast<size_t>(alloc) % kShadowRatio);
  BlockInfo block = {};
  BlockInitialize(block_layout, alloc, false, &block);
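
  // At this point BlockInitialize has carved the raw allocation into a
  // guarded block. The resulting layout is approximately (see block.h for
  // the authoritative description):
  //   [ header | header padding | body | trailer padding | trailer ]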

  // Poison the redzones in the shadow memory as early as possible.
  shadow_->PoisonAllocatedBlock(block);

  block.header->alloc_stack = stack_cache_->SaveStackTrace(stack);
  block.header->free_stack = nullptr;
  block.header->state = ALLOCATED_BLOCK;

  block.trailer->heap_id = heap_id;

  BlockSetChecksum(block);
  if (enable_page_protections_)
    BlockProtectRedzones(block, shadow_);

  return block.body;
}

bool BlockHeapManager::Free(HeapId heap_id, void* alloc) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));

  // The standard allows calling free on a null pointer.
  if (alloc == nullptr)
    return true;

  BlockInfo block_info = {};
  if (!shadow_->IsBeginningOfBlockBody(alloc) ||
      !GetBlockInfo(shadow_, reinterpret_cast<BlockBody*>(alloc),
                    &block_info)) {
    return FreeUnguardedAlloc(heap_id, alloc);
  }

  if (enable_page_protections_) {
    // Precondition: A valid guarded allocation.
    BlockProtectNone(block_info, shadow_);
  }

  if (!BlockChecksumIsValid(block_info)) {
    // The free stack hasn't yet been set, but may have been filled with junk.
    // Reset it.
    block_info.header->free_stack = nullptr;
    if (ShouldReportCorruptBlock(&block_info))
      ReportHeapError(alloc, CORRUPT_BLOCK);
    return FreeCorruptBlock(&block_info);
  }

  if (block_info.header->state == QUARANTINED_BLOCK ||
      block_info.header->state == QUARANTINED_FLOODED_BLOCK) {
    ReportHeapError(alloc, DOUBLE_FREE);
    return false;
  }

  // |heap_id| is just a hint; the block trailer contains the ID of the heap
  // actually used for the allocation.
  heap_id = block_info.trailer->heap_id;
  BlockQuarantineInterface* quarantine = GetQuarantineFromId(heap_id);

  // Poison the released alloc (marked as freed) and quarantine the block.
  // Note that the original data is left intact. This may make it easier
  // to debug a crash report/dump on access to a quarantined block.
  shadow_->MarkAsFreed(block_info.body, block_info.body_size);

  // We need to update the block's metadata before pushing it into the
  // quarantine, otherwise a concurrent thread might try to pop it while it's
  // in an invalid state.
  common::StackCapture stack;
  stack.InitFromStack();
  block_info.header->free_stack =
      stack_cache_->SaveStackTrace(stack);
  block_info.trailer->free_ticks = ::GetTickCount();
  block_info.trailer->free_tid = ::GetCurrentThreadId();

  // Flip a coin and sometimes flood the block. When flooded, overwrites are
  // clearly visible; when not flooded, the original contents are left visible.
  bool flood = parameters_.quarantine_flood_fill_rate > 0.0 &&
      base::RandDouble() <= parameters_.quarantine_flood_fill_rate;
  if (flood) {
    block_info.header->state = QUARANTINED_FLOODED_BLOCK;
    ::memset(block_info.body, kBlockFloodFillByte, block_info.body_size);
  } else {
    block_info.header->state = QUARANTINED_BLOCK;
  }

  // Update the block checksum.
  BlockSetChecksum(block_info);

  CompactBlockInfo compact = {};
  ConvertBlockInfo(block_info, &compact);

  PushResult push_result = {};
  {
    BlockQuarantineInterface::AutoQuarantineLock quarantine_lock(
        quarantine, compact);
    push_result = quarantine->Push(compact);
    if (!push_result.push_successful) {
      TrimOrScheduleIfNecessary(push_result.trim_status, quarantine);
      return FreePristineBlock(&block_info);
    }

    if (enable_page_protections_) {
      // The recently pushed block can be popped out in TrimQuarantine if the
      // quarantine size is 0; in that case TrimQuarantine takes care of
      // properly unprotecting and freeing the block. If the protection were
      // set blindly after TrimQuarantine we could end up protecting a free
      // (not quarantined, not allocated) block.
      BlockProtectAll(block_info, shadow_);
    }
  }

  TrimOrScheduleIfNecessary(push_result.trim_status, quarantine);

  return true;
}

uint32_t BlockHeapManager::Size(HeapId heap_id, const void* alloc) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));

  if (shadow_->IsBeginningOfBlockBody(alloc)) {
    BlockInfo block_info = {};
    if (!GetBlockInfo(shadow_, reinterpret_cast<const BlockBody*>(alloc),
                      &block_info)) {
      return 0;
    }
    return block_info.body_size;
  }

  BlockHeapInterface* heap = GetHeapFromId(heap_id);
  if ((heap->GetHeapFeatures() &
      HeapInterface::kHeapSupportsGetAllocationSize) != 0) {
    return heap->GetAllocationSize(alloc);
  } else {
    return 0;
  }
}

void BlockHeapManager::Lock(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  GetHeapFromId(heap_id)->Lock();
}

void BlockHeapManager::Unlock(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  GetHeapFromId(heap_id)->Unlock();
}

void BlockHeapManager::BestEffortLockAll() {
  DCHECK(initialized_);
  static const base::TimeDelta kTryTime(base::TimeDelta::FromMilliseconds(50));
  lock_.Acquire();

  // Create room to store the list of locked heaps. This must use the internal
  // heap as any other heap may be involved in a crash and locked right now.
  DCHECK_EQ(static_cast<HeapInterface**>(nullptr), locked_heaps_);
  uint32_t alloc_size = sizeof(HeapInterface*) *
      static_cast<uint32_t>(heaps_.size() + 1);
  locked_heaps_ = reinterpret_cast<HeapInterface**>(internal_heap_->Allocate(
      alloc_size));
  DCHECK_NE(static_cast<HeapInterface**>(nullptr), locked_heaps_);
  ::memset(locked_heaps_, 0, alloc_size);

  size_t index = 0;
  for (auto& heap_quarantine_pair : heaps_) {
    HeapInterface* heap = heap_quarantine_pair.first;
    if (TimedTry(kTryTime, heap)) {
      locked_heaps_[index] = heap;
      ++index;
    }
  }
}

void BlockHeapManager::UnlockAll() {
  DCHECK(initialized_);
  lock_.AssertAcquired();
  DCHECK_NE(static_cast<HeapInterface**>(nullptr), locked_heaps_);
  for (HeapInterface** heap = locked_heaps_; *heap != nullptr; ++heap)
    (*heap)->Unlock();
  internal_heap_->Free(locked_heaps_);
  locked_heaps_ = nullptr;
  lock_.Release();
}

void BlockHeapManager::set_parameters(
    const ::common::AsanParameters& parameters) {
  {
    base::AutoLock lock(lock_);
    parameters_ = parameters;
  }

  // Releases the lock before propagating the parameters.
  if (initialized_)
    PropagateParameters();
}

void BlockHeapManager::TearDownHeapManager() {
  base::AutoLock lock(lock_);

  // This would indicate that we have outstanding heap locks being
  // held. This shouldn't happen as |locked_heaps_| is only non-null
  // under |lock_|.
  DCHECK_EQ(static_cast<HeapInterface**>(nullptr), locked_heaps_);

  // Delete all the heaps. This must be done manually to ensure that
  // all references to internal_heap_ have been cleaned up.
  HeapQuarantineMap::iterator iter_heaps = heaps_.begin();
  for (; iter_heaps != heaps_.end(); ++iter_heaps) {
    DCHECK(!iter_heaps->second.is_dying);
    iter_heaps->second.is_dying = true;
    DestroyHeapContents(iter_heaps->first, iter_heaps->second.quarantine);
    DestroyHeapResourcesUnlocked(iter_heaps->first,
                                 iter_heaps->second.quarantine);
  }
  // Clear the active heap list.
  heaps_.clear();

  // Clear the specialized heap references since they were deleted.
  process_heap_ = nullptr;
  process_heap_underlying_heap_ = nullptr;
  process_heap_id_ = 0;
  zebra_block_heap_ = nullptr;
  zebra_block_heap_id_ = 0;
  large_block_heap_id_ = 0;

  // Free the allocation-filter flag (TLS).
  if (allocation_filter_flag_tls_ != TLS_OUT_OF_INDEXES) {
    ::TlsFree(allocation_filter_flag_tls_);
    allocation_filter_flag_tls_ = TLS_OUT_OF_INDEXES;
  }
}
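// A HeapId is simply the address of this heap's HeapQuarantinePair entry
// inside |heaps_|, reinterpreted as an integer. This is why the
// IsValidHeapId* family of functions below must carefully probe what may be
// a wild pointer (under a structured exception handler) before trusting it.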
HeapId BlockHeapManager::GetHeapId(
    HeapQuarantineMap::iterator iterator) const {
  HeapQuarantinePair* hq_pair = &(*iterator);
  return reinterpret_cast<HeapId>(hq_pair);
}

HeapId BlockHeapManager::GetHeapId(
    const std::pair<HeapQuarantineMap::iterator, bool>& insert_result) const {
  return GetHeapId(insert_result.first);
}

bool BlockHeapManager::IsValidHeapIdUnsafe(HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
    return false;
  base::AutoLock auto_lock(lock_);
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnsafeUnlocked(
    HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
    return false;
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapId(HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnlockedImpl1(hq))
    return false;
  base::AutoLock auto_lock(lock_);
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnlocked(HeapId heap_id, bool allow_dying) {
  DCHECK(initialized_);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  if (!IsValidHeapIdUnlockedImpl1(hq))
    return false;
  if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnsafeUnlockedImpl1(
    HeapQuarantinePair* hq) {
  // First check to see if it looks like it has the right shape. This could
  // cause an invalid access if the heap_id is a completely wild value.
  if (hq == nullptr)
    return false;
  if (hq->first == nullptr || hq->second.quarantine == nullptr)
    return false;
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnlockedImpl1(
    HeapQuarantinePair* hq) {
  // Run this in an exception handler, as a truly invalid heap id could cause
  // a read from inaccessible memory.
  __try {
    if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
      return false;
  } __except(EXCEPTION_EXECUTE_HANDLER) {
    return false;
  }
  return true;
}

bool BlockHeapManager::IsValidHeapIdUnlockedImpl2(HeapQuarantinePair* hq,
                                                  bool allow_dying) {
  // Look in the list of live heaps first.
  auto it = heaps_.find(hq->first);
  if (it != heaps_.end()) {
    HeapId heap_id = GetHeapId(it);
    if (heap_id == reinterpret_cast<HeapId>(hq))
      return !it->second.is_dying || allow_dying;
  }

  return false;
}

BlockHeapInterface* BlockHeapManager::GetHeapFromId(HeapId heap_id) {
  DCHECK_NE(reinterpret_cast<HeapId>(nullptr), heap_id);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  DCHECK_NE(static_cast<BlockHeapInterface*>(nullptr), hq->first);
  return hq->first;
}

BlockQuarantineInterface* BlockHeapManager::GetQuarantineFromId(
    HeapId heap_id) {
  DCHECK_NE(reinterpret_cast<HeapId>(nullptr), heap_id);
  HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
  DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr),
            hq->second.quarantine);
  return hq->second.quarantine;
}

void BlockHeapManager::PropagateParameters() {
  // The internal heap should already be set up.
  DCHECK_NE(static_cast<HeapInterface*>(nullptr), internal_heap_.get());

  size_t quarantine_size = shared_quarantine_.max_quarantine_size();
  shared_quarantine_.set_max_quarantine_size(parameters_.quarantine_size);
  shared_quarantine_.set_max_object_size(parameters_.quarantine_block_size);

  // Trim the quarantine if its maximum size has decreased.
  if (initialized_ && quarantine_size > parameters_.quarantine_size)
    TrimQuarantine(TrimColor::YELLOW, &shared_quarantine_);

  if (parameters_.enable_zebra_block_heap && zebra_block_heap_ == nullptr) {
    // Initialize the zebra heap only if it isn't already initialized.
    // The zebra heap cannot be resized once created.
    base::AutoLock lock(lock_);
    zebra_block_heap_ = new ZebraBlockHeap(parameters_.zebra_block_heap_size,
                                           memory_notifier_,
                                           internal_heap_.get());
    // The zebra block heap is its own quarantine.
    HeapMetadata heap_metadata = { zebra_block_heap_, false };
    auto result = heaps_.insert(std::make_pair(zebra_block_heap_,
                                               heap_metadata));
    zebra_block_heap_id_ = GetHeapId(result);
  }

  if (zebra_block_heap_ != nullptr) {
    zebra_block_heap_->set_quarantine_ratio(
        parameters_.zebra_block_heap_quarantine_ratio);
    if (initialized_)
      TrimQuarantine(TrimColor::YELLOW, zebra_block_heap_);
  }

  // Create the LargeBlockHeap if need be.
  if (parameters_.enable_large_block_heap && large_block_heap_id_ == 0) {
    base::AutoLock lock(lock_);
    BlockHeapInterface* heap = new LargeBlockHeap(
        memory_notifier_, internal_heap_.get());
    HeapMetadata metadata = { &shared_quarantine_, false };
    auto result = heaps_.insert(std::make_pair(heap, metadata));
    large_block_heap_id_ = GetHeapId(result);
  }

  // TODO(chrisha|sebmarchand): Clean up existing blocks that exceed the
  // maximum block size? This will require an entirely new TrimQuarantine
  // function. Since this is never changed at runtime except in our
  // unittests, this is not clearly useful.
}

bool BlockHeapManager::allocation_filter_flag() const {
  return ::TlsGetValue(allocation_filter_flag_tls_) != 0;
}

void BlockHeapManager::set_allocation_filter_flag(bool value) {
  ::TlsSetValue(allocation_filter_flag_tls_, reinterpret_cast<void*>(value));
}

void BlockHeapManager::EnableDeferredFreeThread() {
  // The thread will be shut down before this BlockHeapManager object is
  // destroyed, so passing |this| unretained is safe.
  EnableDeferredFreeThreadWithCallback(base::Bind(
      &BlockHeapManager::DeferredFreeDoWork, base::Unretained(this)));
}

void BlockHeapManager::DisableDeferredFreeThread() {
  DCHECK(IsDeferredFreeThreadRunning());
  // Stop the thread and wait for it to exit.
  base::AutoLock lock(deferred_free_thread_lock_);
  if (deferred_free_thread_)
    deferred_free_thread_->Stop();
  deferred_free_thread_.reset();
  // Set the overbudget size to 0 to remove the hysteresis.
  shared_quarantine_.SetOverbudgetSize(0);
}

bool BlockHeapManager::IsDeferredFreeThreadRunning() {
  base::AutoLock lock(deferred_free_thread_lock_);
  return deferred_free_thread_ != nullptr;
}

HeapType BlockHeapManager::GetHeapTypeUnlocked(HeapId heap_id) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapIdUnlocked(heap_id, true));
  BlockHeapInterface* heap = GetHeapFromId(heap_id);
  return heap->GetHeapType();
}

bool BlockHeapManager::DestroyHeapContents(
    BlockHeapInterface* heap,
    BlockQuarantineInterface* quarantine) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockHeapInterface*>(nullptr), heap);
  DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr), quarantine);

  // Start by removing all of this heap's blocks from the quarantine.
  BlockQuarantineInterface::ObjectVector blocks_vec;

  // We'll keep the blocks that don't belong to this heap in a temporary list.
  // While this isn't optimal in terms of performance, destroying a heap isn't
  // a common operation.
  // TODO(sebmarchand): Add a version of the ShardedBlockQuarantine::Empty
  // method that accepts a functor to filter the blocks to remove.
  BlockQuarantineInterface::ObjectVector blocks_to_reinsert;
  quarantine->Empty(&blocks_vec);

  for (const auto& iter_block : blocks_vec) {
    BlockInfo expanded = {};
    ConvertBlockInfo(iter_block, &expanded);

    if (enable_page_protections_) {
      // Remove protection to enable access to the block header.
      BlockProtectNone(expanded, shadow_);
    }

    BlockHeapInterface* block_heap = GetHeapFromId(expanded.trailer->heap_id);

    if (block_heap == heap) {
      FreeBlock(iter_block);
    } else {
      blocks_to_reinsert.push_back(iter_block);
    }
  }

  // Restore the blocks that don't belong to this heap.
  for (const auto& iter_block : blocks_to_reinsert) {
    BlockInfo expanded = {};
    ConvertBlockInfo(iter_block, &expanded);

    BlockQuarantineInterface::AutoQuarantineLock quarantine_lock(quarantine,
                                                                 iter_block);
    if (quarantine->Push(iter_block).push_successful) {
      if (enable_page_protections_) {
        // Restore protection to the quarantined block.
        BlockProtectAll(expanded, shadow_);
      }
    } else {
      // Avoid a memory leak.
      FreeBlock(iter_block);
    }
  }

  return true;
}

void BlockHeapManager::DestroyHeapResourcesUnlocked(
    BlockHeapInterface* heap,
    BlockQuarantineInterface* quarantine) {
  // If the heap has an underlying heap then free it as well.
  {
    auto iter = underlying_heaps_map_.find(heap);
    if (iter != underlying_heaps_map_.end()) {
      DCHECK_NE(static_cast<HeapInterface*>(nullptr), iter->second);
      delete iter->second;
      underlying_heaps_map_.erase(iter);
    }
  }

  delete heap;
}

void BlockHeapManager::TrimQuarantine(TrimColor stop_color,
                                      BlockQuarantineInterface* quarantine) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr), quarantine);

  // Trim the quarantine to the required color.
  if (parameters_.quarantine_size == 0) {
    BlockQuarantineInterface::ObjectVector blocks_to_free;
    quarantine->Empty(&blocks_to_free);
    for (const auto& block : blocks_to_free)
      FreeBlock(block);
  } else {
    CompactBlockInfo compact = {};
    while (true) {
      PopResult result = quarantine->Pop(&compact);
      if (!result.pop_successful)
        break;
      FreeBlock(compact);
      if (result.trim_color <= stop_color)
        break;
    }
  }
}

void BlockHeapManager::FreeBlock(const BlockQuarantineInterface::Object& obj) {
  BlockInfo expanded = {};
  ConvertBlockInfo(obj, &expanded);
  CHECK(FreePotentiallyCorruptBlock(&expanded));
}

namespace {

// A tiny helper function that checks if a quarantined flood-filled block has
// a valid body. For blocks of any other type it simply returns true.
bool BlockBodyIsValid(const BlockInfo& block_info) {
  if (block_info.header->state != QUARANTINED_FLOODED_BLOCK)
    return true;
  if (BlockBodyIsFloodFilled(block_info))
    return true;
  return false;
}

}  // namespace

bool BlockHeapManager::FreePotentiallyCorruptBlock(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);

  if (enable_page_protections_)
    BlockProtectNone(*block_info, shadow_);

  if (block_info->header->magic != kBlockHeaderMagic ||
      !BlockChecksumIsValid(*block_info) ||
      !BlockBodyIsValid(*block_info)) {
    if (ShouldReportCorruptBlock(block_info))
      ReportHeapError(block_info->header, CORRUPT_BLOCK);
    return FreeCorruptBlock(block_info);
  } else {
    return FreePristineBlock(block_info);
  }
}

bool BlockHeapManager::FreeCorruptBlock(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  ClearCorruptBlockMetadata(block_info);
  return FreePristineBlock(block_info);
}

bool BlockHeapManager::FreePristineBlock(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  BlockHeapInterface* heap = GetHeapFromId(block_info->trailer->heap_id);

  if (enable_page_protections_) {
    // Remove block protections so the redzones may be modified.
    BlockProtectNone(*block_info, shadow_);
  }

  // Return pointers to the stacks for reference counting purposes.
  if (block_info->header->alloc_stack != nullptr) {
    stack_cache_->ReleaseStackTrace(block_info->header->alloc_stack);
    block_info->header->alloc_stack = nullptr;
  }
  if (block_info->header->free_stack != nullptr) {
    stack_cache_->ReleaseStackTrace(block_info->header->free_stack);
    block_info->header->free_stack = nullptr;
  }

  block_info->header->state = FREED_BLOCK;

  if ((heap->GetHeapFeatures() &
      HeapInterface::kHeapReportsReservations) != 0) {
    shadow_->Poison(block_info->header,
                    block_info->block_size,
                    kAsanReservedMarker);
  } else {
    shadow_->Unpoison(block_info->header, block_info->block_size);
  }
  return heap->FreeBlock(*block_info);
}

bool BlockHeapManager::FreeUnguardedAlloc(HeapId heap_id, void* alloc) {
  DCHECK(initialized_);
  DCHECK(IsValidHeapId(heap_id, false));
  BlockHeapInterface* heap = GetHeapFromId(heap_id);

  // Check if the allocation comes from the process heap.
  if (heap == process_heap_) {
    // The shadow memory associated with this allocation is already green, so
    // there's no need to modify it.
    return ::HeapFree(::GetProcessHeap(), 0, alloc) == TRUE;
  }

  // If the heap carves greenzones out of redzones, then color the allocation
  // red again. Otherwise, simply leave it green.
  if ((heap->GetHeapFeatures() &
      HeapInterface::kHeapReportsReservations) != 0) {
    DCHECK_NE(0U, heap->GetHeapFeatures() &
                  HeapInterface::kHeapSupportsGetAllocationSize);
    shadow_->Poison(alloc, Size(heap_id, alloc), kAsanReservedMarker);
  }

  return heap->Free(alloc);
}

void BlockHeapManager::ClearCorruptBlockMetadata(BlockInfo* block_info) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), block_info->header);

  // Set the invalid stack captures to nullptr.
  if (!stack_cache_->StackCapturePointerIsValid(
      block_info->header->alloc_stack)) {
    block_info->header->alloc_stack = nullptr;
  }
  if (!stack_cache_->StackCapturePointerIsValid(
      block_info->header->free_stack)) {
    block_info->header->free_stack = nullptr;
  }
}

void BlockHeapManager::ReportHeapError(void* address, BadAccessKind kind) {
  DCHECK(initialized_);
  DCHECK_NE(static_cast<void*>(nullptr), address);

  // Collect information about the error.
  AsanErrorInfo error_info = {};
  ::RtlCaptureContext(&error_info.context);
  error_info.access_mode = agent::asan::ASAN_UNKNOWN_ACCESS;
  error_info.location = address;
  error_info.error_type = kind;
  ErrorInfoGetBadAccessInformation(shadow_, stack_cache_, &error_info);
  agent::common::StackCapture stack;
  stack.InitFromStack();
  error_info.crash_stack_id = stack.relative_stack_id();

  // We expect a callback to be set.
  DCHECK(!heap_error_callback_.is_null());
  heap_error_callback_.Run(&error_info);
}

void BlockHeapManager::InitInternalHeap() {
  DCHECK_EQ(static_cast<HeapInterface*>(nullptr), internal_heap_.get());
  DCHECK_EQ(static_cast<HeapInterface*>(nullptr),
            internal_win_heap_.get());

  internal_win_heap_.reset(new heaps::WinHeap);
  internal_heap_.reset(
      new heaps::InternalHeap(memory_notifier_, internal_win_heap_.get()));
}

void BlockHeapManager::InitProcessHeap() {
  DCHECK_EQ(static_cast<BlockHeapInterface*>(nullptr), process_heap_);
  process_heap_underlying_heap_ = new heaps::WinHeap(::GetProcessHeap());
  process_heap_ = new heaps::SimpleBlockHeap(process_heap_underlying_heap_);
  underlying_heaps_map_.insert(std::make_pair(process_heap_,
                                              process_heap_underlying_heap_));
  HeapMetadata heap_metadata = { &shared_quarantine_, false };
  auto result = heaps_.insert(std::make_pair(process_heap_, heap_metadata));
  process_heap_id_ = GetHeapId(result);
}

bool BlockHeapManager::MayUseLargeBlockHeap(size_t bytes) const {
  DCHECK(initialized_);
  if (!parameters_.enable_large_block_heap)
    return false;
  if (bytes >= parameters_.large_allocation_threshold)
    return true;

  // If we get here we're dealing with a small allocation. If the allocation
  // filter is in effect and the flag is set then allow it.
  if (parameters_.enable_allocation_filter && allocation_filter_flag())
    return true;

  return false;
}

bool BlockHeapManager::MayUseZebraBlockHeap(size_t bytes) const {
  DCHECK(initialized_);
  if (!parameters_.enable_zebra_block_heap)
    return false;
  if (bytes > ZebraBlockHeap::kMaximumBlockAllocationSize)
    return false;

  // If the allocation filter is in effect, only allow filtered allocations
  // into the zebra heap.
  if (parameters_.enable_allocation_filter)
    return allocation_filter_flag();

  // Otherwise, allow everything through.
  return true;
}

bool BlockHeapManager::ShouldReportCorruptBlock(const BlockInfo* block_info) {
  DCHECK_NE(static_cast<const BlockInfo*>(nullptr), block_info);

  if (!parameters_.prevent_duplicate_corruption_crashes)
    return true;

  const common::StackCapture* alloc_stack = block_info->header->alloc_stack;
  StackId relative_alloc_stack_id = alloc_stack->relative_stack_id();

  // Look at the registry cache to see if an error has already been reported
  // for this allocation stack trace; if so, don't report another one.
  if (corrupt_block_registry_cache_.DoesIdExist(relative_alloc_stack_id))
    return false;

  // Update the corrupt block registry cache to prevent crashing again if we
  // encounter another corrupt block with the same allocation stack trace.
  corrupt_block_registry_cache_.AddOrUpdateStackId(relative_alloc_stack_id);
  return true;
}

void BlockHeapManager::TrimOrScheduleIfNecessary(
    TrimStatus trim_status,
    BlockQuarantineInterface* quarantine) {
  // If no trimming is required, nothing to do.
  if (trim_status == TRIM_NOT_REQUIRED)
    return;

  // If the deferred thread is not running, always trim synchronously.
  if (!IsDeferredFreeThreadRunning()) {
    TrimQuarantine(TrimColor::YELLOW, quarantine);
    return;
  }

  // Signal the deferred thread to wake up and/or trim synchronously, as
  // needed.
  if (trim_status & TrimStatusBits::ASYNC_TRIM_REQUIRED)
    DeferredFreeThreadSignalWork();
  if (trim_status & TrimStatusBits::SYNC_TRIM_REQUIRED)
    TrimQuarantine(TrimColor::YELLOW, quarantine);
}

void BlockHeapManager::DeferredFreeThreadSignalWork() {
  DCHECK(IsDeferredFreeThreadRunning());
  base::AutoLock lock(deferred_free_thread_lock_);
  deferred_free_thread_->SignalWork();
}

void BlockHeapManager::DeferredFreeDoWork() {
  DCHECK_EQ(GetDeferredFreeThreadId(), base::PlatformThread::CurrentId());
  // As of now, only the shared quarantine gets trimmed asynchronously. This
  // brings it back to the GREEN color.
  BlockQuarantineInterface* shared_quarantine = &shared_quarantine_;
  TrimQuarantine(TrimColor::GREEN, shared_quarantine);
}

base::PlatformThreadId BlockHeapManager::GetDeferredFreeThreadId() {
  DCHECK(IsDeferredFreeThreadRunning());
  base::AutoLock lock(deferred_free_thread_lock_);
  return deferred_free_thread_->deferred_free_thread_id();
}

void BlockHeapManager::EnableDeferredFreeThreadWithCallback(
    DeferredFreeThread::Callback deferred_free_callback) {
  DCHECK(!IsDeferredFreeThreadRunning());

  shared_quarantine_.SetOverbudgetSize(
      shared_quarantine_.max_quarantine_size() * kOverbudgetSizePercentage /
      100);

  // Create the thread and wait for it to start.
  base::AutoLock lock(deferred_free_thread_lock_);
  deferred_free_thread_.reset(new DeferredFreeThread(deferred_free_callback));
  deferred_free_thread_->Start();
}

}  // namespace heap_managers
}  // namespace asan
}  // namespace agent