1 : // Copyright 2014 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/agent/asan/heap_managers/block_heap_manager.h"
16 :
17 : #include <algorithm>
18 : #include <utility>
19 :
20 : #include "base/bind.h"
21 : #include "base/rand_util.h"
22 : #include "syzygy/agent/asan/asan_runtime.h"
23 : #include "syzygy/agent/asan/page_protection_helpers.h"
24 : #include "syzygy/agent/asan/shadow.h"
25 : #include "syzygy/agent/asan/timed_try.h"
26 : #include "syzygy/agent/asan/heaps/ctmalloc_heap.h"
27 : #include "syzygy/agent/asan/heaps/internal_heap.h"
28 : #include "syzygy/agent/asan/heaps/large_block_heap.h"
29 : #include "syzygy/agent/asan/heaps/simple_block_heap.h"
30 : #include "syzygy/agent/asan/heaps/win_heap.h"
31 : #include "syzygy/agent/asan/heaps/zebra_block_heap.h"
32 : #include "syzygy/common/asan_parameters.h"
33 :
34 : namespace agent {
35 : namespace asan {
36 : namespace heap_managers {
37 :
38 : namespace {
39 :
40 : typedef HeapManagerInterface::HeapId HeapId;
41 : using heaps::LargeBlockHeap;
42 : using heaps::ZebraBlockHeap;
43 :
44 : // Returns the index of the most significant set bit in a 32-bit unsigned value.
45 E : size_t GetMSBIndex(size_t n) {
46 : // Algorithm taken from
47 : // http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog
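// This is a branchless binary search: each step tests whether the top 16, 8,
// 4 or 2 remaining bits are set and, if so, shifts them out, accumulating the
// shift amounts in |r|. For example GetMSBIndex(0x12345) == 16, and
// GetMSBIndex(1) == GetMSBIndex(0) == 0.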
48 E : size_t r = 0;
49 E : size_t shift = 0;
50 E : r = (n > 0xFFFF) << 4;
51 E : n >>= r;
52 E : shift = (n > 0xFF) << 3;
53 E : n >>= shift;
54 E : r |= shift;
55 E : shift = (n > 0xF) << 2;
56 E : n >>= shift;
57 E : r |= shift;
58 E : shift = (n > 0x3) << 1;
59 E : n >>= shift;
60 E : r |= shift;
61 E : r |= (n >> 1);
62 E : return r;
63 E : }
64 :
65 : } // namespace
66 :
67 : const size_t BlockHeapManager::kDefaultRateTargetedHeapsMinBlockSize[] =
68 : { 4 * 1024, 15 * 1024 };
69 : const size_t BlockHeapManager::kDefaultRateTargetedHeapsMaxBlockSize[] =
70 : { 9 * 1024, 18 * 1024 };
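// Together these define the two allocation size ranges that may be routed to
// the rate-targeted heaps: [4 KB, 9 KB] and [15 KB, 18 KB] (see
// MayUseRateTargetedHeap).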
71 :
72 : BlockHeapManager::BlockHeapManager(StackCaptureCache* stack_cache)
73 : : stack_cache_(stack_cache),
74 : initialized_(false),
75 : process_heap_(nullptr),
76 : process_heap_underlying_heap_(nullptr),
77 : process_heap_id_(0),
78 : zebra_block_heap_(nullptr),
79 : zebra_block_heap_id_(0),
80 : large_block_heap_id_(0),
81 E : locked_heaps_(nullptr) {
82 E : DCHECK_NE(static_cast<StackCaptureCache*>(nullptr), stack_cache);
83 E : SetDefaultAsanParameters(&parameters_);
84 :
85 : // Initialize the allocation-filter flag (using Thread Local Storage).
86 E : allocation_filter_flag_tls_ = ::TlsAlloc();
87 E : CHECK_NE(TLS_OUT_OF_INDEXES, allocation_filter_flag_tls_);
88 : // And disable it by default.
89 E : set_allocation_filter_flag(false);
90 E : }
91 :
92 E : BlockHeapManager::~BlockHeapManager() {
93 E : TearDownHeapManager();
94 E : }
95 :
96 E : void BlockHeapManager::Init() {
97 E : DCHECK(!initialized_);
98 :
99 : {
100 E : base::AutoLock lock(lock_);
101 E : InitInternalHeap();
102 E : }
103 :
104 : // This takes care of its own locking, as it's reentrant.
105 E : PropagateParameters();
106 :
107 : {
108 E : base::AutoLock lock(lock_);
109 E : InitProcessHeap();
110 E : initialized_ = true;
111 E : }
112 :
113 E : InitRateTargetedHeaps();
114 E : }
115 :
116 E : HeapId BlockHeapManager::CreateHeap() {
117 E : DCHECK(initialized_);
118 :
119 : // Creates the underlying heap used by this heap.
120 : // TODO(chrisha): We should be using the internal allocator for these
121 : // heap allocations!
122 E : HeapInterface* underlying_heap = nullptr;
123 E : if (parameters_.enable_ctmalloc) {
124 E : underlying_heap = new heaps::CtMallocHeap(&shadow_memory_notifier_);
125 E : } else {
126 E : underlying_heap = new heaps::WinHeap();
127 : }
128 : // Creates the heap.
129 E : BlockHeapInterface* heap = new heaps::SimpleBlockHeap(underlying_heap);
130 :
131 E : base::AutoLock lock(lock_);
132 E : underlying_heaps_map_.insert(std::make_pair(heap, underlying_heap));
133 E : HeapMetadata metadata = { &shared_quarantine_, false };
134 E : auto result = heaps_.insert(std::make_pair(heap, metadata));
135 E : return GetHeapId(result);
136 E : }
137 :
138 E : bool BlockHeapManager::DestroyHeap(HeapId heap_id) {
139 E : DCHECK(initialized_);
140 E : DCHECK(IsValidHeapId(heap_id, false));
141 E : BlockHeapInterface* heap = GetHeapFromId(heap_id);
142 E : BlockQuarantineInterface* quarantine = GetQuarantineFromId(heap_id);
143 :
144 : {
145 : // Mark the heap as dying. This prevents it from being used while it's
146 : // being torn down.
147 E : base::AutoLock lock(lock_);
148 E : auto iter = heaps_.find(heap);
149 E : iter->second.is_dying = true;
150 E : }
151 :
152 : // Destroy the heap and flush its quarantine. This is done outside of the
153 : // lock to both reduce contention and to ensure that we can re-enter the
154 : // block heap manager if corruption is found during the heap tear down.
155 E : DestroyHeapContents(heap, quarantine);
156 :
157 : // Free up any resources associated with the heap. This modifies block
158 : // heap manager internals, so must be called under a lock.
159 : {
160 E : base::AutoLock lock(lock_);
161 E : DestroyHeapResourcesUnlocked(heap, quarantine);
162 E : heaps_.erase(heaps_.find(heap));
163 E : }
164 :
165 E : return true;
166 E : }
167 :
168 E : void* BlockHeapManager::Allocate(HeapId heap_id, size_t bytes) {
169 E : DCHECK(initialized_);
170 E : DCHECK(IsValidHeapId(heap_id, false));
171 :
172 : // Some allocations can pass through without instrumentation.
173 : if (parameters_.allocation_guard_rate < 1.0 &&
174 E : base::RandDouble() >= parameters_.allocation_guard_rate) {
175 E : BlockHeapInterface* heap = GetHeapFromId(heap_id);
176 E : void* alloc = heap->Allocate(bytes);
177 : if ((heap->GetHeapFeatures() &
178 E : HeapInterface::kHeapReportsReservations) != 0) {
179 E : Shadow::Unpoison(alloc, bytes);
180 : }
181 E : return alloc;
182 : }
183 :
184 : // Capture the current stack. InitFromStack is inlined to preserve the
185 : // greatest number of stack frames.
186 E : common::StackCapture stack;
187 E : stack.InitFromStack();
188 :
189 : // Build the set of heaps that will be used to satisfy the allocation. This
190 : // is a stack of heaps, and they will be tried in the reverse order they are
191 : // inserted.
192 :
193 : // We can always use the heap that was passed in.
194 E : HeapId heaps[4] = { heap_id, 0, 0, 0 };
195 E : size_t heap_count = 1;
196 E : if (MayUseLargeBlockHeap(bytes)) {
197 E : DCHECK_LT(heap_count, arraysize(heaps));
198 E : heaps[heap_count++] = large_block_heap_id_;
199 : }
200 :
201 E : if (MayUseZebraBlockHeap(bytes)) {
202 E : DCHECK_LT(heap_count, arraysize(heaps));
203 E : heaps[heap_count++] = zebra_block_heap_id_;
204 : }
205 :
206 E : if (MayUseRateTargetedHeap(bytes)) {
207 E : DCHECK_LT(heap_count, arraysize(heaps));
208 E : heaps[heap_count++] = ChooseRateTargetedHeap(stack);
209 : }
210 :
211 : // Use the selected heaps to try to satisfy the allocation.
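// They are tried from the most specialized to the most generic: the
// rate-targeted heap (if selected), then the zebra block heap, then the large
// block heap, and finally the heap the caller passed in.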
212 E : void* alloc = nullptr;
213 E : BlockLayout block_layout = {};
214 E : for (int i = static_cast<int>(heap_count) - 1; i >= 0; --i) {
215 E : BlockHeapInterface* heap = GetHeapFromId(heaps[i]);
216 : alloc = heap->AllocateBlock(
217 : bytes,
218 : 0,
219 : parameters_.trailer_padding_size + sizeof(BlockTrailer),
220 E : &block_layout);
221 E : if (alloc != nullptr) {
222 E : heap_id = heaps[i];
223 E : break;
224 : }
225 E : }
226 :
227 : // The allocation can fail if we're out of memory.
228 E : if (alloc == nullptr)
229 E : return nullptr;
230 :
231 E : DCHECK_NE(static_cast<void*>(nullptr), alloc);
232 E : DCHECK_EQ(0u, reinterpret_cast<size_t>(alloc) % kShadowRatio);
233 E : BlockInfo block = {};
234 E : BlockInitialize(block_layout, alloc, false, &block);
235 :
236 : // Poison the redzones in the shadow memory as early as possible.
237 E : Shadow::PoisonAllocatedBlock(block);
238 :
239 E : block.header->alloc_stack = stack_cache_->SaveStackTrace(stack);
240 E : block.header->free_stack = nullptr;
241 E : block.header->state = ALLOCATED_BLOCK;
242 :
243 E : block.trailer->heap_id = heap_id;
244 :
245 E : BlockSetChecksum(block);
246 E : BlockProtectRedzones(block);
247 :
248 E : return block.body;
249 E : }
250 :
251 E : bool BlockHeapManager::Free(HeapId heap_id, void* alloc) {
252 E : DCHECK(initialized_);
253 E : DCHECK(IsValidHeapId(heap_id, false));
254 :
255 : // The standard allows calling free on a null pointer.
256 E : if (alloc == nullptr)
257 E : return true;
258 :
259 E : BlockInfo block_info = {};
260 : if (!Shadow::IsBeginningOfBlockBody(alloc) ||
261 E : !GetBlockInfo(alloc, &block_info)) {
262 E : return FreeUnguardedAlloc(heap_id, alloc);
263 : }
264 :
265 : // Precondition: A valid guarded allocation.
266 E : BlockProtectNone(block_info);
267 :
268 E : if (!BlockChecksumIsValid(block_info)) {
269 : // The free stack hasn't yet been set, but may have been filled with junk.
270 : // Reset it.
271 E : block_info.header->free_stack = nullptr;
272 E : ReportHeapError(alloc, CORRUPT_BLOCK);
273 E : return FreeCorruptBlock(&block_info);
274 : }
275 :
276 E : if (block_info.header->state == QUARANTINED_BLOCK) {
277 E : ReportHeapError(alloc, DOUBLE_FREE);
278 E : return false;
279 : }
280 :
281 : // heap_id is just a hint; the block trailer contains the heap that was
282 : // actually used for the allocation.
283 E : heap_id = block_info.trailer->heap_id;
284 E : BlockQuarantineInterface* quarantine = GetQuarantineFromId(heap_id);
285 :
286 : // Poison the released alloc (marked as freed) and quarantine the block.
287 : // Note that the original data is left intact. This may make it easier
288 : // to debug a crash report/dump on access to a quarantined block.
289 E : Shadow::MarkAsFreed(block_info.body, block_info.body_size);
290 :
291 : // We need to update the block's metadata before pushing it into the
292 : // quarantine, otherwise a concurrent thread might try to pop it while
293 : // it's in an invalid state.
294 E : common::StackCapture stack;
295 E : stack.InitFromStack();
296 : block_info.header->free_stack =
297 E : stack_cache_->SaveStackTrace(stack);
298 E : block_info.trailer->free_ticks = ::GetTickCount();
299 E : block_info.trailer->free_tid = ::GetCurrentThreadId();
300 E : block_info.header->state = QUARANTINED_BLOCK;
301 :
302 E : BlockSetChecksum(block_info);
303 :
304 E : CompactBlockInfo compact = {};
305 E : ConvertBlockInfo(block_info, &compact);
306 :
307 : {
308 : BlockQuarantineInterface::AutoQuarantineLock quarantine_lock(
309 E : quarantine, compact);
310 E : if (!quarantine->Push(compact))
311 E : return FreePristineBlock(&block_info);
312 :
313 : // The recently pushed block can be popped out in TrimQuarantine if the
314 : // quarantine size is 0; in that case TrimQuarantine takes care of properly
315 : // unprotecting and freeing the block. If the protection were set blindly
316 : // after TrimQuarantine we could end up protecting a free (not quarantined,
317 : // not allocated) block.
318 E : BlockProtectAll(block_info);
319 E : }
320 E : TrimQuarantine(quarantine);
321 E : return true;
322 E : }
323 :
324 E : size_t BlockHeapManager::Size(HeapId heap_id, const void* alloc) {
325 E : DCHECK(initialized_);
326 E : DCHECK(IsValidHeapId(heap_id, false));
327 :
328 E : if (Shadow::IsBeginningOfBlockBody(alloc)) {
329 E : BlockInfo block_info = {};
330 E : if (!GetBlockInfo(alloc, &block_info))
331 i : return 0;
332 E : return block_info.body_size;
333 : }
334 :
335 E : BlockHeapInterface* heap = GetHeapFromId(heap_id);
336 : if ((heap->GetHeapFeatures() &
337 E : HeapInterface::kHeapSupportsGetAllocationSize) != 0) {
338 E : return heap->GetAllocationSize(alloc);
339 i : } else {
340 i : return 0;
341 : }
342 E : }
343 :
344 E : void BlockHeapManager::Lock(HeapId heap_id) {
345 E : DCHECK(initialized_);
346 E : DCHECK(IsValidHeapId(heap_id, false));
347 E : GetHeapFromId(heap_id)->Lock();
348 E : }
349 :
350 E : void BlockHeapManager::Unlock(HeapId heap_id) {
351 E : DCHECK(initialized_);
352 E : DCHECK(IsValidHeapId(heap_id, false));
353 E : GetHeapFromId(heap_id)->Unlock();
354 E : }
355 :
356 E : void BlockHeapManager::BestEffortLockAll() {
357 E : DCHECK(initialized_);
358 E : static const base::TimeDelta kTryTime(base::TimeDelta::FromMilliseconds(50));
359 E : lock_.Acquire();
360 :
361 : // Create room to store the list of locked heaps. This must use the internal
362 : // heap as any other heap may be involved in a crash and locked right now.
363 E : DCHECK_EQ(static_cast<HeapInterface**>(nullptr), locked_heaps_);
364 E : size_t alloc_size = sizeof(HeapInterface*) * (heaps_.size() + 1);
365 : locked_heaps_ = reinterpret_cast<HeapInterface**>(internal_heap_->Allocate(
366 E : alloc_size));
367 E : DCHECK_NE(static_cast<HeapInterface**>(nullptr), locked_heaps_);
368 E : ::memset(locked_heaps_, 0, alloc_size);
369 :
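// Heaps that cannot be acquired within |kTryTime| are simply skipped; this is
// what makes the lock acquisition "best effort".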
370 E : size_t index = 0;
371 E : for (auto& heap_quarantine_pair : heaps_) {
372 E : HeapInterface* heap = heap_quarantine_pair.first;
373 E : if (TimedTry(kTryTime, heap)) {
374 E : locked_heaps_[index] = heap;
375 E : ++index;
376 : }
377 E : }
378 E : }
379 :
380 E : void BlockHeapManager::UnlockAll() {
381 E : DCHECK(initialized_);
382 E : lock_.AssertAcquired();
383 E : DCHECK_NE(static_cast<HeapInterface**>(nullptr), locked_heaps_);
384 E : for (HeapInterface** heap = locked_heaps_; *heap != nullptr; ++heap)
385 E : (*heap)->Unlock();
386 E : internal_heap_->Free(locked_heaps_);
387 E : locked_heaps_ = nullptr;
388 E : lock_.Release();
389 E : }
390 :
391 : void BlockHeapManager::set_parameters(
392 E : const ::common::AsanParameters& parameters) {
393 : // Once initialized we can't tolerate changes to enable_ctmalloc, as the
394 : // internal heap and process heap would have to be reinitialized.
395 : DCHECK(!initialized_ ||
396 E : parameters_.enable_ctmalloc == parameters.enable_ctmalloc);
397 :
398 : {
399 E : base::AutoLock lock(lock_);
400 E : parameters_ = parameters;
401 E : }
402 :
403 : // The lock is released before propagating the parameters.
404 E : if (initialized_)
405 E : PropagateParameters();
406 E : }
407 :
408 E : void BlockHeapManager::TearDownHeapManager() {
409 E : base::AutoLock lock(lock_);
410 :
411 : // This would indicate that we have outstanding heap locks being
412 : // held. This shouldn't happen as |locked_heaps_| is only non-null
413 : // under |lock_|.
414 E : DCHECK_EQ(static_cast<HeapInterface**>(nullptr), locked_heaps_);
415 :
416 : // Delete all the heaps. This must be done manually to ensure that
417 : // all references to internal_heap_ have been cleaned up.
418 E : HeapQuarantineMap::iterator iter_heaps = heaps_.begin();
419 E : for (; iter_heaps != heaps_.end(); ++iter_heaps) {
420 E : DCHECK(!iter_heaps->second.is_dying);
421 E : iter_heaps->second.is_dying = true;
422 E : DestroyHeapContents(iter_heaps->first, iter_heaps->second.quarantine);
423 : DestroyHeapResourcesUnlocked(iter_heaps->first,
424 E : iter_heaps->second.quarantine);
425 E : }
426 : // Clear the active heap list.
427 E : heaps_.clear();
428 :
429 : // Clear the specialized heap references since they were deleted.
430 E : process_heap_ = nullptr;
431 E : process_heap_underlying_heap_ = nullptr;
432 E : process_heap_id_ = 0;
433 E : zebra_block_heap_ = nullptr;
434 E : zebra_block_heap_id_ = 0;
435 E : large_block_heap_id_ = 0;
436 : {
437 E : base::AutoLock lock(targeted_heaps_info_lock_);
438 E : for (size_t i = 0; i < kRateTargetedHeapCount; ++i) {
439 E : rate_targeted_heaps_[i] = 0;
440 E : rate_targeted_heaps_count_[i] = 0;
441 E : }
442 E : }
443 :
444 : // Free the allocation-filter flag (TLS).
445 E : if (allocation_filter_flag_tls_ != TLS_OUT_OF_INDEXES) {
446 E : ::TlsFree(allocation_filter_flag_tls_);
447 E : allocation_filter_flag_tls_ = TLS_OUT_OF_INDEXES;
448 : }
449 E : }
450 :
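// A heap id is simply the address of the heap's entry (a HeapQuarantinePair)
// in |heaps_|. The IsValidHeapId* functions below check that such a pointer
// still refers to a live entry before it is dereferenced.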
451 : HeapId BlockHeapManager::GetHeapId(
452 E : HeapQuarantineMap::iterator iterator) const {
453 E : HeapQuarantinePair* hq_pair = &(*iterator);
454 E : return reinterpret_cast<HeapId>(hq_pair);
455 E : }
456 :
457 : HeapId BlockHeapManager::GetHeapId(
458 E : const std::pair<HeapQuarantineMap::iterator, bool>& insert_result) const {
459 E : return GetHeapId(insert_result.first);
460 E : }
461 :
462 : bool BlockHeapManager::IsValidHeapIdUnsafe(HeapId heap_id, bool allow_dying) {
463 : DCHECK(initialized_);
464 : HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
465 : if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
466 : return false;
467 : base::AutoLock auto_lock(lock_);
468 : if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
469 : return false;
470 : return true;
471 : }
472 :
473 : bool BlockHeapManager::IsValidHeapIdUnsafeUnlocked(
474 : HeapId heap_id, bool allow_dying) {
475 : DCHECK(initialized_);
476 : HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
477 : if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
478 : return false;
479 : if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
480 : return false;
481 : return true;
482 : }
483 :
484 E : bool BlockHeapManager::IsValidHeapId(HeapId heap_id, bool allow_dying) {
485 E : DCHECK(initialized_);
486 E : HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
487 E : if (!IsValidHeapIdUnlockedImpl1(hq))
488 i : return false;
489 E : base::AutoLock auto_lock(lock_);
490 E : if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
491 i : return false;
492 E : return true;
493 E : }
494 :
495 E : bool BlockHeapManager::IsValidHeapIdUnlocked(HeapId heap_id, bool allow_dying) {
496 E : DCHECK(initialized_);
497 E : HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
498 E : if (!IsValidHeapIdUnlockedImpl1(hq))
499 E : return false;
500 E : if (!IsValidHeapIdUnlockedImpl2(hq, allow_dying))
501 E : return false;
502 E : return true;
503 E : }
504 :
505 : bool BlockHeapManager::IsValidHeapIdUnsafeUnlockedImpl1(
506 E : HeapQuarantinePair* hq) {
507 : // First check to see if it looks like it has the right shape. This could
508 : // cause an invalid access if the heap_id is a completely wild value.
509 E : if (hq == nullptr)
510 i : return false;
511 E : if (hq->first == nullptr || hq->second.quarantine == nullptr)
512 i : return false;
513 E : return true;
514 E : }
515 :
516 : bool BlockHeapManager::IsValidHeapIdUnlockedImpl1(
517 i : HeapQuarantinePair* hq) {
518 : // Run this in an exception handler, as if it's a really invalid heap id
519 : // we could end up reading from inaccessible memory.
520 i : __try {
521 i : if (!IsValidHeapIdUnsafeUnlockedImpl1(hq))
522 i : return false;
523 i : } __except(EXCEPTION_EXECUTE_HANDLER) {
524 i : return false;
525 i : }
526 i : return true;
527 i : }
528 :
529 : bool BlockHeapManager::IsValidHeapIdUnlockedImpl2(HeapQuarantinePair* hq,
530 E : bool allow_dying) {
531 : // Look in the list of live heaps first.
532 E : auto it = heaps_.find(hq->first);
533 E : if (it != heaps_.end()) {
534 E : HeapId heap_id = GetHeapId(it);
535 E : if (heap_id == reinterpret_cast<HeapId>(hq))
536 E : return !it->second.is_dying || allow_dying;
537 : }
538 :
539 E : return false;
540 E : }
541 :
542 E : BlockHeapInterface* BlockHeapManager::GetHeapFromId(HeapId heap_id) {
543 E : DCHECK_NE(reinterpret_cast<HeapId>(nullptr), heap_id);
544 E : HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
545 E : DCHECK_NE(static_cast<BlockHeapInterface*>(nullptr), hq->first);
546 E : return hq->first;
547 E : }
548 :
549 : BlockQuarantineInterface* BlockHeapManager::GetQuarantineFromId(
550 E : HeapId heap_id) {
551 E : DCHECK_NE(reinterpret_cast<HeapId>(nullptr), heap_id);
552 E : HeapQuarantinePair* hq = reinterpret_cast<HeapQuarantinePair*>(heap_id);
553 : DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr),
554 E : hq->second.quarantine);
555 E : return hq->second.quarantine;
556 E : }
557 :
558 E : void BlockHeapManager::PropagateParameters() {
559 : // The internal heap should already be set up.
560 E : DCHECK_NE(static_cast<HeapInterface*>(nullptr), internal_heap_.get());
561 :
562 E : size_t quarantine_size = shared_quarantine_.max_quarantine_size();
563 E : shared_quarantine_.set_max_quarantine_size(parameters_.quarantine_size);
564 E : shared_quarantine_.set_max_object_size(parameters_.quarantine_block_size);
565 :
566 : // Trim the quarantine if its maximum size has decreased.
567 E : if (initialized_ && quarantine_size > parameters_.quarantine_size)
568 E : TrimQuarantine(&shared_quarantine_);
569 :
570 E : if (parameters_.enable_zebra_block_heap && zebra_block_heap_ == nullptr) {
571 : // Initialize the zebra heap only if it isn't already initialized.
572 : // The zebra heap cannot be resized once created.
573 E : base::AutoLock lock(lock_);
574 : zebra_block_heap_ = new ZebraBlockHeap(parameters_.zebra_block_heap_size,
575 : &shadow_memory_notifier_,
576 E : internal_heap_.get());
577 : // The zebra block heap is its own quarantine.
578 E : HeapMetadata heap_metadata = { zebra_block_heap_, false };
579 : auto result = heaps_.insert(std::make_pair(zebra_block_heap_,
580 E : heap_metadata));
581 E : zebra_block_heap_id_ = GetHeapId(result);
582 E : }
583 :
584 E : if (zebra_block_heap_ != nullptr) {
585 : zebra_block_heap_->set_quarantine_ratio(
586 E : parameters_.zebra_block_heap_quarantine_ratio);
587 E : if (initialized_)
588 E : TrimQuarantine(zebra_block_heap_);
589 : }
590 :
591 : // Create the LargeBlockHeap if need be.
592 E : if (parameters_.enable_large_block_heap && large_block_heap_id_ == 0) {
593 E : base::AutoLock lock(lock_);
594 E : BlockHeapInterface* heap = new LargeBlockHeap(internal_heap_.get());
595 E : HeapMetadata metadata = { &shared_quarantine_, false };
596 E : auto result = heaps_.insert(std::make_pair(heap, metadata));
597 E : large_block_heap_id_ = GetHeapId(result);
598 E : }
599 :
600 : // TODO(chrisha|sebmarchand): Clean up existing blocks that exceed the
601 : // maximum block size? This will require an entirely new TrimQuarantine
602 : // function. Since this is never changed at runtime except in our
603 : // unittests, this is not clearly useful.
604 E : }
605 :
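// The allocation-filter flag is stored per-thread via TLS; when the filter is
// enabled a thread can set this flag to route its own allocations to the
// zebra or large block heaps (see MayUseZebraBlockHeap and
// MayUseLargeBlockHeap).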
606 E : bool BlockHeapManager::allocation_filter_flag() const {
607 E : return reinterpret_cast<bool>(::TlsGetValue(allocation_filter_flag_tls_));
608 E : }
609 :
610 E : void BlockHeapManager::set_allocation_filter_flag(bool value) {
611 E : ::TlsSetValue(allocation_filter_flag_tls_, reinterpret_cast<void*>(value));
612 E : }
613 :
614 E : HeapType BlockHeapManager::GetHeapTypeUnlocked(HeapId heap_id) {
615 E : DCHECK(initialized_);
616 E : DCHECK(IsValidHeapIdUnlocked(heap_id, true));
617 E : BlockHeapInterface* heap = GetHeapFromId(heap_id);
618 E : return heap->GetHeapType();
619 E : }
620 :
621 : bool BlockHeapManager::DestroyHeapContents(
622 : BlockHeapInterface* heap,
623 E : BlockQuarantineInterface* quarantine) {
624 E : DCHECK(initialized_);
625 E : DCHECK_NE(static_cast<BlockHeapInterface*>(nullptr), heap);
626 E : DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr), quarantine);
627 :
628 : // Starts by removing all the blocks from this heap from the quarantine.
629 E : BlockQuarantineInterface::ObjectVector blocks_vec;
630 E : BlockQuarantineInterface::ObjectVector blocks_to_free;
631 :
632 : // We'll keep the blocks that don't belong to this heap in a temporary list.
633 : // While this isn't optimal in terms of performance, destroying a heap isn't a
634 : // common operation.
635 : // TODO(sebmarchand): Add a version of the ShardedBlockQuarantine::Empty
636 : // method that accepts a functor to filter the blocks to remove.
637 E : BlockQuarantineInterface::ObjectVector blocks_to_reinsert;
638 E : quarantine->Empty(&blocks_vec);
639 :
640 E : for (const auto& iter_block : blocks_vec) {
641 E : BlockInfo expanded = {};
642 E : ConvertBlockInfo(iter_block, &expanded);
643 :
644 : // Remove protection to enable access to the block header.
645 E : BlockProtectNone(expanded);
646 :
647 E : BlockHeapInterface* block_heap = GetHeapFromId(expanded.trailer->heap_id);
648 :
649 E : if (block_heap == heap) {
650 E : blocks_to_free.push_back(iter_block);
651 E : } else {
652 E : blocks_to_reinsert.push_back(iter_block);
653 : }
654 E : }
655 :
656 : // Reinsert the blocks that don't belong to this heap into the quarantine.
657 E : for (const auto& iter_block : blocks_to_reinsert) {
658 E : BlockInfo expanded = {};
659 E : ConvertBlockInfo(iter_block, &expanded);
660 :
661 : BlockQuarantineInterface::AutoQuarantineLock quarantine_lock(quarantine,
662 E : iter_block);
663 E : if (quarantine->Push(iter_block)) {
664 : // Restore protection to quarantined block.
665 E : BlockProtectAll(expanded);
666 E : } else {
667 : // Avoid memory leak.
668 i : blocks_to_free.push_back(iter_block);
669 : }
670 E : }
671 :
672 E : FreeBlockVector(blocks_to_free);
673 :
674 E : return true;
675 E : }
676 :
677 : void BlockHeapManager::DestroyHeapResourcesUnlocked(
678 : BlockHeapInterface* heap,
679 E : BlockQuarantineInterface* quarantine) {
680 : // If the heap has an underlying heap then free it as well.
681 : {
682 E : auto iter = underlying_heaps_map_.find(heap);
683 E : if (iter != underlying_heaps_map_.end()) {
684 E : DCHECK_NE(static_cast<HeapInterface*>(nullptr), iter->second);
685 E : delete iter->second;
686 E : underlying_heaps_map_.erase(iter);
687 : }
688 : }
689 :
690 E : delete heap;
691 E : }
692 :
693 E : void BlockHeapManager::TrimQuarantine(BlockQuarantineInterface* quarantine) {
694 E : DCHECK(initialized_);
695 E : DCHECK_NE(static_cast<BlockQuarantineInterface*>(nullptr), quarantine);
696 :
697 E : BlockQuarantineInterface::ObjectVector blocks_to_free;
698 :
699 : // Trim the quarantine to the new maximum size.
700 E : if (parameters_.quarantine_size == 0) {
701 i : quarantine->Empty(&blocks_to_free);
702 i : } else {
703 E : CompactBlockInfo compact = {};
704 E : while (quarantine->Pop(&compact))
705 E : blocks_to_free.push_back(compact);
706 : }
707 :
708 E : FreeBlockVector(blocks_to_free);
709 E : }
710 :
711 : void BlockHeapManager::FreeBlockVector(
712 E : BlockQuarantineInterface::ObjectVector& vec) {
713 E : for (const auto& iter_block : vec) {
714 E : BlockInfo expanded = {};
715 E : ConvertBlockInfo(iter_block, &expanded);
716 E : CHECK(FreePotentiallyCorruptBlock(&expanded));
717 E : }
718 E : }
719 :
720 E : bool BlockHeapManager::FreePotentiallyCorruptBlock(BlockInfo* block_info) {
721 E : DCHECK(initialized_);
722 E : DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
723 :
724 E : BlockProtectNone(*block_info);
725 :
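// Blocks released from the quarantine are validated first: if the header
// magic or the checksum no longer matches, the block has been modified since
// it was quarantined and is reported as corrupt.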
726 : if (block_info->header->magic != kBlockHeaderMagic ||
727 E : !BlockChecksumIsValid(*block_info)) {
728 E : ReportHeapError(block_info->block, CORRUPT_BLOCK);
729 E : return FreeCorruptBlock(block_info);
730 i : } else {
731 E : return FreePristineBlock(block_info);
732 : }
733 E : }
734 :
735 E : bool BlockHeapManager::FreeCorruptBlock(BlockInfo* block_info) {
736 E : DCHECK(initialized_);
737 E : DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
738 E : ClearCorruptBlockMetadata(block_info);
739 E : return FreePristineBlock(block_info);
740 E : }
741 :
742 E : bool BlockHeapManager::FreePristineBlock(BlockInfo* block_info) {
743 E : DCHECK(initialized_);
744 E : DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
745 E : BlockHeapInterface* heap = GetHeapFromId(block_info->trailer->heap_id);
746 :
747 : // Remove block protections so the redzones may be modified.
748 E : BlockProtectNone(*block_info);
749 :
750 : // Return pointers to the stacks for reference counting purposes.
751 E : if (block_info->header->alloc_stack != nullptr) {
752 E : stack_cache_->ReleaseStackTrace(block_info->header->alloc_stack);
753 E : block_info->header->alloc_stack = nullptr;
754 : }
755 E : if (block_info->header->free_stack != nullptr) {
756 E : stack_cache_->ReleaseStackTrace(block_info->header->free_stack);
757 E : block_info->header->free_stack = nullptr;
758 : }
759 :
760 E : block_info->header->state = FREED_BLOCK;
761 :
762 : if ((heap->GetHeapFeatures() &
763 E : HeapInterface::kHeapReportsReservations) != 0) {
764 : Shadow::Poison(block_info->block, block_info->block_size,
765 E : kAsanReservedMarker);
766 E : } else {
767 E : Shadow::Unpoison(block_info->block, block_info->block_size);
768 : }
769 E : return heap->FreeBlock(*block_info);
770 E : }
771 :
772 E : bool BlockHeapManager::FreeUnguardedAlloc(HeapId heap_id, void* alloc) {
773 E : DCHECK(initialized_);
774 E : DCHECK(IsValidHeapId(heap_id, false));
775 E : BlockHeapInterface* heap = GetHeapFromId(heap_id);
776 :
777 : // Check if the allocation comes from the process heap. If so, there are
778 : // two possibilities:
779 : // - If CTMalloc is enabled, the underlying heap of the process heap is a
780 : // CTMalloc heap. In this case we can explicitly check if the allocation
781 : // was made via the CTMalloc process heap.
782 : // - If CTMalloc is disabled, the underlying heap of the process heap is
783 : // always the real process heap.
784 : if (heap == process_heap_ &&
785 E : (!parameters_.enable_ctmalloc || !heap->IsAllocated(alloc))) {
786 : // The shadow memory associated with this allocation is already green, so
787 : // no need to modify it.
788 E : return ::HeapFree(::GetProcessHeap(), 0, alloc) == TRUE;
789 : }
790 :
791 : // If the heap carves greenzones out of redzones, then color the allocation
792 : // red again. Otherwise, simply leave it green.
793 : if ((heap->GetHeapFeatures() &
794 E : HeapInterface::kHeapReportsReservations) != 0) {
795 : DCHECK_NE(0U, heap->GetHeapFeatures() &
796 E : HeapInterface::kHeapSupportsGetAllocationSize);
797 : Shadow::Poison(alloc,
798 : Size(heap_id, alloc),
799 E : kAsanReservedMarker);
800 : }
801 :
802 E : return heap->Free(alloc);
803 E : }
804 :
805 E : void BlockHeapManager::ClearCorruptBlockMetadata(BlockInfo* block_info) {
806 E : DCHECK(initialized_);
807 E : DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
808 E : DCHECK_NE(static_cast<BlockHeader*>(nullptr), block_info->header);
809 :
810 : // Set the invalid stack captures to nullptr.
811 : if (!stack_cache_->StackCapturePointerIsValid(
812 E : block_info->header->alloc_stack)) {
813 i : block_info->header->alloc_stack = nullptr;
814 : }
815 : if (!stack_cache_->StackCapturePointerIsValid(
816 E : block_info->header->free_stack)) {
817 E : block_info->header->free_stack = nullptr;
818 : }
819 E : }
820 :
821 E : void BlockHeapManager::ReportHeapError(void* address, BadAccessKind kind) {
822 E : DCHECK(initialized_);
823 E : DCHECK_NE(static_cast<void*>(nullptr), address);
824 :
825 : // Collect information about the error.
826 E : AsanErrorInfo error_info = {};
827 E : ::RtlCaptureContext(&error_info.context);
828 E : error_info.access_mode = agent::asan::ASAN_UNKNOWN_ACCESS;
829 E : error_info.location = address;
830 E : error_info.error_type = kind;
831 E : ErrorInfoGetBadAccessInformation(stack_cache_, &error_info);
832 E : agent::common::StackCapture stack;
833 E : stack.InitFromStack();
834 E : error_info.crash_stack_id = stack.ComputeRelativeStackId();
835 :
836 : // We expect a callback to be set.
837 E : DCHECK(!heap_error_callback_.is_null());
838 E : heap_error_callback_.Run(&error_info);
839 E : }
840 :
841 E : void BlockHeapManager::InitInternalHeap() {
842 E : DCHECK_EQ(static_cast<HeapInterface*>(nullptr), internal_heap_.get());
843 : DCHECK_EQ(static_cast<HeapInterface*>(nullptr),
844 E : internal_win_heap_.get());
845 :
846 E : if (parameters_.enable_ctmalloc) {
847 : internal_heap_.reset(
848 E : new heaps::CtMallocHeap(&shadow_memory_notifier_));
849 E : } else {
850 E : internal_win_heap_.reset(new heaps::WinHeap);
851 : internal_heap_.reset(new heaps::InternalHeap(&shadow_memory_notifier_,
852 E : internal_win_heap_.get()));
853 : }
854 E : }
855 :
856 E : void BlockHeapManager::InitProcessHeap() {
857 E : DCHECK_EQ(static_cast<BlockHeapInterface*>(nullptr), process_heap_);
858 E : if (parameters_.enable_ctmalloc) {
859 : process_heap_underlying_heap_ =
860 E : new heaps::CtMallocHeap(&shadow_memory_notifier_);
861 E : } else {
862 E : process_heap_underlying_heap_ = new heaps::WinHeap(::GetProcessHeap());
863 : }
864 E : process_heap_ = new heaps::SimpleBlockHeap(process_heap_underlying_heap_);
865 : underlying_heaps_map_.insert(std::make_pair(process_heap_,
866 E : process_heap_underlying_heap_));
867 E : HeapMetadata heap_metadata = { &shared_quarantine_, false };
868 E : auto result = heaps_.insert(std::make_pair(process_heap_, heap_metadata));
869 E : process_heap_id_ = GetHeapId(result);
870 E : }
871 :
872 E : void BlockHeapManager::InitRateTargetedHeaps() {
873 : // |kRateTargetedHeapCount| should be <= 32 because of the way the logarithm
874 : // is computed in GetMSBIndex.
875 E : if (!parameters_.enable_rate_targeted_heaps) {
876 E : ::memset(rate_targeted_heaps_, 0, sizeof(rate_targeted_heaps_));
877 : ::memset(rate_targeted_heaps_count_, 0,
878 E : sizeof(rate_targeted_heaps_count_));
879 E : return;
880 : }
881 :
882 : COMPILE_ASSERT(kRateTargetedHeapCount <= 32,
883 : rate_targeted_heap_count_is_too_high);
884 E : for (size_t i = 0; i < kRateTargetedHeapCount; ++i) {
885 E : rate_targeted_heaps_[i] = CreateHeap();
886 E : rate_targeted_heaps_count_[i] = 0;
887 E : }
888 E : }
889 :
890 E : bool BlockHeapManager::MayUseLargeBlockHeap(size_t bytes) const {
891 E : DCHECK(initialized_);
892 E : if (!parameters_.enable_large_block_heap)
893 E : return false;
894 E : if (bytes >= parameters_.large_allocation_threshold)
895 E : return true;
896 :
897 : // If we get here we're dealing with a small allocation. If the allocation
898 : // filter is in effect and the flag is set then allow it.
899 E : if (parameters_.enable_allocation_filter && allocation_filter_flag())
900 E : return true;
901 :
902 E : return false;
903 E : }
904 :
905 E : bool BlockHeapManager::MayUseZebraBlockHeap(size_t bytes) const {
906 E : DCHECK(initialized_);
907 E : if (!parameters_.enable_zebra_block_heap)
908 E : return false;
909 E : if (bytes > ZebraBlockHeap::kMaximumBlockAllocationSize)
910 E : return false;
911 :
912 : // If the allocation filter is in effect only allow filtered allocations
913 : // into the zebra heap.
914 E : if (parameters_.enable_allocation_filter)
915 E : return allocation_filter_flag();
916 :
917 : // Otherwise, allow everything through.
918 E : return true;
919 E : }
920 :
921 E : bool BlockHeapManager::MayUseRateTargetedHeap(size_t bytes) const {
922 E : DCHECK(initialized_);
923 E : if (!parameters_.enable_rate_targeted_heaps)
924 E : return false;
925 : if (bytes <= kDefaultRateTargetedHeapsMaxBlockSize[0] &&
926 E : bytes >= kDefaultRateTargetedHeapsMinBlockSize[0]) {
927 E : return true;
928 : }
929 : if (bytes <= kDefaultRateTargetedHeapsMaxBlockSize[1] &&
930 E : bytes >= kDefaultRateTargetedHeapsMinBlockSize[1]) {
931 E : return true;
932 : }
933 E : return false;
934 E : }
935 :
936 : HeapId BlockHeapManager::ChooseRateTargetedHeap(
937 E : const agent::common::StackCapture& stack) {
938 E : AllocationRateInfo::AllocationSiteCountMap::iterator iter;
939 : {
940 E : base::AutoLock lock(targeted_heaps_info_lock_);
941 :
942 : // Insert the current stack into the map that tracks how many times each
943 : // allocation stack has been encountered, or increment the frequency if it's
944 : // already present.
945 : iter = targeted_heaps_info_.allocation_site_count_map.find(
946 E : stack.stack_id());
947 E : if (iter == targeted_heaps_info_.allocation_site_count_map.end()) {
948 : iter = targeted_heaps_info_.allocation_site_count_map.insert(
949 E : std::make_pair(stack.stack_id(), 1)).first;
950 E : targeted_heaps_info_.allocation_site_count_min = 1;
951 E : } else {
952 E : if (targeted_heaps_info_.allocation_site_count_min == iter->second)
953 E : targeted_heaps_info_.allocation_site_count_min++;
954 E : iter->second++;
955 : }
956 E : }
957 :
958 : // Track the minimum and maximum values of the allocation site frequencies.
959 : // This is both lazy and racy. However, due to the atomic nature of the
960 : // reads or writes, the values will track the true values quite closely.
961 E : if (iter->second > targeted_heaps_info_.allocation_site_count_max)
962 E : targeted_heaps_info_.allocation_site_count_max = iter->second;
963 E : if (iter->second < targeted_heaps_info_.allocation_site_count_min)
964 E : targeted_heaps_info_.allocation_site_count_min = iter->second;
965 :
966 : // Because of the racy updates to min and max, grab local copies of them.
967 E : size_t min = targeted_heaps_info_.allocation_site_count_min;
968 E : size_t max = targeted_heaps_info_.allocation_site_count_max;
969 :
970 : // Cap the current count to the min/max estimates.
971 E : size_t current_count = std::max(min, iter->second);
972 E : current_count = std::min(max, current_count) - min;
973 :
974 : // Compute the logarithm of the capped count and of the [min, max] width,
975 : // then chop this logarithmic space into |kRateTargetedHeapCount| buckets.
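// For example, if max - min == 255 (so width_msb == 7) and
// kRateTargetedHeapCount were 2, counts whose MSB index is below 4 would map
// to bucket 0 and the rest to bucket 1.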
976 E : size_t current_count_msb = GetMSBIndex(current_count);
977 E : size_t width_msb = GetMSBIndex(max - min);
978 E : size_t bucket = current_count_msb * kRateTargetedHeapCount / (width_msb + 1);
979 E : DCHECK_GT(kRateTargetedHeapCount, bucket);
980 :
981 E : rate_targeted_heaps_count_[bucket]++;
982 E : return rate_targeted_heaps_[bucket];
983 E : }
984 :
985 : } // namespace heap_managers
986 : } // namespace asan
987 : } // namespace agent