1 : // Copyright 2014 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/agent/asan/heap_managers/block_heap_manager.h"
16 :
17 : #include <vector>
18 :
19 : #include "base/bind.h"
20 : #include "base/compiler_specific.h"
21 : #include "base/rand_util.h"
22 : #include "base/sha1.h"
23 : #include "base/debug/alias.h"
24 : #include "base/synchronization/condition_variable.h"
25 : #include "base/synchronization/lock.h"
26 : #include "base/synchronization/waitable_event.h"
27 : #include "base/test/test_reg_util_win.h"
28 : #include "base/threading/simple_thread.h"
29 : #include "gmock/gmock.h"
30 : #include "gtest/gtest.h"
31 : #include "syzygy/agent/asan/block.h"
32 : #include "syzygy/agent/asan/heap.h"
33 : #include "syzygy/agent/asan/page_protection_helpers.h"
34 : #include "syzygy/agent/asan/rtl_impl.h"
35 : #include "syzygy/agent/asan/runtime.h"
36 : #include "syzygy/agent/asan/stack_capture_cache.h"
37 : #include "syzygy/agent/asan/unittest_util.h"
38 : #include "syzygy/agent/asan/heaps/internal_heap.h"
39 : #include "syzygy/agent/asan/heaps/large_block_heap.h"
40 : #include "syzygy/agent/asan/heaps/simple_block_heap.h"
41 : #include "syzygy/agent/asan/heaps/win_heap.h"
42 : #include "syzygy/agent/asan/heaps/zebra_block_heap.h"
43 : #include "syzygy/agent/asan/memory_notifiers/null_memory_notifier.h"
44 : #include "syzygy/assm/assembler.h"
45 : #include "syzygy/assm/buffer_serializer.h"
46 : #include "syzygy/common/asan_parameters.h"
47 :
48 : namespace agent {
49 : namespace asan {
50 : namespace heap_managers {
51 :
52 : namespace {
53 :
54 : using heaps::ZebraBlockHeap;
55 : using testing::IsAccessible;
56 : using testing::IsNotAccessible;
57 : using testing::ScopedBlockAccess;
58 :
59 : typedef BlockHeapManager::HeapId HeapId;
60 :
61 E : testing::DummyHeap dummy_heap;
62 :
63 : // As the code that computes the relative stack IDs ignores any frames from
64 : // its own module and as we statically link with the SyzyAsan CRT, all the
65 : // allocations or crashes coming from these tests will have the same
66 : // relative stack ID by default. To fix this we dynamically generate code that
67 : // does the allocation. We then use the ComputeRelativeStackId seam to indicate
68 : // that the frame is in an entirely different dummy module.
69 : class AllocateFromHeapManagerHelper {
70 : public:
71 E : AllocateFromHeapManagerHelper(BlockHeapManager* heap_manager,
72 : HeapId heap_id,
73 : size_t offset)
74 E : : heap_manager_(heap_manager), heap_id_(heap_id), offset_(offset) {
75 E : DCHECK_NE(static_cast<BlockHeapManager*>(nullptr), heap_manager);
76 E : DCHECK_LT(offset, GetPageSize());
77 :
78 : // Allocates a page that has the executable bit set.
79 E : allocation_code_page_ = ::VirtualAlloc(nullptr, GetPageSize(),
80 : MEM_COMMIT, PAGE_EXECUTE_READWRITE);
81 E : EXPECT_NE(nullptr, allocation_code_page_);
82 :
83 E : assm::BufferSerializer bs(
84 : reinterpret_cast<uint8_t*>(allocation_code_page_) + offset,
85 : GetPageSize() - offset);
86 E : assm::AssemblerImpl assembler(
87 : reinterpret_cast<uint32_t>(allocation_code_page_) + offset, &bs);
88 :
89 E : assembler.push(assm::ebp);
90 E : assembler.mov(assm::ebp, assm::esp);
91 :
92 : // Push the parameters on the stack.
93 E : assembler.push(assm::AssemblerImpl::Operand(assm::ebp,
94 : assm::AssemblerImpl::Displacement(0x10, assm::kSize8Bit)));
95 E : assembler.push(assm::AssemblerImpl::Operand(assm::ebp,
96 : assm::AssemblerImpl::Displacement(0x0C, assm::kSize8Bit)));
97 E : assembler.push(assm::AssemblerImpl::Operand(assm::ebp,
98 : assm::AssemblerImpl::Displacement(0x08, assm::kSize8Bit)));
99 :
100 : // Call the AllocateFromHeapManager function.
101 E : assembler.call(assm::AssemblerImpl::Immediate(
102 : reinterpret_cast<uint32_t>(&AllocateFromHeapManager), assm::kSize32Bit,
103 : NULL));
104 E : assembler.mov(assm::esp, assm::ebp);
105 E : assembler.pop(assm::ebp);
106 E : assembler.ret();
107 :
108 E : agent::common::StackCapture::AddFalseModule(
109 : "dummy_module.dll", allocation_code_page_, GetPageSize());
110 E : }
111 :
112 E : ~AllocateFromHeapManagerHelper() {
113 E : EXPECT_TRUE(::VirtualFree(allocation_code_page_, 0, MEM_RELEASE));
114 E : allocation_code_page_ = nullptr;
115 E : agent::common::StackCapture::ClearFalseModules();
116 E : }
117 :
118 E : void* operator()(size_t bytes) {
119 : using AllocFunctionPtr = void*(*)(BlockHeapManager* heap_manager,
120 : HeapId heap_id,
121 : size_t bytes);
122 E : uint8_t* func = reinterpret_cast<uint8_t*>(allocation_code_page_) + offset_;
123 E : return reinterpret_cast<AllocFunctionPtr>(func)(
124 : heap_manager_, heap_id_, bytes);
125 E : }
126 :
127 : private:
128 : // Do an allocation via a heap manager.
129 : static void* AllocateFromHeapManager(BlockHeapManager* heap_manager,
130 : HeapId heap_id,
131 E : size_t bytes) {
132 E : EXPECT_NE(nullptr, heap_manager);
133 E : return heap_manager->Allocate(heap_id, bytes);
134 E : }
135 :
136 : // The page that contains the dynamically generated code that does an
137 : // allocation via a heap manager.
138 : LPVOID allocation_code_page_;
139 :
140 : // The heap that serves the allocation.
141 : HeapId heap_id_;
142 :
143 : // The heap manager that owns the heap.
144 : BlockHeapManager* heap_manager_;
145 :
146 : // The offset within the page where the function starts. Different values of
147 : // this will cause different relative stack ID values.
148 : size_t offset_;
149 : };
150 :
151 : // A fake ZebraBlockHeap to simplify unit testing.
152 : // Wrapper with switches to enable/disable the quarantine and accept/refuse
153 : // allocations.
154 : class TestZebraBlockHeap : public heaps::ZebraBlockHeap {
155 : public:
156 : using ZebraBlockHeap::set_quarantine_ratio;
157 : using ZebraBlockHeap::quarantine_ratio;
158 : using ZebraBlockHeap::slab_count_;
159 :
160 : // Constructor.
161 E : explicit TestZebraBlockHeap(MemoryNotifierInterface* memory_notifier)
162 E : : ZebraBlockHeap(1024 * 1024, memory_notifier, &dummy_heap) {
163 E : refuse_allocations_ = false;
164 E : refuse_push_ = false;
165 E : }
166 :
167 : // Virtual destructor.
168 E : virtual ~TestZebraBlockHeap() { }
169 :
170 : // Wrapper that allows easily disabling allocations.
171 : void* AllocateBlock(size_t size,
172 : size_t min_left_redzone_size,
173 : size_t min_right_redzone_size,
174 E : BlockLayout* layout) override {
175 E : if (refuse_allocations_)
176 E : return nullptr;
177 E : return ZebraBlockHeap::AllocateBlock(size,
178 : min_left_redzone_size,
179 : min_right_redzone_size,
180 : layout);
181 E : }
182 :
183 : // Wrapper that allows easily disabling the insertion of new blocks in the
184 : // quarantine.
185 E : PushResult Push(const CompactBlockInfo& info) override {
186 E : if (refuse_push_) {
187 E : PushResult result = {false, 0};
188 E : return result;
189 : }
190 E : return ZebraBlockHeap::Push(info);
191 E : }
192 :
193 : // Enable/Disable future allocations.
194 E : void set_refuse_allocations(bool value) {
195 E : refuse_allocations_ = value;
196 E : }
197 :
198 : // Enable/Disable the insertion of blocks in the quarantine.
199 E : void set_refuse_push(bool value) {
200 E : refuse_push_ = value;
201 E : }
202 :
203 : protected:
204 : bool refuse_allocations_;
205 : bool refuse_push_;
206 :
207 : private:
208 : DISALLOW_COPY_AND_ASSIGN(TestZebraBlockHeap);
209 : };
210 :
211 : // A derived class to expose protected members for unit-testing.
212 : class TestBlockHeapManager : public BlockHeapManager {
213 : public:
214 : using BlockHeapManager::HeapQuarantinePair;
215 :
216 : using BlockHeapManager::FreePotentiallyCorruptBlock;
217 : using BlockHeapManager::GetHeapId;
218 : using BlockHeapManager::GetHeapFromId;
219 : using BlockHeapManager::GetHeapTypeUnlocked;
220 : using BlockHeapManager::GetQuarantineFromId;
221 : using BlockHeapManager::HeapMetadata;
222 : using BlockHeapManager::HeapQuarantineMap;
223 : using BlockHeapManager::IsValidHeapIdUnlocked;
224 : using BlockHeapManager::SetHeapErrorCallback;
225 : using BlockHeapManager::ShardedBlockQuarantine;
226 : using BlockHeapManager::TrimQuarantine;
227 :
228 : using BlockHeapManager::allocation_filter_flag_tls_;
229 : using BlockHeapManager::heaps_;
230 : using BlockHeapManager::large_block_heap_id_;
231 : using BlockHeapManager::locked_heaps_;
232 : using BlockHeapManager::parameters_;
233 : using BlockHeapManager::shared_quarantine_;
234 : using BlockHeapManager::zebra_block_heap_;
235 : using BlockHeapManager::zebra_block_heap_id_;
236 :
237 : // A derived class to expose protected members for unit-testing. This has to
238 : // be nested into this one because ShardedBlockQuarantine accesses some
239 : // protected fields of BlockHeapManager.
240 : //
241 : // This class should only expose some members or expose new functions, no new
242 : // member should be added.
243 : class TestQuarantine : public ShardedBlockQuarantine {
244 : public:
245 : using ShardedBlockQuarantine::Node;
246 : using ShardedBlockQuarantine::kShardingFactor;
247 : using ShardedBlockQuarantine::heads_;
248 : };
249 :
250 : // Constructor.
251 : TestBlockHeapManager(Shadow* shadow,
252 : StackCaptureCache* stack_cache,
253 : MemoryNotifierInterface* memory_notifier)
254 : : BlockHeapManager(shadow, stack_cache, memory_notifier) {}
255 :
256 : // Removes the heap with the given ID.
257 : void RemoveHeapById(HeapId heap_id) {
258 : if (heap_id == 0)
259 : return;
260 : BlockHeapInterface* heap = GetHeapFromId(heap_id);
261 : delete heap;
262 : EXPECT_EQ(1, heaps_.erase(heap));
263 : }
264 :
265 : // Wrapper for the set_parameters method. This also takes care of
266 : // reinitializing the variables that are usually initialized in the
267 : // constructor of a BlockHeapManager.
268 E : void SetParameters(const ::common::AsanParameters& params) {
269 : // Set the parameters.
270 : {
271 E : base::AutoLock lock(lock_);
272 E : parameters_ = params;
273 E : }
274 :
275 E : PropagateParameters();
276 E : }
277 :
278 : // Wrapper around DeferredFreeDoWork that allows for synchronization around
279 : // the actual work (pause for start and signal finish).
280 : void DeferredFreeDoWorkWithSync(base::WaitableEvent* start_event,
281 E : base::WaitableEvent* end_event) {
282 E : start_event->Wait();
283 E : BlockHeapManager::DeferredFreeDoWork();
284 E : end_event->Signal();
285 E : }
286 :
287 : // Enabled the deferred free thread with the above wrapper.
288 : void EnableDeferredFreeWithSync(base::WaitableEvent* start_event,
289 E : base::WaitableEvent* end_event) {
290 E : EnableDeferredFreeThreadWithCallback(
291 : base::Bind(&TestBlockHeapManager::DeferredFreeDoWorkWithSync,
292 : base::Unretained(this), start_event, end_event));
293 E : }
294 : };
295 :
296 : // A derived class to expose protected members for unit-testing.
class TestAsanRuntime : public agent::asan::AsanRuntime {
 public:
  // Exposed so the test fixture can reach the runtime's heap manager.
  using agent::asan::AsanRuntime::heap_manager_;
};
301 :
302 : // A utility class for manipulating a heap. This automatically deletes the heap
303 : // and its content in the destructor and provides some utility functions.
class ScopedHeap {
 public:
  typedef TestBlockHeapManager::TestQuarantine TestQuarantine;

  // Constructor. Creates a heap on |heap_manager| and builds the dynamic
  // allocation thunk that gives this heap's allocations a distinct relative
  // stack ID. The offset of 13 is arbitrary; any in-page offset works.
  explicit ScopedHeap(TestBlockHeapManager* heap_manager)
      : heap_manager_(heap_manager) {
    heap_id_ = heap_manager->CreateHeap();
    EXPECT_NE(0u, heap_id_);
    alloc_functor_.reset(new AllocateFromHeapManagerHelper(heap_manager,
                                                           heap_id_,
                                                           13));
  }

  // Destructor. Destroy the heap, this will flush its quarantine and delete all
  // the structures associated with this heap.
  ~ScopedHeap() {
    ReleaseHeap();
  }

  // Destroys the heap (at most once); further calls are no-ops.
  void ReleaseHeap() {
    if (heap_id_ != 0) {
      EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id_));
      heap_id_ = 0;
    }
  }

  // Retrieves the quarantine associated with this heap.
  BlockQuarantineInterface* GetQuarantine() {
    return heap_manager_->GetQuarantineFromId(heap_id_);
  }

  // Allocate a block of @p size bytes.
  void* Allocate(size_t size) {
    return (*alloc_functor_)(size);
  }

  // Free the block @p mem.
  bool Free(void* mem) {
    return heap_manager_->Free(heap_id_, mem);
  }

  // Flush the quarantine of this heap.
  // Empties the quarantine and frees every evicted block through
  // FreePotentiallyCorruptBlock.
  void FlushQuarantine() {
    BlockQuarantineInterface* quarantine = GetQuarantine();
    EXPECT_NE(static_cast<BlockQuarantineInterface*>(nullptr),
              quarantine);
    BlockQuarantineInterface::ObjectVector blocks_to_free;
    quarantine->Empty(&blocks_to_free);
    BlockQuarantineInterface::ObjectVector::iterator iter_block =
        blocks_to_free.begin();
    for (; iter_block != blocks_to_free.end(); ++iter_block) {
      const CompactBlockInfo& compact = *iter_block;
      BlockInfo expanded = {};
      ConvertBlockInfo(compact, &expanded);
      CHECK(heap_manager_->FreePotentiallyCorruptBlock(&expanded));
    }
  }

  // Returns the underlying heap ID.
  HeapId Id() { return heap_id_; }

  // Determines if the address @p mem corresponds to a block in the quarantine
  // of this heap.
  bool InQuarantine(const void* mem) {
    // As we'll cast an AsanShardedQuarantine directly into a TestQuarantine
    // there shouldn't be any new field defined by this class, this should only
    // act as an interface allowing to access some private fields.
    static_assert(sizeof(TestQuarantine) ==
                  sizeof(TestBlockHeapManager::ShardedBlockQuarantine),
                  "TestQuarantine isn't an interface.");
    TestQuarantine* test_quarantine =
        reinterpret_cast<TestQuarantine*>(GetQuarantine());
    EXPECT_NE(static_cast<TestQuarantine*>(nullptr), test_quarantine);
    // Search through all of the shards.
    for (size_t i = 0; i < test_quarantine->kShardingFactor; ++i) {
      // Search through all blocks in each shard.
      TestQuarantine::Node* current_node = test_quarantine->heads_[i];
      while (current_node != nullptr) {
        // The block body starts |header_size| bytes past the header.
        const uint8_t* body =
            reinterpret_cast<const uint8_t*>(current_node->object.header) +
            current_node->object.header_size;
        if (body == mem) {
          // A quarantined block must be in one of the two quarantine states.
          EXPECT_TRUE(
              current_node->object.header->state == QUARANTINED_BLOCK ||
              current_node->object.header->state == QUARANTINED_FLOODED_BLOCK);
          return true;
        }
        current_node = current_node->next;
      }
    }

    return false;
  }

  // Returns the heap supported features.
  uint32_t GetHeapFeatures() {
    return heap_manager_->GetHeapFromId(heap_id_)->GetHeapFeatures();
  }

 private:
  // The heap manager owning the underlying heap.
  TestBlockHeapManager* heap_manager_;

  // The underlying heap.
  HeapId heap_id_;

  // The allocation functor.
  std::unique_ptr<AllocateFromHeapManagerHelper> alloc_functor_;
};
414 :
415 : // A value-parameterized test class for testing the BlockHeapManager class.
416 : class BlockHeapManagerTest : public testing::TestWithAsanRuntime {
417 : public:
418 : typedef TestBlockHeapManager::ShardedBlockQuarantine ShardedBlockQuarantine;
419 : typedef testing::TestWithAsanRuntime Super;
420 :
421 : BlockHeapManagerTest()
422 E : : TestWithAsanRuntime(&test_runtime_), heap_manager_(),
423 E : test_zebra_block_heap_(nullptr) {
424 E : }
425 :
426 E : void SetUp() override {
427 E : Super::SetUp();
428 E : heap_manager_ = reinterpret_cast<TestBlockHeapManager*>(
429 : test_runtime_.heap_manager_.get());
430 :
431 E : override_manager_.OverrideRegistry(RegistryCache::kRegistryRootKey);
432 :
433 : // Set the error callback that the manager will use.
434 E : heap_manager_->SetHeapErrorCallback(
435 : base::Bind(&BlockHeapManagerTest::OnHeapError, base::Unretained(this)));
436 :
437 : ::common::AsanParameters params;
438 E : ::common::SetDefaultAsanParameters(¶ms);
439 E : heap_manager_->SetParameters(params);
440 E : }
441 :
442 E : void TearDown() override {
443 E : heap_manager_ = nullptr;
444 E : Super::TearDown();
445 E : }
446 :
447 E : void OnHeapError(AsanErrorInfo* error) {
448 E : errors_.push_back(*error);
449 E : }
450 :
451 : // Calculates the Asan size for an allocation of @p user_size bytes.
452 E : size_t GetAllocSize(size_t user_size) {
453 E : BlockLayout layout = {};
454 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, user_size, 0,
455 : heap_manager_->parameters().trailer_padding_size + sizeof(BlockTrailer),
456 : &layout));
457 E : return layout.block_size;
458 E : }
459 :
460 E : void EnableTestZebraBlockHeap() {
461 : // Erase previous ZebraBlockHeap.
462 E : if (heap_manager_->zebra_block_heap_ != 0) {
463 i : heap_manager_->heaps_.erase(heap_manager_->zebra_block_heap_);
464 i : delete heap_manager_->zebra_block_heap_;
465 : }
466 : // Plug a mock ZebraBlockHeap by default disabled.
467 E : test_zebra_block_heap_ = new TestZebraBlockHeap(
468 : runtime_->memory_notifier());
469 E : heap_manager_->zebra_block_heap_ = test_zebra_block_heap_;
470 : TestBlockHeapManager::HeapMetadata heap_metadata =
471 E : { test_zebra_block_heap_, false };
472 E : auto result = heap_manager_->heaps_.insert(std::make_pair(
473 : test_zebra_block_heap_, heap_metadata));
474 E : heap_manager_->zebra_block_heap_id_ = heap_manager_->GetHeapId(result);
475 :
476 : // Turn on the zebra_block_heap_enabled flag.
477 E : ::common::AsanParameters params = heap_manager_->parameters();
478 E : params.enable_zebra_block_heap = true;
479 E : heap_manager_->set_parameters(params);
480 E : }
481 :
482 E : void EnableLargeBlockHeap(size_t large_allocation_threshold) {
483 E : ::common::AsanParameters params = heap_manager_->parameters();
484 E : params.enable_large_block_heap = true;
485 E : params.large_allocation_threshold = large_allocation_threshold;
486 E : heap_manager_->set_parameters(params);
487 E : CHECK_NE(0u, heap_manager_->large_block_heap_id_);
488 E : }
489 :
490 : // Verifies that [alloc, alloc + size) is accessible, and that
491 : // [alloc - 1] and [alloc+size] are poisoned.
492 E : void VerifyAllocAccess(void* alloc, size_t size) {
493 E : uint8_t* mem = reinterpret_cast<uint8_t*>(alloc);
494 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem - 1));
495 E : ASSERT_TRUE(runtime_->shadow()->IsLeftRedzone(mem - 1));
496 E : for (size_t i = 0; i < size; ++i)
497 E : ASSERT_TRUE(runtime_->shadow()->IsAccessible(mem + i));
498 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem + size));
499 E : }
500 :
501 : // Verifies that [alloc-1, alloc+size] is poisoned.
502 E : void VerifyFreedAccess(void* alloc, size_t size) {
503 E : uint8_t* mem = reinterpret_cast<uint8_t*>(alloc);
504 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem - 1));
505 E : ASSERT_TRUE(runtime_->shadow()->IsLeftRedzone(mem - 1));
506 E : for (size_t i = 0; i < size; ++i) {
507 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem + i));
508 E : ASSERT_EQ(runtime_->shadow()->GetShadowMarkerForAddress(mem + i),
509 : kHeapFreedMarker);
510 E : }
511 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem + size));
512 E : }
513 :
514 : void QuarantineAltersBlockContents(
515 : float quarantine_flood_fill_rate,
516 : size_t iterations,
517 : size_t min_flood_filled,
518 E : size_t max_flood_filled) {
519 E : const size_t kAllocSize = 13;
520 E : ScopedHeap heap(heap_manager_);
521 : // Ensure that the quarantine is large enough to keep this block.
522 E : ::common::AsanParameters parameters = heap_manager_->parameters();
523 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
524 E : parameters.quarantine_flood_fill_rate = quarantine_flood_fill_rate;
525 E : heap_manager_->set_parameters(parameters);
526 :
527 : // This test gets run repeatedly, and it is expected that some portion of
528 : // the blocks contents will be flood-filled.
529 E : size_t flood_filled_count = 0;
530 E : for (size_t i = 0; i < iterations; ++i) {
531 : // Allocate a block and fill it with random data.
532 E : void* mem = heap.Allocate(kAllocSize);
533 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
534 E : base::RandBytes(mem, kAllocSize);
535 :
536 : // Hash the contents of the block before being quarantined.
537 E : unsigned char sha1_before[base::kSHA1Length] = {};
538 E : base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
539 : kAllocSize,
540 : sha1_before);
541 :
542 : // Free the block and ensure it gets quarantined.
543 E : BlockHeader* header = BlockGetHeaderFromBody(
544 : reinterpret_cast<BlockBody*>(mem));
545 E : ASSERT_TRUE(heap.Free(mem));
546 E : EXPECT_TRUE(
547 : static_cast<BlockState>(header->state) == QUARANTINED_BLOCK ||
548 : static_cast<BlockState>(header->state) == QUARANTINED_FLOODED_BLOCK);
549 :
550 E : if (static_cast<BlockState>(header->state) == QUARANTINED_BLOCK) {
551 : // If the block is quarantined and not flood-filled then ensure that the
552 : // contents have not changed.
553 E : unsigned char sha1_after[base::kSHA1Length] = {};
554 E : base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
555 : kAllocSize,
556 : sha1_after);
557 E : EXPECT_EQ(0, memcmp(sha1_before, sha1_after, base::kSHA1Length));
558 E : } else {
559 : // If the block is quarantined and flood-filled then ensure that has
560 : // actually happened.
561 E : EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
562 : static_cast<BlockState>(header->state));
563 E : BlockHeader* header = BlockGetHeaderFromBody(
564 : reinterpret_cast<BlockBody*>(mem));
565 E : BlockInfo block_info = {};
566 E : EXPECT_TRUE(BlockInfoFromMemory(header, &block_info));
567 E : EXPECT_TRUE(BlockBodyIsFloodFilled(block_info));
568 E : ++flood_filled_count;
569 : }
570 :
571 : // Ensure the quarantine is flushed. Otherwise the next block to be
572 : // allocated might not even make it into the quarantine because a block
573 : // is randomly evicted.
574 E : heap.FlushQuarantine();
575 E : }
576 :
577 E : EXPECT_LE(min_flood_filled, flood_filled_count);
578 E : EXPECT_LE(flood_filled_count, max_flood_filled);
579 E : }
580 :
581 : protected:
582 : // The heap manager used in these tests.
583 : TestBlockHeapManager* heap_manager_;
584 :
585 : // Info about the last errors reported.
586 : std::vector<AsanErrorInfo> errors_;
587 :
588 : // The mock ZebraBlockHeap used in the tests.
589 : TestZebraBlockHeap* test_zebra_block_heap_;
590 :
591 : // The runtime used by those tests.
592 : TestAsanRuntime test_runtime_;
593 :
594 : // Prevent the tests from polluting the registry.
595 : registry_util::RegistryOverrideManager override_manager_;
596 : };
597 :
598 : } // namespace
599 :
600 E : TEST_F(BlockHeapManagerTest, AllocAndFree) {
601 E : const size_t kAllocSize = 17;
602 E : HeapId heap_id = heap_manager_->CreateHeap();
603 E : EXPECT_NE(0u, heap_id);
604 E : void* alloc = heap_manager_->Allocate(heap_id, kAllocSize);
605 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
606 E : EXPECT_LE(kAllocSize, heap_manager_->Size(heap_id, alloc));
607 E : EXPECT_TRUE(heap_manager_->Free(heap_id, alloc));
608 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id));
609 E : }
610 :
611 E : TEST_F(BlockHeapManagerTest, FreeNullPointer) {
612 E : HeapId heap_id = heap_manager_->CreateHeap();
613 E : EXPECT_NE(0u, heap_id);
614 E : EXPECT_TRUE(heap_manager_->Free(heap_id, static_cast<void*>(nullptr)));
615 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id));
616 E : }
617 :
618 E : TEST_F(BlockHeapManagerTest, FreeUnguardedAlloc) {
619 E : const size_t kAllocSize = 100;
620 E : ::common::AsanParameters params = heap_manager_->parameters();
621 E : params.allocation_guard_rate = 0.0;
622 E : heap_manager_->set_parameters(params);
623 :
624 E : ScopedHeap heap(heap_manager_);
625 :
626 E : void* heap_alloc = heap.Allocate(kAllocSize);
627 E : EXPECT_NE(static_cast<void*>(nullptr), heap_alloc);
628 :
629 E : void* process_heap_alloc = ::HeapAlloc(::GetProcessHeap(), 0, kAllocSize);
630 E : EXPECT_NE(static_cast<void*>(nullptr), process_heap_alloc);
631 :
632 E : BlockHeapInterface* process_heap = heap_manager_->GetHeapFromId(
633 : heap_manager_->process_heap());
634 E : void* process_heap_wrapper_alloc = process_heap->Allocate(kAllocSize);
635 E : EXPECT_NE(static_cast<void*>(nullptr), process_heap_wrapper_alloc);
636 :
637 E : EXPECT_TRUE(heap_manager_->Free(heap.Id(), heap_alloc));
638 E : EXPECT_TRUE(heap_manager_->Free(heap_manager_->process_heap(),
639 E : process_heap_alloc));
640 E : EXPECT_TRUE(heap_manager_->Free(heap_manager_->process_heap(),
641 E : process_heap_wrapper_alloc));
642 E : }
643 :
644 E : TEST_F(BlockHeapManagerTest, PopOnSetQuarantineMaxSize) {
645 E : const size_t kAllocSize = 100;
646 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
647 E : ScopedHeap heap(heap_manager_);
648 E : void* mem = heap.Allocate(kAllocSize);
649 E : ASSERT_FALSE(heap.InQuarantine(mem));
650 :
651 E : ::common::AsanParameters parameters = heap_manager_->parameters();
652 E : parameters.quarantine_size = real_alloc_size;
653 E : heap_manager_->set_parameters(parameters);
654 :
655 E : ASSERT_TRUE(heap.Free(mem));
656 E : ASSERT_TRUE(heap.InQuarantine(mem));
657 :
658 : // We resize the quarantine to a smaller size, the block should pop out.
659 E : parameters.quarantine_size = real_alloc_size - 1;
660 E : heap_manager_->set_parameters(parameters);
661 E : ASSERT_FALSE(heap.InQuarantine(mem));
662 E : }
663 :
664 E : TEST_F(BlockHeapManagerTest, Quarantine) {
665 E : const size_t kAllocSize = 100;
666 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
667 E : const size_t number_of_allocs = 16;
668 E : ScopedHeap heap(heap_manager_);
669 :
670 E : ::common::AsanParameters parameters = heap_manager_->parameters();
671 E : parameters.quarantine_size = real_alloc_size * number_of_allocs;
672 E : heap_manager_->set_parameters(parameters);
673 :
674 : // Allocate a bunch of blocks until exactly one is removed from the
675 : // quarantine.
676 E : std::vector<void*> blocks;
677 E : for (size_t i = 0; i < number_of_allocs + 1; ++i) {
678 E : void* mem = heap.Allocate(kAllocSize);
679 E : ASSERT_TRUE(mem != nullptr);
680 E : heap.Free(mem);
681 E : blocks.push_back(mem);
682 E : if (i < number_of_allocs)
683 E : ASSERT_TRUE(heap.InQuarantine(mem));
684 E : }
685 :
686 E : size_t blocks_in_quarantine = 0;
687 E : for (size_t i = 0; i < blocks.size(); ++i) {
688 E : if (heap.InQuarantine(blocks[i]))
689 E : ++blocks_in_quarantine;
690 E : }
691 E : EXPECT_EQ(number_of_allocs, blocks_in_quarantine);
692 E : }
693 :
694 E : TEST_F(BlockHeapManagerTest, QuarantineLargeBlock) {
695 E : const size_t kLargeAllocSize = 100;
696 E : const size_t kSmallAllocSize = 25;
697 E : size_t real_large_alloc_size = GetAllocSize(kLargeAllocSize);
698 E : size_t real_small_alloc_size = GetAllocSize(kSmallAllocSize);
699 :
700 E : ScopedHeap heap(heap_manager_);
701 E : ::common::AsanParameters parameters = heap_manager_->parameters();
702 E : parameters.quarantine_size = real_large_alloc_size;
703 E : parameters.quarantine_block_size = real_large_alloc_size;
704 E : heap_manager_->set_parameters(parameters);
705 :
706 : // A block larger than the quarantine should not make it in.
707 E : void* mem1 = heap.Allocate(real_large_alloc_size + 1);
708 E : ASSERT_NE(static_cast<void*>(nullptr), mem1);
709 E : EXPECT_TRUE(heap.Free(mem1));
710 E : EXPECT_FALSE(heap.InQuarantine(mem1));
711 E : EXPECT_EQ(0u, heap.GetQuarantine()->GetCountForTesting());
712 :
713 : // A smaller block should make it because our current max block size allows
714 : // it.
715 E : void* mem2 = heap.Allocate(kSmallAllocSize);
716 E : ASSERT_NE(static_cast<void*>(nullptr), mem2);
717 E : EXPECT_TRUE(heap.Free(mem2));
718 E : EXPECT_TRUE(heap.InQuarantine(mem2));
719 :
720 E : parameters.quarantine_block_size = real_small_alloc_size - 1;
721 E : heap_manager_->set_parameters(parameters);
722 :
723 : // A second small block should not make it in since we changed the block size.
724 : // However, the other block should remain in the quarantine.
725 E : void* mem3 = heap.Allocate(kSmallAllocSize);
726 E : ASSERT_NE(static_cast<void*>(nullptr), mem3);
727 E : EXPECT_TRUE(heap.Free(mem3));
728 E : EXPECT_TRUE(heap.InQuarantine(mem2));
729 E : EXPECT_FALSE(heap.InQuarantine(mem3));
730 E : }
731 :
732 E : TEST_F(BlockHeapManagerTest, UnpoisonsQuarantine) {
733 E : const size_t kAllocSize = 100;
734 E : const size_t real_alloc_size = GetAllocSize(kAllocSize);
735 :
736 E : ScopedHeap heap(heap_manager_);
737 E : ::common::AsanParameters parameters = heap_manager_->parameters();
738 E : parameters.quarantine_size = real_alloc_size;
739 E : heap_manager_->set_parameters(parameters);
740 :
741 : // Allocate a memory block and directly free it, this puts it in the
742 : // quarantine.
743 E : void* mem = heap.Allocate(kAllocSize);
744 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
745 E : ASSERT_TRUE(heap.Free(mem));
746 E : ASSERT_TRUE(heap.InQuarantine(mem));
747 :
748 : // Assert that the shadow memory has been correctly poisoned.
749 E : intptr_t mem_start = reinterpret_cast<intptr_t>(BlockGetHeaderFromBody(
750 : reinterpret_cast<BlockBody*>(mem)));
751 E : ASSERT_EQ(0, (mem_start & 7) );
752 E : size_t shadow_start = mem_start >> 3;
753 E : size_t shadow_alloc_size = real_alloc_size >> 3;
754 E : for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i)
755 E : ASSERT_NE(kHeapAddressableMarker, runtime_->shadow()->shadow()[i]);
756 :
757 : // Flush the quarantine.
758 E : heap.FlushQuarantine();
759 :
760 : // Assert that the quarantine has been correctly unpoisoned.
761 E : for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i) {
762 : if ((heap.GetHeapFeatures() &
763 E : HeapInterface::kHeapReportsReservations) != 0) {
764 i : ASSERT_EQ(kAsanReservedMarker, runtime_->shadow()->shadow()[i]);
765 i : } else {
766 E : ASSERT_EQ(kHeapAddressableMarker, runtime_->shadow()->shadow()[i]);
767 : }
768 E : }
769 E : }
770 :
// Verifies that two heaps created by the same manager share one quarantine
// object, and that releasing a heap evicts only that heap's blocks from the
// shared quarantine (4 quarantined blocks -> 2 -> 0 as each heap is released).
771 E : TEST_F(BlockHeapManagerTest, QuarantineIsShared) {
772 E : const size_t kAllocSize = 100;
773 E : const size_t real_alloc_size = GetAllocSize(kAllocSize);
774 E : ScopedHeap heap_1(heap_manager_);
775 E : ScopedHeap heap_2(heap_manager_);
776 :
777 E : ASSERT_EQ(heap_1.GetQuarantine(), heap_2.GetQuarantine());
778 :
// Size the quarantine so it can hold all four blocks allocated below.
779 E : ::common::AsanParameters parameters = heap_manager_->parameters();
780 E : parameters.quarantine_size = real_alloc_size * 4;
781 E : heap_manager_->set_parameters(parameters);
782 :
783 E : void* heap_1_mem1 = heap_1.Allocate(kAllocSize);
784 E : ASSERT_NE(static_cast<void*>(nullptr), heap_1_mem1);
785 E : void* heap_1_mem2 = heap_1.Allocate(kAllocSize);
786 E : ASSERT_NE(static_cast<void*>(nullptr), heap_1_mem2);
787 E : void* heap_2_mem1 = heap_2.Allocate(kAllocSize);
788 E : ASSERT_NE(static_cast<void*>(nullptr), heap_2_mem1);
789 E : void* heap_2_mem2 = heap_2.Allocate(kAllocSize);
790 E : ASSERT_NE(static_cast<void*>(nullptr), heap_2_mem2);
791 :
792 E : EXPECT_TRUE(heap_1.Free(heap_1_mem1));
793 E : EXPECT_TRUE(heap_1.Free(heap_1_mem2));
794 E : EXPECT_TRUE(heap_2.Free(heap_2_mem1));
795 E : EXPECT_TRUE(heap_2.Free(heap_2_mem2));
796 :
797 E : EXPECT_TRUE(heap_1.InQuarantine(heap_1_mem1));
798 E : EXPECT_TRUE(heap_1.InQuarantine(heap_1_mem2));
799 E : EXPECT_TRUE(heap_2.InQuarantine(heap_2_mem1));
800 E : EXPECT_TRUE(heap_2.InQuarantine(heap_2_mem2));
801 :
// Releasing each heap should remove exactly its own two blocks from the
// shared quarantine.
802 E : BlockQuarantineInterface* quarantine = heap_1.GetQuarantine();
803 E : EXPECT_EQ(4, quarantine->GetCountForTesting());
804 E : heap_2.ReleaseHeap();
805 E : EXPECT_EQ(2, quarantine->GetCountForTesting());
806 E : heap_1.ReleaseHeap();
807 E : EXPECT_EQ(0, quarantine->GetCountForTesting());
808 E : }
809 :
// Zero-byte allocations must succeed, return distinct non-null pointers, and
// be freeable.
810 E : TEST_F(BlockHeapManagerTest, AllocZeroBytes) {
811 E : ScopedHeap heap(heap_manager_);
812 E : void* mem1 = heap.Allocate(0);
813 E : ASSERT_NE(static_cast<void*>(nullptr), mem1);
814 E : void* mem2 = heap.Allocate(0);
815 E : ASSERT_NE(static_cast<void*>(nullptr), mem2);
816 E : ASSERT_NE(mem1, mem2);
817 E : ASSERT_TRUE(heap.Free(mem1));
818 E : ASSERT_TRUE(heap.Free(mem2));
819 E : }
820 :
// An allocation of SIZE_MAX cannot be satisfied (the redzones alone would
// overflow the block size computation) and must return nullptr.
821 E : TEST_F(BlockHeapManagerTest, AllocInvalidBlockSize) {
822 E : ScopedHeap heap(heap_manager_);
823 E : const size_t kInvalidSize = SIZE_MAX;
824 E : void* mem = heap.Allocate(kInvalidSize);
825 E : ASSERT_EQ(static_cast<void*>(nullptr), mem);
826 E : }
827 :
// Size() must report exactly the user-requested size (not the padded block
// size) for a range of allocation sizes.
828 E : TEST_F(BlockHeapManagerTest, Size) {
829 E : const size_t kMaxAllocSize = 134584;
830 E : ScopedHeap heap(heap_manager_);
// The "* 5 + 123" stride samples many sizes without making the test slow.
831 E : for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
832 E : void* mem = heap.Allocate(size);
833 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
834 E : ASSERT_EQ(size, heap_manager_->Size(heap.Id(), mem));
835 E : ASSERT_TRUE(heap.Free(mem));
836 E : }
837 E : }
838 :
// For a range of sizes, a live allocation's body must be accessible and a
// freed (quarantined) allocation's body must be poisoned.
839 E : TEST_F(BlockHeapManagerTest, AllocsAccessibility) {
840 E : const size_t kMaxAllocSize = 134584;
841 E : ScopedHeap heap(heap_manager_);
842 : // Ensure that the quarantine is large enough to keep the allocated blocks in
843 : // this test.
844 E : ::common::AsanParameters parameters = heap_manager_->parameters();
845 E : parameters.quarantine_size = kMaxAllocSize * 2;
846 E : heap_manager_->set_parameters(parameters);
847 E : for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
848 : // Do an alloc/free and test that access is correctly managed.
849 E : void* mem = heap.Allocate(size);
850 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
851 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(mem, size));
852 E : ASSERT_TRUE(heap.Free(mem));
853 E : ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(mem, size));
854 E : }
855 E : }
856 :
// Smoke test: Lock/Unlock on a heap must not crash. There is no observable
// state to assert beyond the absence of a fatal failure.
857 E : TEST_F(BlockHeapManagerTest, LockUnlock) {
858 E : ScopedHeap heap(heap_manager_);
859 : // We can't really test these, aside from not crashing.
860 E : ASSERT_NO_FATAL_FAILURE(heap_manager_->Lock(heap.Id()));
861 E : ASSERT_NO_FATAL_FAILURE(heap_manager_->Unlock(heap.Id()));
862 E : }
863 :
// The block trailer must record the allocating and freeing thread IDs; both
// are the current thread here since the test does alloc and free itself.
864 E : TEST_F(BlockHeapManagerTest, CaptureTID) {
865 E : const size_t kAllocSize = 13;
866 E : ScopedHeap heap(heap_manager_);
867 : // Ensure that the quarantine is large enough to keep this block.
868 E : ::common::AsanParameters parameters = heap_manager_->parameters();
869 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
870 E : heap_manager_->set_parameters(parameters);
871 E : uint8_t* mem = static_cast<uint8_t*>(heap.Allocate(kAllocSize));
872 E : BlockBody* body = reinterpret_cast<BlockBody*>(mem);
873 E : ASSERT_TRUE(heap.Free(mem));
// After the free the block must sit in the quarantine (possibly flood-filled)
// so that its header and trailer are still inspectable.
874 E : BlockHeader* header = BlockGetHeaderFromBody(body);
875 E : ASSERT_NE(static_cast<BlockHeader*>(nullptr), header);
876 E : EXPECT_TRUE(header->state == QUARANTINED_BLOCK ||
877 E : header->state == QUARANTINED_FLOODED_BLOCK);
878 E : BlockInfo block_info = {};
879 E : EXPECT_TRUE(BlockInfoFromMemory(header, &block_info));
880 E : EXPECT_NE(static_cast<BlockTrailer*>(nullptr), block_info.trailer);
881 :
882 E : EXPECT_EQ(block_info.trailer->alloc_tid, ::GetCurrentThreadId());
883 E : EXPECT_EQ(block_info.trailer->free_tid, ::GetCurrentThreadId());
884 E : }
885 :
// With a flood-fill rate of 0.0, none of the 10 quarantined blocks may be
// flood-filled (expected count range [0, 0]).
886 E : TEST_F(BlockHeapManagerTest, QuarantineNeverAltersBlockContents) {
887 : // No blocks should be flood-filled when the feature is disabled.
888 E : EXPECT_NO_FATAL_FAILURE(QuarantineAltersBlockContents(0.0f, 10, 0, 0));
889 E : }
890 :
// With a flood-fill rate of 0.5 over 100 blocks, the flood-filled count is
// expected within 3 standard deviations (+/- 15) of the mean of 50.
891 E : TEST_F(BlockHeapManagerTest, QuarantineSometimesAltersBlockContents) {
892 : // 100 fair coin tosses has a stddev of 5. The flood filled count will pretty
893 : // much always be within 3 stddevs of half of the tests unless something went
894 : // terribly wrong.
895 E : EXPECT_NO_FATAL_FAILURE(QuarantineAltersBlockContents(
896 E : 0.5f, 100, 50 - 3 * 5, 50 + 3 * 5));
897 E : }
898 :
// With a flood-fill rate of 1.0, all 10 quarantined blocks must be
// flood-filled (expected count range [10, 10]).
899 E : TEST_F(BlockHeapManagerTest, QuarantineAlwaysAltersBlockContents) {
900 : // All blocks should be flood-filled.
901 E : EXPECT_NO_FATAL_FAILURE(QuarantineAltersBlockContents(1.0f, 10, 10, 10));
902 E : }
903 :
// Increasing trailer_padding_size must grow the allocation footprint, and
// every byte past the user body (trailer padding included) must be
// inaccessible in shadow memory. Restores the original parameters at the end.
904 E : TEST_F(BlockHeapManagerTest, SetTrailerPaddingSize) {
905 E : const size_t kAllocSize = 13;
906 E : ScopedHeap heap(heap_manager_);
907 : // Ensure that the quarantine is large enough to keep this block with the
908 : // extra padding.
909 E : ::common::AsanParameters parameters = heap_manager_->parameters();
910 E : parameters.quarantine_size = GetAllocSize(kAllocSize) * 5;
911 E : heap_manager_->set_parameters(parameters);
912 E : size_t original_alloc_size = GetAllocSize(kAllocSize);
913 E : ::common::AsanParameters original_parameter = heap_manager_->parameters();
914 :
915 E : for (size_t padding = 0; padding < 16; ++padding) {
916 E : ::common::AsanParameters new_parameter = original_parameter;
917 E : new_parameter.trailer_padding_size =
918 : original_parameter.trailer_padding_size + padding;
919 E : heap_manager_->set_parameters(new_parameter);
920 E : size_t augmented_alloc_size = GetAllocSize(kAllocSize);
921 E : EXPECT_GE(augmented_alloc_size, original_alloc_size);
922 :
923 E : void* mem = heap.Allocate(kAllocSize);
924 E : ASSERT_TRUE(mem != nullptr);
925 :
// Every byte between the end of the body and the trailer (exclusive of the
// header-sized tail of the computed range) must be poisoned.
926 E : size_t offset = kAllocSize;
927 E : for (; offset < augmented_alloc_size - sizeof(BlockHeader);
928 E : ++offset) {
929 E : EXPECT_FALSE(runtime_->shadow()->IsAccessible(
930 E : reinterpret_cast<const uint8_t*>(mem) + offset));
931 E : }
932 E : ASSERT_TRUE(heap.Free(mem));
933 E : }
934 E : heap_manager_->set_parameters(original_parameter);
935 E : }
936 :
// The block checksum must be valid both while the block is live and after it
// has been freed into the quarantine (Free recomputes it on state change).
937 E : TEST_F(BlockHeapManagerTest, BlockChecksumUpdatedWhenEnterQuarantine) {
938 E : const size_t kAllocSize = 100;
939 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
940 E : ScopedHeap heap(heap_manager_);
941 :
942 E : ::common::AsanParameters parameters = heap_manager_->parameters();
943 E : parameters.quarantine_size = real_alloc_size;
944 E : heap_manager_->set_parameters(parameters);
945 :
946 E : void* mem = heap.Allocate(kAllocSize);
947 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
948 E : BlockInfo block_info = {};
949 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(mem, &block_info));
950 E : EXPECT_TRUE(BlockChecksumIsValid(block_info));
951 E : heap.Free(mem);
952 E : EXPECT_TRUE(BlockChecksumIsValid(block_info));
953 E : ASSERT_TRUE(heap.InQuarantine(mem));
954 E : }
955 :
956 : static const size_t kChecksumRepeatCount = 10;
957 :
// Corrupting a block's header (the int immediately before the body) before
// freeing it must be detected as the block enters the quarantine, raising a
// single CORRUPT_BLOCK error located at the body.
958 E : TEST_F(BlockHeapManagerTest, CorruptAsEntersQuarantine) {
959 E : const size_t kAllocSize = 100;
960 E : ::common::AsanParameters parameters = heap_manager_->parameters();
961 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
962 E : heap_manager_->set_parameters(parameters);
963 :
964 E : ScopedHeap heap(heap_manager_);
965 : // This can fail because of a checksum collision. However, we run it a
966 : // handful of times to keep the chances as small as possible.
967 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
968 E : heap.FlushQuarantine();
969 E : void* mem = heap.Allocate(kAllocSize);
970 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
// Index -1 stomps the tail of the block header, corrupting the block.
971 E : reinterpret_cast<int*>(mem)[-1] = rand();
972 E : EXPECT_TRUE(heap.Free(mem));
973 :
974 : // Try again for all but the last attempt if this appears to have failed.
975 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
976 i : continue;
977 :
978 E : ASSERT_EQ(1u, errors_.size());
979 E : ASSERT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
980 E : ASSERT_EQ(mem, errors_[0].location);
981 :
982 E : break;
983 i : }
984 E : }
985 :
// Corrupting a block's body while it sits in the quarantine must be detected
// when the quarantine is flushed, raising a CORRUPT_BLOCK error whose
// location is the block header (one BlockHeader before the body).
986 E : TEST_F(BlockHeapManagerTest, CorruptAsExitsQuarantine) {
987 E : const size_t kAllocSize = 100;
988 E : ::common::AsanParameters parameters = heap_manager_->parameters();
989 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
990 E : heap_manager_->set_parameters(parameters);
991 :
992 E : ScopedHeap heap(heap_manager_);
993 : // This can fail because of a checksum collision. However, we run it a
994 : // handful of times to keep the chances as small as possible.
995 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
996 E : heap.FlushQuarantine();
997 E : void* mem = heap.Allocate(kAllocSize);
998 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
999 E : EXPECT_TRUE(heap.Free(mem));
1000 E : EXPECT_TRUE(errors_.empty());
1001 :
1002 : // Change some of the block content and then flush the quarantine. The block
1003 : // hash should be invalid and it should cause an error to be fired.
1004 E : reinterpret_cast<int32_t*>(mem)[0] = rand();
1005 E : heap.FlushQuarantine();
1006 :
1007 : // Try again for all but the last attempt if this appears to have failed.
1008 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
1009 i : continue;
1010 :
1011 E : EXPECT_EQ(1u, errors_.size());
1012 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
1013 E : EXPECT_EQ(reinterpret_cast<const BlockHeader*>(mem) - 1,
1014 E : reinterpret_cast<const BlockHeader*>(errors_[0].location));
1015 :
1016 E : break;
1017 i : }
1018 E : }
1019 :
// Like CorruptAsExitsQuarantine, but the quarantined blocks are drained by
// the heap's destruction (ScopedHeap leaving scope) rather than an explicit
// flush; the corruption must still be reported as CORRUPT_BLOCK.
1020 E : TEST_F(BlockHeapManagerTest, CorruptAsExitsQuarantineOnHeapDestroy) {
1021 E : const size_t kAllocSize = 100;
1022 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1023 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
1024 E : heap_manager_->set_parameters(parameters);
1025 :
1026 : // This can fail because of a checksum collision. However, we run it a
1027 : // handful of times to keep the chances as small as possible.
1028 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
1029 E : void* mem = nullptr;
1030 : {
1031 E : ScopedHeap heap(heap_manager_);
1032 E : heap.FlushQuarantine();
1033 E : mem = heap.Allocate(kAllocSize);
1034 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1035 E : EXPECT_TRUE(heap.Free(mem));
1036 E : EXPECT_TRUE(errors_.empty());
1037 :
1038 : // Change some of the block content to invalidate the block's hash.
1039 E : reinterpret_cast<int32_t*>(mem)[0] = rand();
1040 E : }
1041 :
1042 : // The destructor of |heap| should be called and all the quarantined blocks
1043 : // belonging to this heap should be freed, which should trigger an error as
1044 : // the block is now corrupt.
1045 :
1046 : // Try again for all but the last attempt if this appears to have failed.
1047 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
1048 i : continue;
1049 :
1050 E : EXPECT_EQ(1u, errors_.size());
1051 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
1052 E : EXPECT_EQ(reinterpret_cast<const BlockHeader*>(mem) - 1,
1053 E : reinterpret_cast<const BlockHeader*>(errors_[0].location));
1054 :
1055 E : break;
1056 i : }
1057 E : }
1058 :
// NOTE(review): despite the name, this test body is identical to
// CorruptAsExitsQuarantineOnHeapDestroy above — the corruption is detected
// when the heap is destroyed, and no quarantine trim is explicitly
// exercised. Presumably it was meant to trigger detection via Trim; confirm
// against the intended coverage.
1059 E : TEST_F(BlockHeapManagerTest, CorruptHeapOnTrimQuarantine) {
1060 E : const size_t kAllocSize = 100;
1061 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1062 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
1063 E : heap_manager_->set_parameters(parameters);
1064 :
1065 : // This can fail because of a checksum collision. However, we run it a
1066 : // handful of times to keep the chances as small as possible.
1067 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
1068 E : void* mem = nullptr;
1069 : {
1070 E : ScopedHeap heap(heap_manager_);
1071 E : heap.FlushQuarantine();
1072 E : mem = heap.Allocate(kAllocSize);
1073 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1074 E : EXPECT_TRUE(heap.Free(mem));
1075 E : EXPECT_TRUE(errors_.empty());
1076 :
1077 : // Change some of the block content to invalidate the block's hash.
1078 E : reinterpret_cast<int32_t*>(mem)[0] = rand();
1079 E : }
1080 :
1081 : // The destructor of |heap| should be called and all the quarantined blocks
1082 : // belonging to this heap should be freed, which should trigger an error as
1083 : // the block is now corrupt.
1084 :
1085 : // Try again for all but the last attempt if this appears to have failed.
1086 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
1087 i : continue;
1088 :
1089 E : EXPECT_EQ(1u, errors_.size());
1090 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
1091 E : EXPECT_EQ(reinterpret_cast<const BlockHeader*>(mem) - 1,
1092 E : reinterpret_cast<const BlockHeader*>(errors_[0].location));
1093 :
1094 E : break;
1095 i : }
1096 E : }
1097 :
1098 : // Prevent this test from being optimized, otherwise the loop that does the
1099 : // blocks allocations might get unwound and they won't have the same allocation
1100 : // stack trace.
1101 : #pragma optimize("", off)
// With prevent_duplicate_corruption_crashes enabled, corrupting many blocks
// that share one allocation stack must produce at most one CORRUPT_BLOCK
// report when the quarantine is emptied; subsequent corrupt blocks with the
// same stack id are freed silently.
1102 E : TEST_F(BlockHeapManagerTest, CorruptionIsReportedOnlyOnce) {
1103 E : const size_t kAllocSize = 100;
1104 E : const size_t kAllocs = 100;
1105 E : ASSERT_GT(kAllocs, kChecksumRepeatCount);
1106 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1107 E : parameters.quarantine_size = kAllocs * GetAllocSize(kAllocSize);
1108 E : parameters.prevent_duplicate_corruption_crashes = true;
1109 E : heap_manager_->set_parameters(parameters);
1110 :
1111 E : ScopedHeap heap(heap_manager_);
1112 E : std::vector<void*> allocs(kAllocs);
1113 :
1114 : // Allocate and free a lot of blocks with an identical stack id and corrupt
1115 : // them while they're in the quarantine.
1116 E : for (size_t i = 0; i < kAllocs; ++i) {
1117 E : void* mem = heap.Allocate(kAllocSize);
1118 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1119 E : EXPECT_TRUE(heap.Free(mem));
1120 E : EXPECT_TRUE(errors_.empty());
1121 :
1122 : // Change some of the block content to corrupt it.
1123 E : reinterpret_cast<int32_t*>(mem)[0] ^= 0xFFFFFFFF;
1124 E : }
1125 :
1126 : // Empty the quarantine and free all the blocks that were in it. We should be
1127 : // reporting an error only for the first one.
1128 E : BlockQuarantineInterface::ObjectVector blocks;
1129 E : heap.GetQuarantine()->Empty(&blocks);
1130 E : bool first_corrupt_block_has_been_found = false;
1131 E : size_t i = 0;
1132 E : for (auto block : blocks) {
1133 E : errors_.clear();
1134 E : BlockInfo block_info = {};
1135 E : ConvertBlockInfo(block, &block_info);
1136 E : heap_manager_->FreePotentiallyCorruptBlock(&block_info);
// Only the first corrupt block (found within the first few iterations) may
// raise an error; everything afterwards must be silent.
1137 E : if (!first_corrupt_block_has_been_found && i < kChecksumRepeatCount) {
1138 E : if (!errors_.empty()) {
1139 E : EXPECT_EQ(1u, errors_.size());
1140 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
1141 E : first_corrupt_block_has_been_found = true;
1142 : }
1143 E : } else {
1144 E : EXPECT_TRUE(errors_.empty());
1145 : }
1146 E : ++i;
1147 E : }
1148 E : }
1149 : #pragma optimize("", on)
1150 :
// Freeing the same pointer twice must fail on the second call and report a
// single DOUBLE_FREE error at the freed address.
1151 E : TEST_F(BlockHeapManagerTest, DoubleFree) {
1152 E : const size_t kAllocSize = 100;
1153 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1154 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
1155 E : heap_manager_->set_parameters(parameters);
1156 :
1157 E : ScopedHeap heap(heap_manager_);
1158 E : void* mem = heap.Allocate(kAllocSize);
1159 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1160 E : EXPECT_TRUE(heap.Free(mem));
1161 E : EXPECT_FALSE(heap.Free(mem));
1162 :
1163 E : EXPECT_EQ(1u, errors_.size());
1164 E : EXPECT_EQ(DOUBLE_FREE, errors_[0].error_type);
1165 E : EXPECT_EQ(mem, errors_[0].location);
1166 E : }
1167 :
// With allocation_guard_rate at 0.5, roughly half of 10000 allocations should
// receive guards (block header present); the final bounds allow a very wide
// statistical margin (40%-60%).
1168 E : TEST_F(BlockHeapManagerTest, SubsampledAllocationGuards) {
1169 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1170 E : parameters.allocation_guard_rate = 0.5;
1171 E : heap_manager_->set_parameters(parameters);
1172 E : ScopedHeap heap(heap_manager_);
1173 :
1174 E : size_t guarded_allocations = 0;
1175 E : size_t unguarded_allocations = 0;
1176 :
1177 : // Make a handful of allocations.
1178 E : const size_t kAllocationCount = 10000;
1179 : const size_t kAllocationSizes[] = {
1180 E : 1, 2, 4, 8, 14, 30, 128, 237, 500, 1000, 2036 };
1181 E : std::vector<void*> allocations;
1182 E : for (size_t i = 0; i < kAllocationCount; ++i) {
1183 E : size_t alloc_size = kAllocationSizes[i % arraysize(kAllocationSizes)];
1184 E : void* alloc = heap.Allocate(alloc_size);
1185 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1186 :
// NOTE(review): this inner |i| shadows the outer loop counter; harmless here
// because the outer |i| isn't read again until its own increment.
1187 E : for (size_t i = 0; i < alloc_size; ++i) {
1188 E : EXPECT_TRUE(runtime_->shadow()->IsAccessible(
1189 E : reinterpret_cast<uint8_t*>(alloc) + i));
1190 E : }
1191 :
1192 : // Determine if the allocation has guards or not.
1193 E : BlockHeader* header = BlockGetHeaderFromBody(
1194 : reinterpret_cast<BlockBody*>(alloc));
1195 E : if (header == nullptr) {
1196 E : ++unguarded_allocations;
1197 E : } else {
1198 E : ++guarded_allocations;
1199 : }
1200 :
1201 : if ((heap.GetHeapFeatures() &
1202 E : HeapInterface::kHeapSupportsGetAllocationSize) != 0) {
1203 : if ((heap.GetHeapFeatures() &
1204 E : HeapInterface::kHeapGetAllocationSizeIsUpperBound) != 0) {
1205 i : EXPECT_LE(alloc_size, heap_manager_->Size(heap.Id(), alloc));
1206 i : } else {
1207 E : EXPECT_EQ(alloc_size, heap_manager_->Size(heap.Id(), alloc));
1208 : }
1209 : }
1210 :
1211 : // Delete half of the allocations immediately, and keep half of them
1212 : // around for longer. This puts more of a stress test on the quarantine
1213 : // itself.
1214 E : if (base::RandDouble() < 0.5) {
1215 E : EXPECT_TRUE(heap.Free(alloc));
1216 E : } else {
1217 E : allocations.push_back(alloc);
1218 : }
1219 E : }
1220 :
1221 : // Free the outstanding allocations.
1222 E : for (size_t i = 0; i < allocations.size(); ++i)
1223 E : EXPECT_TRUE(heap.Free(allocations[i]));
1224 :
1225 : // Clear the quarantine. This should free up the remaining instrumented
1226 : // but quarantined blocks.
1227 E : EXPECT_NO_FATAL_FAILURE(heap.FlushQuarantine());
1228 :
1229 : // This could theoretically fail, but that would imply an extremely bad
1230 : // implementation of the underlying random number generator. There are 10000
1231 : // allocations. Since this is effectively a fair coin toss we expect a
1232 : // standard deviation of 0.5 * sqrt(10000) = 50. A 10% margin is
1233 : // 1000 / 50 = 20 standard deviations. For |z| > 20, the p-value is 5.5e-89,
1234 : // or 89 nines of confidence. That should keep any flake largely at bay.
1235 : // Thus, if this fails it's pretty much certain the implementation is at
1236 : // fault.
1237 E : EXPECT_LT(4 * kAllocationCount / 10, guarded_allocations);
1238 E : EXPECT_GT(6 * kAllocationCount / 10, guarded_allocations);
1239 E : }
1240 :
1241 : // Ensures that the ZebraBlockHeap overrides the provided heap.
// When the zebra block heap is enabled, a small allocation must be routed to
// it: the block trailer's heap_id must equal the manager's zebra heap id.
1242 E : TEST_F(BlockHeapManagerTest, ZebraHeapIdInTrailerAfterAllocation) {
1243 E : EnableTestZebraBlockHeap();
1244 E : ScopedHeap heap(heap_manager_);
1245 E : const size_t kAllocSize = 0x100;
1246 E : void* alloc = heap.Allocate(kAllocSize);
1247 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1248 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1249 :
1250 : // Get the heap_id from the block trailer.
1251 E : BlockInfo block_info = {};
1252 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1253 :
1254 : {
// ScopedBlockAccess lifts page protections so the trailer can be read.
1255 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1256 : // The heap_id stored in the block trailer should match the zebra heap id.
1257 E : EXPECT_EQ(heap_manager_->zebra_block_heap_id_,
1258 E : block_info.trailer->heap_id);
1259 E : }
1260 :
1261 E : EXPECT_TRUE(heap.Free(alloc));
1262 E : }
1263 :
1264 : // Ensures that the provided heap is used when the ZebraBlockHeap cannot handle
1265 : // the allocation.
// When the zebra heap refuses an allocation, the manager must fall back to
// the provided heap; the trailer's heap_id must then be that heap's id.
1266 E : TEST_F(BlockHeapManagerTest, DefaultHeapIdInTrailerWhenZebraHeapIsFull) {
1267 E : EnableTestZebraBlockHeap();
1268 E : ScopedHeap heap(heap_manager_);
1269 E : const size_t kAllocSize = 0x100;
1270 : // Refuse allocations on the ZebraBlockHeap.
1271 E : test_zebra_block_heap_->set_refuse_allocations(true);
1272 :
1273 E : void* alloc = heap.Allocate(kAllocSize);
1274 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1275 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1276 :
1277 : // Get the heap_id from the block trailer.
1278 E : BlockInfo block_info = {};
1279 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1280 : {
1281 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1282 : // The heap_id stored in the block trailer match the provided heap.
1283 E : EXPECT_EQ(heap.Id(), block_info.trailer->heap_id);
1284 E : }
1285 E : EXPECT_TRUE(heap.Free(alloc));
1286 E : }
1287 :
1288 : // Allocations larger than the page size (4KB) will not be served by the zebra
1289 : // heap.
// Stress test mixing sizes from 0 up to ~9KB so allocations are spread across
// the zebra heap (small) and the fallback heap (larger than a page); every
// alloc/free round-trip must succeed.
1290 E : TEST_F(BlockHeapManagerTest, AllocStress) {
1291 E : EnableTestZebraBlockHeap();
1292 E : ScopedHeap heap(heap_manager_);
1293 E : for (size_t i = 0; i < 3000; ++i) {
1294 : // Sometimes allocate more than one page, to ensure that allocations get
1295 : // spread across the ZebraBlockheap and normal heaps.
1296 E : const size_t kAllocSize = (i * 997) % (9 * 1024);
1297 E : void* alloc = heap.Allocate(kAllocSize);
1298 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1299 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1300 : // Free should succeed, even if the block is quarantined.
1301 E : EXPECT_TRUE(heap.Free(alloc));
1302 E : }
1303 E : }
1304 :
1305 : // The BlockHeapManager correctly quarantines the memory after free.
// After Free, a zebra-heap block (quarantine ratio 1.0 so it's always kept)
// must be poisoned and its header state must be one of the quarantined
// states.
1306 E : TEST_F(BlockHeapManagerTest, QuarantinedAfterFree) {
1307 E : EnableTestZebraBlockHeap();
1308 E : ScopedHeap heap(heap_manager_);
1309 : // Always quarantine if possible.
1310 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1311 :
1312 E : const size_t kAllocSize = 0x100;
1313 E : void* alloc = heap.Allocate(kAllocSize);
1314 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1315 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1316 : // Free should succeed, even if the block is quarantined.
1317 E : EXPECT_TRUE(heap.Free(alloc));
1318 : // The block should be quarantined and poisoned.
1319 E : ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(alloc, kAllocSize));
1320 E : BlockInfo block_info = {};
1321 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1322 :
1323 : {
1324 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1325 E : EXPECT_TRUE(block_info.header->state == QUARANTINED_BLOCK ||
1326 E : block_info.header->state == QUARANTINED_FLOODED_BLOCK);
1327 E : }
1328 E : }
1329 :
1330 : // set_parameters should set the zebra_block_heap_quarantine_ratio flag
1331 : // correctly.
// set_parameters must propagate zebra_block_heap_quarantine_ratio to the
// zebra block heap instance.
1332 E : TEST_F(BlockHeapManagerTest, set_parametersSetsZebraBlockHeapQuarantineRatio) {
1333 E : EnableTestZebraBlockHeap();
1334 E : float new_ratio = 1.0f / 8;
1335 E : ::common::AsanParameters params = heap_manager_->parameters();
1336 E : params.zebra_block_heap_quarantine_ratio = new_ratio;
1337 E : heap_manager_->set_parameters(params);
1338 E : EXPECT_EQ(new_ratio, test_zebra_block_heap_->quarantine_ratio());
1339 E : }
1340 :
1341 : // Test for double free errors using the zebra heap.
// Double-free detection must also work for blocks served by the zebra heap:
// the second Free fails and a single DOUBLE_FREE error is reported.
1342 E : TEST_F(BlockHeapManagerTest, DoubleFreeOnZebraHeap) {
1343 E : EnableTestZebraBlockHeap();
1344 E : ScopedHeap heap(heap_manager_);
1345 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1346 :
1347 E : const size_t kAllocSize = 0xFF;
1348 E : void* alloc = heap.Allocate(kAllocSize);
1349 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1350 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1351 :
1352 E : EXPECT_TRUE(heap.Free(alloc));
1353 E : EXPECT_FALSE(heap.Free(alloc));
1354 :
1355 E : EXPECT_EQ(1u, errors_.size());
1356 E : EXPECT_EQ(DOUBLE_FREE, errors_[0].error_type);
1357 E : EXPECT_EQ(alloc, errors_[0].location);
1358 E : }
1359 :
// For a live zebra-heap block: the body is accessible (both via shadow and
// via real memory probes), the page-aligned parts of both redzones are
// page-protected, and the header state is ALLOCATED_BLOCK.
1360 E : TEST_F(BlockHeapManagerTest, AllocatedBlockIsProtected) {
1361 E : EnableTestZebraBlockHeap();
1362 E : ScopedHeap heap(heap_manager_);
1363 :
1364 E : const size_t kAllocSize = 0xFF;
1365 E : void* alloc = heap.Allocate(kAllocSize);
1366 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1367 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1368 :
1369 E : BlockInfo block_info = {};
1370 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1371 :
1372 : // Test the block protections before being quarantined.
1373 : // The whole block should be unpoisoned in the shadow memory.
1374 E : for (size_t i = 0; i < block_info.body_size; ++i)
1375 E : EXPECT_TRUE(runtime_->shadow()->IsAccessible(block_info.RawBody() + i));
1376 :
1377 : // Ensure that the block left redzone is page-protected.
1378 E : for (size_t i = 0; i < block_info.left_redzone_pages_size; ++i)
1379 i : EXPECT_TRUE(IsNotAccessible(block_info.left_redzone_pages + i));
1380 :
1381 : // Ensure that the block right redzone is page-protected.
1382 E : for (size_t i = 0; i < block_info.right_redzone_pages_size; ++i)
1383 E : EXPECT_TRUE(IsNotAccessible(block_info.right_redzone_pages + i));
1384 :
1385 : // The block body should be accessible.
1386 E : for (size_t i = 0; i < block_info.body_size; ++i)
1387 E : EXPECT_TRUE(IsAccessible(block_info.RawBody() + i));
1388 :
1389 : {
1390 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1391 E : EXPECT_EQ(ALLOCATED_BLOCK, block_info.header->state);
1392 E : }
1393 :
1394 E : EXPECT_TRUE(heap.Free(alloc));
1395 E : }
1396 :
// For quarantined zebra-heap blocks of several sizes: the body is poisoned in
// shadow memory AND page-protected, both redzones' page-aligned parts are
// page-protected, and the header state is one of the quarantined states.
1397 E : TEST_F(BlockHeapManagerTest, QuarantinedBlockIsProtected) {
1398 E : EnableTestZebraBlockHeap();
1399 E : ScopedHeap heap(heap_manager_);
1400 : // Always quarantine if possible.
1401 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1402 :
1403 E : for (size_t i = 0; i < 20; ++i) {
1404 E : const size_t kAllocSize = 0xFF + i;
1405 E : void* alloc = heap.Allocate(kAllocSize);
1406 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1407 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1408 :
1409 E : BlockInfo block_info = {};
1410 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1411 :
1412 : // The block is freed and quarantined.
1413 E : EXPECT_TRUE(heap.Free(alloc));
1414 :
1415 : // Test the block protections after being quarantined.
1416 : // The whole block should be poisoned in the shadow memory.
1417 E : for (size_t i = 0; i < block_info.body_size; ++i) {
1418 E : EXPECT_FALSE(runtime_->shadow()->IsAccessible(block_info.RawBody() + i));
1419 E : }
1420 :
1421 : // Ensure that the block left redzone is page-protected.
1422 E : for (size_t i = 0; i < block_info.left_redzone_pages_size; ++i)
1423 i : EXPECT_TRUE(IsNotAccessible(block_info.left_redzone_pages + i));
1424 :
1425 : // Ensure that the block right redzone is page-protected.
1426 E : for (size_t i = 0; i < block_info.right_redzone_pages_size; ++i)
1427 E : EXPECT_TRUE(IsNotAccessible(block_info.right_redzone_pages + i));
1428 :
1429 : // Ensure that the block body is page-protected.
1430 E : for (size_t i = 0; i < block_info.body_size; ++i)
1431 E : EXPECT_TRUE(IsNotAccessible(block_info.RawBody() + i));
1432 :
1433 : {
1434 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1435 E : EXPECT_TRUE(block_info.header->state == QUARANTINED_BLOCK ||
1436 E : block_info.header->state == QUARANTINED_FLOODED_BLOCK);
1437 E : }
1438 E : }
1439 E : }
1440 :
// When the zebra heap refuses to quarantine (refuse_push), a freed block must
// bypass the quarantine entirely: shadow unpoisoned, pages unprotected, and
// the header state set to FREED_BLOCK.
1441 E : TEST_F(BlockHeapManagerTest, NonQuarantinedBlockIsMarkedAsFreed) {
1442 E : EnableTestZebraBlockHeap();
1443 E : ScopedHeap heap(heap_manager_);
1444 : // Disable the zebra heap quarantine.
1445 E : test_zebra_block_heap_->set_refuse_push(true);
1446 :
1447 E : const size_t kAllocSize = 0x100;
1448 E : void* alloc = heap.Allocate(kAllocSize);
1449 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1450 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1451 :
1452 E : BlockInfo block_info = {};
1453 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1454 :
1455 : // The block is freed but not quarantined.
1456 E : EXPECT_TRUE(heap.Free(alloc));
1457 :
1458 : // The whole block should be unpoisoned in the shadow memory, and its
1459 : // associated pages unprotected.
1460 E : for (size_t i = 0; i < block_info.block_size; ++i) {
// NOTE(review): ASSERT_NO_FATAL_FAILURE here discards IsAccessible()'s bool
// result — this only checks the call doesn't fatally fail, not that the byte
// is accessible. Presumably ASSERT_TRUE was intended; confirm.
1461 E : ASSERT_NO_FATAL_FAILURE(runtime_->shadow()->IsAccessible(
1462 E : block_info.RawBlock() + i));
1463 E : ASSERT_FALSE(runtime_->shadow()->PageIsProtected(
1464 E : block_info.RawBlock() + i));
1465 E : }
1466 :
1467 E : EXPECT_EQ(FREED_BLOCK, block_info.header->state);
1468 E : }
1469 :
// After every Free, the zebra heap's quarantine must be trimmed so that its
// block count never exceeds slab_count * quarantine_ratio.
1470 E : TEST_F(BlockHeapManagerTest, ZebraBlockHeapQuarantineRatioIsRespected) {
1471 E : EnableTestZebraBlockHeap();
1472 E : ScopedHeap heap(heap_manager_);
1473 : // Set a non-standard quarantine ratio.
1474 E : float quarantine_ratio = 0.37f;
1475 E : test_zebra_block_heap_->set_quarantine_ratio(quarantine_ratio);
1476 :
1477 E : const size_t kAllocations = 2000;
1478 :
1479 E : size_t zebra_heap_size = test_zebra_block_heap_->slab_count_;
1480 E : const size_t max_quarantine_size = zebra_heap_size * quarantine_ratio;
1481 :
1482 : // All allocations have a maximum size of 1KB, all are served by the zebra
1483 : // heap.
1484 E : for (size_t i = 0; i < kAllocations; ++i) {
1485 E : const size_t kAllocSize = (0x100 + i) % 1024;
1486 E : void* alloc = heap.Allocate(kAllocSize);
1487 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1488 :
1489 E : BlockInfo block_info = {};
1490 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1491 E : EXPECT_TRUE(heap.Free(alloc));
1492 :
1493 : // After Free the quarantine should be trimmed, enforcing the quarantine
1494 : // size upper bound.
1495 E : EXPECT_LE(test_zebra_block_heap_->GetCountForTesting(),
1496 E : max_quarantine_size);
1497 :
1498 : {
1499 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1500 E : EXPECT_TRUE(block_info.header->state == QUARANTINED_BLOCK ||
1501 E : block_info.header->state == QUARANTINED_FLOODED_BLOCK);
1502 E : }
1503 E : }
1504 E : }
1505 :
1506 : // Ensures that the LargeBlockHeap overrides the provided heap if the allocation
1507 : // size exceeds the threshold.
// An allocation larger than the large-block threshold (one page) must be
// served by the LargeBlockHeap: the trailer's heap_id must equal the
// manager's large block heap id.
1508 E : TEST_F(BlockHeapManagerTest, LargeBlockHeapUsedForLargeAllocations) {
1509 E : EnableLargeBlockHeap(GetPageSize());
1510 :
1511 : // Disable targeted heaps as it interferes with this test.
// NOTE(review): the comment above claims parameters are changed, but |params|
// is read and written back unmodified (and via SetParameters rather than the
// set_parameters accessor used elsewhere in this file). Presumably a
// parameter tweak was removed or lost here — confirm intent.
1512 E : ::common::AsanParameters params = heap_manager_->parameters();
1513 E : heap_manager_->SetParameters(params);
1514 :
1515 E : ScopedHeap heap(heap_manager_);
1516 :
1517 E : const size_t kAllocSize = GetPageSize() + 0x100;
1518 E : void* alloc = heap.Allocate(kAllocSize);
1519 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1520 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1521 :
1522 : // Get the heap_id from the block trailer.
1523 E : BlockInfo block_info = {};
1524 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1525 :
1526 : {
1527 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1528 : // The heap_id stored in the block trailer should match the large block
1529 : // heap id.
1530 E : EXPECT_EQ(heap_manager_->large_block_heap_id_,
1531 E : block_info.trailer->heap_id);
1532 E : }
1533 :
1534 E : EXPECT_TRUE(heap.Free(alloc));
1535 E : }
1536 :
1537 : // Ensures that the LargeBlockHeap is not used for a small allocation.
// An allocation below the large-block threshold must NOT be routed to the
// LargeBlockHeap: the trailer's heap_id must be the provided heap's id.
1538 E : TEST_F(BlockHeapManagerTest, LargeBlockHeapNotUsedForSmallAllocations) {
1539 E : EnableLargeBlockHeap(GetPageSize());
1540 E : ScopedHeap heap(heap_manager_);
1541 :
1542 E : const size_t kAllocSize = 0x100;
1543 E : void* alloc = heap.Allocate(kAllocSize);
1544 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1545 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1546 :
1547 : // Get the heap_id from the block trailer.
1548 E : BlockInfo block_info = {};
1549 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1550 :
1551 : {
1552 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1553 : // The provided heap ID should be the one in the block trailer.
1554 E : EXPECT_EQ(heap.Id(), block_info.trailer->heap_id);
1555 E : }
1556 :
1557 E : EXPECT_TRUE(heap.Free(alloc));
1558 E : }
1559 :
1560 E : TEST_F(BlockHeapManagerTest, AllocationFilterFlag) {
1561 E : EXPECT_NE(TLS_OUT_OF_INDEXES, heap_manager_->allocation_filter_flag_tls_);
1562 E : heap_manager_->set_allocation_filter_flag(true);
1563 E : EXPECT_TRUE(heap_manager_->allocation_filter_flag());
1564 E : heap_manager_->set_allocation_filter_flag(false);
1565 E : EXPECT_FALSE(heap_manager_->allocation_filter_flag());
1566 E : heap_manager_->set_allocation_filter_flag(true);
1567 E : EXPECT_TRUE(heap_manager_->allocation_filter_flag());
1568 E : }
1569 :
1570 : namespace {
1571 :
1572 E : size_t CountLockedHeaps(HeapInterface** heaps) {
1573 E : size_t i = 0;
1574 E : while (heaps[i] != nullptr) {
1575 E : ++i;
1576 E : }
1577 E : return i;
1578 E : }
1579 :
1580 : } // namespace
1581 :
1582 E : TEST_F(BlockHeapManagerTest, BestEffortLockAllNoLocksHeld) {
1583 E : heap_manager_->BestEffortLockAll();
1584 E : EXPECT_EQ(CountLockedHeaps(heap_manager_->locked_heaps_),
1585 E : heap_manager_->heaps_.size());
1586 E : heap_manager_->UnlockAll();
1587 E : }
1588 :
1589 : namespace {
1590 :
// A helper thread runner for acquiring a HeapInterface lock for a certain
// amount of time.
class GrabHeapLockRunner : public base::DelegateSimpleThread::Delegate {
 public:
  // |heap| is the heap whose lock will be grabbed. Must not be null and must
  // outlive this runner.
  explicit GrabHeapLockRunner(HeapInterface* heap)
      : heap_(heap), cv_(&cv_lock_), acquired_(false), release_(false) {
    DCHECK_NE(static_cast<HeapInterface*>(nullptr), heap);
  }

  // Thread body: acquires the heap lock, announces that it holds it, then
  // keeps holding it until SignalRelease() is called from another thread.
  virtual void Run() {
    DCHECK_NE(static_cast<HeapInterface*>(nullptr), heap_);
    heap_->Lock();
    SignalAcquired();
    WaitRelease();
    heap_->Unlock();
  }

  // Waits until |acquired| is true.
  void WaitAcquired() {
    while (true) {
      base::AutoLock auto_lock(cv_lock_);
      if (acquired_)
        return;
      // Releases |cv_lock_| while blocked; reacquires before returning.
      cv_.Wait();
    }
  }

  // To be called externally to notify this runner that the lock may be
  // released and the thread torn down.
  void SignalRelease() {
    base::AutoLock auto_lock(cv_lock_);
    release_ = true;
    cv_.Broadcast();
  }

 private:
  // Notifies external observers that the lock has been acquired.
  void SignalAcquired() {
    base::AutoLock auto_lock(cv_lock_);
    acquired_ = true;
    cv_.Broadcast();
  }

  // Waits until |release| is true.
  void WaitRelease() {
    while (true) {
      base::AutoLock auto_lock(cv_lock_);
      if (release_)
        return;
      cv_.Wait();
    }
  }

  HeapInterface* heap_;  // Not owned.
  base::Lock cv_lock_;   // Guards |acquired_| and |release_|.
  base::ConditionVariable cv_;
  bool acquired_;  // True once the heap lock is held by Run().
  bool release_;   // True once the heap lock may be dropped.

  DISALLOW_COPY_AND_ASSIGN(GrabHeapLockRunner);
};
1652 :
1653 : } // namespace
1654 :
TEST_F(BlockHeapManagerTest, BestEffortLockAllOneHeapLockHeld) {
  ASSERT_FALSE(heap_manager_->heaps_.empty());
  // Have a background thread grab and hold the first heap's lock before
  // BestEffortLockAll runs.
  GrabHeapLockRunner runner(heap_manager_->heaps_.begin()->first);
  base::DelegateSimpleThread thread(&runner, "GrabHeapLockRunner");
  thread.Start();
  runner.WaitAcquired();
  heap_manager_->BestEffortLockAll();

  // Expect all but one heap lock to have been acquired.
  EXPECT_EQ(CountLockedHeaps(heap_manager_->locked_heaps_),
            heap_manager_->heaps_.size() - 1);
  heap_manager_->UnlockAll();
  // Let the background thread drop its lock and terminate.
  runner.SignalRelease();
  thread.Join();
}
1670 :
1671 : // These functions are tested explicitly because the AsanRuntime reaches in
1672 : // to use them.
1673 :
1674 E : TEST_F(BlockHeapManagerTest, IsValidHeapIdUnlocked) {
1675 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1676 E : EXPECT_FALSE(heap_manager_->IsValidHeapIdUnlocked(0xDEADBEEF, false));
1677 E : for (auto& hq_pair : heap_manager_->heaps_) {
1678 E : TestBlockHeapManager::HeapQuarantinePair* hq = &hq_pair;
1679 : TestBlockHeapManager::HeapId heap_id =
1680 E : reinterpret_cast<TestBlockHeapManager::HeapId>(hq);
1681 E : EXPECT_TRUE(heap_manager_->IsValidHeapIdUnlocked(heap_id, false));
1682 E : }
1683 E : }
1684 :
1685 E : TEST_F(BlockHeapManagerTest, GetHeapTypeUnlocked) {
1686 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1687 E : for (auto& hq_pair : heap_manager_->heaps_) {
1688 E : TestBlockHeapManager::HeapQuarantinePair* hq = &hq_pair;
1689 : TestBlockHeapManager::HeapId heap_id =
1690 E : reinterpret_cast<TestBlockHeapManager::HeapId>(hq);
1691 E : EXPECT_NE(kUnknownHeapType, heap_manager_->GetHeapTypeUnlocked(heap_id));
1692 E : }
1693 E : }
1694 :
1695 E : TEST_F(BlockHeapManagerTest, ComputeRelativeStackId) {
1696 : // This test is done here and not in stack_capture_unittest, as the latter
1697 : // doesn't have the provision for faking the module address and would
1698 : // therefore ignore all the frames.
1699 E : common::StackCapture stack;
1700 E : stack.InitFromStack();
1701 :
1702 E : EXPECT_NE(0U, stack.relative_stack_id());
1703 E : }
1704 :
TEST_F(BlockHeapManagerTest, EnableDeferredFreeThreadTest) {
  ScopedHeap heap(heap_manager_);
  // The deferred-free thread is off by default, starts on demand, and stops
  // cleanly when disabled.
  ASSERT_FALSE(heap_manager_->IsDeferredFreeThreadRunning());
  heap_manager_->EnableDeferredFreeThread();
  ASSERT_TRUE(heap_manager_->IsDeferredFreeThreadRunning());
  heap_manager_->DisableDeferredFreeThread();
  ASSERT_FALSE(heap_manager_->IsDeferredFreeThreadRunning());
}
1713 :
1714 E : TEST_F(BlockHeapManagerTest, DeferredFreeThreadTest) {
1715 E : const size_t kAllocSize = 100;
1716 E : const size_t kTargetMaxYellow = 10;
1717 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
1718 E : ScopedHeap heap(heap_manager_);
1719 :
1720 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1721 E : parameters.quarantine_size = real_alloc_size * kTargetMaxYellow;
1722 E : heap_manager_->set_parameters(parameters);
1723 :
1724 : size_t max_size_yellow =
1725 E : heap_manager_->shared_quarantine_.GetMaxSizeForColorForTesting(YELLOW) /
1726 : real_alloc_size;
1727 :
1728 E : ASSERT_EQ(kTargetMaxYellow, max_size_yellow);
1729 :
1730 : // Blocks the callback until it gets signaled.
1731 E : base::WaitableEvent deferred_free_callback_start(false, false);
1732 : // Gets signaled by the callback this it's done executing.
1733 E : base::WaitableEvent deferred_free_callback_end(false, false);
1734 E : heap_manager_->EnableDeferredFreeWithSync(&deferred_free_callback_start,
1735 : &deferred_free_callback_end);
1736 E : ASSERT_TRUE(heap_manager_->IsDeferredFreeThreadRunning());
1737 :
1738 : // Overshoot the YELLOW size (into RED) then start and wait for the callback
1739 : // to be executed. The quarantine should to be back to GREEN.
1740 E : for (int i = 0; i < max_size_yellow + 1; i++) {
1741 E : void* heap_mem = heap.Allocate(kAllocSize);
1742 E : ASSERT_NE(static_cast<void*>(nullptr), heap_mem);
1743 E : heap.Free(heap_mem);
1744 E : }
1745 :
1746 E : size_t current_size = heap_manager_->shared_quarantine_.GetSizeForTesting();
1747 E : ASSERT_EQ(RED,
1748 E : heap_manager_->shared_quarantine_.GetQuarantineColor(current_size));
1749 :
1750 : // Signal the callback to execute and for it to finish.
1751 E : deferred_free_callback_start.Signal();
1752 E : deferred_free_callback_end.Wait();
1753 :
1754 E : current_size = heap_manager_->shared_quarantine_.GetSizeForTesting();
1755 E : EXPECT_EQ(GREEN,
1756 E : heap_manager_->shared_quarantine_.GetQuarantineColor(current_size));
1757 :
1758 E : heap_manager_->DisableDeferredFreeThread();
1759 E : EXPECT_FALSE(heap_manager_->IsDeferredFreeThreadRunning());
1760 E : }
1761 :
1762 : namespace {
1763 :
1764 : bool ShadowIsConsistentPostAlloc(
1765 : Shadow* shadow, const void* alloc, size_t size) {
1766 : uintptr_t index = reinterpret_cast<uintptr_t>(alloc);
1767 : index >>= kShadowRatioLog;
1768 : uintptr_t index_end = index + (size >> kShadowRatioLog);
1769 : for (size_t i = index; i < index_end; ++i) {
1770 : if (shadow->shadow()[i] != ShadowMarker::kHeapAddressableMarker)
1771 : return false;
1772 : }
1773 : return true;
1774 : }
1775 :
1776 : bool ShadowIsConsistentPostFree(
1777 : Shadow* shadow, const void* alloc, size_t size) {
1778 : DCHECK_NE(static_cast<Shadow*>(nullptr), shadow);
1779 : uintptr_t index = reinterpret_cast<uintptr_t>(alloc);
1780 : index >>= kShadowRatioLog;
1781 : uintptr_t index_end = index + (size >> kShadowRatioLog);
1782 :
1783 : uint8_t m = shadow->shadow()[index];
1784 : if (m != ShadowMarker::kHeapAddressableMarker &&
1785 : m != ShadowMarker::kAsanReservedMarker &&
1786 : m != ShadowMarker::kHeapFreedMarker) {
1787 : return false;
1788 : }
1789 :
1790 : // We expect green memory only for large allocations which are directly
1791 : // mapped. Small allocations should be returned to a common pool and
1792 : // marked as reserved.CtMalloc heap.
1793 : if (m == ShadowMarker::kHeapAddressableMarker && size < 1 * 1024 * 1024)
1794 : return false;
1795 :
1796 : for (size_t i = index; i < index_end; ++i) {
1797 : if (shadow->shadow()[index] != m)
1798 : return false;
1799 : }
1800 : return true;
1801 : }
1802 :
1803 : class BlockHeapManagerIntegrationTest : public testing::Test {
1804 : public:
1805 : BlockHeapManagerIntegrationTest()
1806 : : shadow_() {
1807 : }
1808 :
1809 : void SetUp() override {
1810 : shadow_.SetUp();
1811 : ASSERT_TRUE(shadow_.IsClean());
1812 : }
1813 :
1814 : void TearDown() override {
1815 : ASSERT_TRUE(shadow_.IsClean());
1816 : shadow_.TearDown();
1817 : }
1818 :
1819 : testing::DebugShadow shadow_;
1820 : };
1821 :
1822 : } // namespace
1823 :
1824 : } // namespace heap_managers
1825 : } // namespace asan
1826 : } // namespace agent
|