1 : // Copyright 2014 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/agent/asan/heap_managers/block_heap_manager.h"
16 :
17 : #include <vector>
18 :
19 : #include "base/bind.h"
20 : #include "base/compiler_specific.h"
21 : #include "base/rand_util.h"
22 : #include "base/sha1.h"
23 : #include "base/debug/alias.h"
24 : #include "base/synchronization/condition_variable.h"
25 : #include "base/synchronization/lock.h"
26 : #include "base/test/test_reg_util_win.h"
27 : #include "base/threading/simple_thread.h"
28 : #include "gmock/gmock.h"
29 : #include "gtest/gtest.h"
30 : #include "syzygy/agent/asan/block.h"
31 : #include "syzygy/agent/asan/heap.h"
32 : #include "syzygy/agent/asan/page_protection_helpers.h"
33 : #include "syzygy/agent/asan/rtl_impl.h"
34 : #include "syzygy/agent/asan/runtime.h"
35 : #include "syzygy/agent/asan/stack_capture_cache.h"
36 : #include "syzygy/agent/asan/unittest_util.h"
37 : #include "syzygy/agent/asan/heaps/internal_heap.h"
38 : #include "syzygy/agent/asan/heaps/large_block_heap.h"
39 : #include "syzygy/agent/asan/heaps/simple_block_heap.h"
40 : #include "syzygy/agent/asan/heaps/win_heap.h"
41 : #include "syzygy/agent/asan/heaps/zebra_block_heap.h"
42 : #include "syzygy/agent/asan/memory_notifiers/null_memory_notifier.h"
43 : #include "syzygy/assm/assembler.h"
44 : #include "syzygy/assm/buffer_serializer.h"
45 : #include "syzygy/common/asan_parameters.h"
46 :
47 : namespace agent {
48 : namespace asan {
49 : namespace heap_managers {
50 :
51 : namespace {
52 :
53 : using heaps::ZebraBlockHeap;
54 : using testing::IsAccessible;
55 : using testing::IsNotAccessible;
56 : using testing::ScopedBlockAccess;
57 :
58 : typedef BlockHeapManager::HeapId HeapId;
59 :
60 E : testing::DummyHeap dummy_heap;
61 :
62 : // As the code that computes the relative stack IDs ignores any frames from
63 : // its own module, and as we statically link with the SyzyAsan CRT, all the
64 : // allocations or crashes coming from these tests would have the same
65 : // relative stack ID by default. To fix this, we dynamically generate the
66 : // code that does the allocation. We then use the ComputeRelativeStackId
67 : // seam to indicate that the frame is in an entirely different dummy module.
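//
// Roughly, the thunk generated below is equivalent to the following function
// (a sketch only; the real code is assembled at runtime into an executable
// page, at a configurable offset, with the cdecl calling convention):
//
//   void* Thunk(BlockHeapManager* heap_manager, HeapId heap_id,
//               size_t bytes) {
//     return AllocateFromHeapManager(heap_manager, heap_id, bytes);
//   }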
68 : class AllocateFromHeapManagerHelper {
69 : public:
70 E : AllocateFromHeapManagerHelper(BlockHeapManager* heap_manager,
71 : HeapId heap_id,
72 : size_t offset)
73 : : heap_manager_(heap_manager), heap_id_(heap_id), offset_(offset) {
74 E : DCHECK_NE(static_cast<BlockHeapManager*>(nullptr), heap_manager);
75 E : DCHECK_LT(offset, GetPageSize());
76 :
77 : // Allocates a page that has the executable bit set.
78 : allocation_code_page_ = ::VirtualAlloc(nullptr, GetPageSize(),
79 E : MEM_COMMIT, PAGE_EXECUTE_READWRITE);
80 E : EXPECT_NE(nullptr, allocation_code_page_);
81 :
82 : assm::BufferSerializer bs(
83 : reinterpret_cast<uint8*>(allocation_code_page_) + offset,
84 E : GetPageSize() - offset);
85 : assm::AssemblerImpl assembler(
86 E : reinterpret_cast<uint32>(allocation_code_page_) + offset, &bs);
87 :
88 E : assembler.push(assm::ebp);
89 E : assembler.mov(assm::ebp, assm::esp);
90 :
91 : // Push the parameters on the stack.
92 : assembler.push(assm::AssemblerImpl::Operand(assm::ebp,
93 E : assm::AssemblerImpl::Displacement(0x10, assm::kSize8Bit)));
94 : assembler.push(assm::AssemblerImpl::Operand(assm::ebp,
95 E : assm::AssemblerImpl::Displacement(0x0C, assm::kSize8Bit)));
96 : assembler.push(assm::AssemblerImpl::Operand(assm::ebp,
97 E : assm::AssemblerImpl::Displacement(0x08, assm::kSize8Bit)));
98 :
99 : // Call the AllocateFromHeapManager function.
100 : assembler.call(assm::AssemblerImpl::Immediate(
101 : reinterpret_cast<uint32>(&AllocateFromHeapManager),
102 E : assm::kSize32Bit, NULL));
103 E : assembler.mov(assm::esp, assm::ebp);
104 E : assembler.pop(assm::ebp);
105 E : assembler.ret();
106 :
107 : agent::common::StackCapture::AddFalseModule(
108 E : "dummy_module.dll", allocation_code_page_, GetPageSize());
109 E : }
110 :
111 E : ~AllocateFromHeapManagerHelper() {
112 E : EXPECT_TRUE(::VirtualFree(allocation_code_page_, 0, MEM_RELEASE));
113 E : allocation_code_page_ = nullptr;
114 E : agent::common::StackCapture::ClearFalseModules();
115 E : }
116 :
117 E : void* operator()(size_t bytes) {
118 : using AllocFunctionPtr = void*(*)(BlockHeapManager* heap_manager,
119 : HeapId heap_id,
120 : size_t bytes);
121 E : uint8* func = reinterpret_cast<uint8*>(allocation_code_page_) + offset_;
122 : return reinterpret_cast<AllocFunctionPtr>(func)(
123 E : heap_manager_, heap_id_, bytes);
124 E : }
125 :
126 : private:
127 : // Do an allocation via a heap manager.
128 : static void* AllocateFromHeapManager(BlockHeapManager* heap_manager,
129 : HeapId heap_id,
130 E : size_t bytes) {
131 E : EXPECT_NE(nullptr, heap_manager);
132 E : return heap_manager->Allocate(heap_id, bytes);
133 E : }
134 :
135 : // The page that contains the dynamically generated code that does an
136 : // allocation via a heap manager.
137 : LPVOID allocation_code_page_;
138 :
139 : // The heap that serves the allocation.
140 : HeapId heap_id_;
141 :
142 : // The heap manager that owns the heap.
143 : BlockHeapManager* heap_manager_;
144 :
145 : // The offset within the page where the function starts. Different values of
146 : // this will cause different relative stack ID values.
147 : size_t offset_;
148 : };
149 :
150 : // A fake ZebraBlockHeap to simplify unit testing. This wraps the real heap
151 : // with switches to enable/disable the quarantine and to accept/refuse
152 : // allocations.
153 : class TestZebraBlockHeap : public heaps::ZebraBlockHeap {
154 : public:
155 : using ZebraBlockHeap::set_quarantine_ratio;
156 : using ZebraBlockHeap::quarantine_ratio;
157 : using ZebraBlockHeap::slab_count_;
158 :
159 : // Constructor.
160 E : explicit TestZebraBlockHeap(MemoryNotifierInterface* memory_notifier)
161 : : ZebraBlockHeap(1024 * 1024, memory_notifier, &dummy_heap) {
162 E : refuse_allocations_ = false;
163 E : refuse_push_ = false;
164 E : }
165 :
166 : // Virtual destructor.
167 E : virtual ~TestZebraBlockHeap() { }
168 :
169 : // Wrapper that allows easily disabling allocations.
170 : void* AllocateBlock(size_t size,
171 : size_t min_left_redzone_size,
172 : size_t min_right_redzone_size,
173 E : BlockLayout* layout) override {
174 E : if (refuse_allocations_)
175 E : return nullptr;
176 : return ZebraBlockHeap::AllocateBlock(size,
177 : min_left_redzone_size,
178 : min_right_redzone_size,
179 E : layout);
180 E : }
181 :
182 : // Wrapper that allows easily disabling the insertion of new blocks in the
183 : // quarantine.
184 E : bool Push(const CompactBlockInfo& info) override {
185 E : if (refuse_push_)
186 E : return false;
187 E : return ZebraBlockHeap::Push(info);
188 E : }
189 :
190 : // Enable/Disable future allocations.
191 E : void set_refuse_allocations(bool value) {
192 E : refuse_allocations_ = value;
193 E : }
194 :
195 : // Enable/Disable the insertion of blocks in the quarantine.
196 E : void set_refuse_push(bool value) {
197 E : refuse_push_ = value;
198 E : }
199 :
200 : protected:
201 : bool refuse_allocations_;
202 : bool refuse_push_;
203 :
204 : private:
205 : DISALLOW_COPY_AND_ASSIGN(TestZebraBlockHeap);
206 : };
207 :
208 : // A derived class to expose protected members for unit-testing.
209 : class TestBlockHeapManager : public BlockHeapManager {
210 : public:
211 : using BlockHeapManager::HeapQuarantinePair;
212 :
213 : using BlockHeapManager::FreePotentiallyCorruptBlock;
214 : using BlockHeapManager::GetHeapId;
215 : using BlockHeapManager::GetHeapFromId;
216 : using BlockHeapManager::GetHeapTypeUnlocked;
217 : using BlockHeapManager::GetQuarantineFromId;
218 : using BlockHeapManager::HeapMetadata;
219 : using BlockHeapManager::HeapQuarantineMap;
220 : using BlockHeapManager::IsValidHeapIdUnlocked;
221 : using BlockHeapManager::SetHeapErrorCallback;
222 : using BlockHeapManager::ShardedBlockQuarantine;
223 : using BlockHeapManager::TrimQuarantine;
224 :
225 : using BlockHeapManager::allocation_filter_flag_tls_;
226 : using BlockHeapManager::heaps_;
227 : using BlockHeapManager::large_block_heap_id_;
228 : using BlockHeapManager::locked_heaps_;
229 : using BlockHeapManager::parameters_;
230 : using BlockHeapManager::zebra_block_heap_;
231 : using BlockHeapManager::zebra_block_heap_id_;
232 :
233 : // A derived class to expose protected members for unit-testing. This has to
234 : // be nested inside this class because ShardedBlockQuarantine accesses some
235 : // protected fields of BlockHeapManager.
236 : //
237 : // This class should only expose existing members or add new functions; no
238 : // new data member should be added.
239 : class TestQuarantine : public ShardedBlockQuarantine {
240 : public:
241 : using ShardedBlockQuarantine::Node;
242 : using ShardedBlockQuarantine::kShardingFactor;
243 : using ShardedBlockQuarantine::heads_;
244 : };
245 :
246 : // Constructor.
247 : TestBlockHeapManager(Shadow* shadow,
248 : StackCaptureCache* stack_cache,
249 : MemoryNotifierInterface* memory_notifier)
250 : : BlockHeapManager(shadow, stack_cache, memory_notifier) {
251 : }
252 :
253 : // Removes the heap with the given ID.
254 : void RemoveHeapById(HeapId heap_id) {
255 : if (heap_id == 0)
256 : return;
257 : BlockHeapInterface* heap = GetHeapFromId(heap_id);
258 : delete heap;
259 : EXPECT_EQ(1, heaps_.erase(heap));
260 : }
261 :
262 : // Wrapper for the set_parameters method. This also takes care of
263 : // reinitializing the variables that are usually initialized in the
264 : // constructor of a BlockHeapManager.
265 E : void SetParameters(const ::common::AsanParameters& params) {
266 : // Set the parameters.
267 : {
268 E : base::AutoLock lock(lock_);
269 E : parameters_ = params;
270 E : }
271 :
272 E : PropagateParameters();
273 E : }
274 : };
275 :
276 : // A derived class to expose protected members for unit-testing.
277 : class TestAsanRuntime : public agent::asan::AsanRuntime {
278 : public:
279 : using agent::asan::AsanRuntime::heap_manager_;
280 : };
281 :
282 : // A utility class for manipulating a heap. This automatically deletes the heap
283 : // and its content in the destructor and provides some utility functions.
284 : class ScopedHeap {
285 : public:
286 : typedef TestBlockHeapManager::TestQuarantine TestQuarantine;
287 :
288 : // Constructor.
289 E : explicit ScopedHeap(TestBlockHeapManager* heap_manager)
290 : : heap_manager_(heap_manager) {
291 E : heap_id_ = heap_manager->CreateHeap();
292 E : EXPECT_NE(0u, heap_id_);
293 : alloc_functor_.reset(new AllocateFromHeapManagerHelper(heap_manager,
294 : heap_id_,
295 E : 13));
296 E : }
297 :
298 : // Destructor. Destroys the heap; this flushes its quarantine and deletes
299 : // all the structures associated with this heap.
300 E : ~ScopedHeap() {
301 E : ReleaseHeap();
302 E : }
303 :
304 E : void ReleaseHeap() {
305 E : if (heap_id_ != 0) {
306 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id_));
307 E : heap_id_ = 0;
308 : }
309 E : }
310 :
311 : // Retrieves the quarantine associated with this heap.
312 E : BlockQuarantineInterface* GetQuarantine() {
313 E : return heap_manager_->GetQuarantineFromId(heap_id_);
314 E : }
315 :
316 : // Allocate a block of @p size bytes.
317 E : void* Allocate(size_t size) {
318 E : return (*alloc_functor_)(size);
319 E : }
320 :
321 : // Free the block @p mem.
322 E : bool Free(void* mem) {
323 E : return heap_manager_->Free(heap_id_, mem);
324 E : }
325 :
326 : // Flush the quarantine of this heap.
327 E : void FlushQuarantine() {
328 E : BlockQuarantineInterface* quarantine = GetQuarantine();
329 E : EXPECT_NE(static_cast<BlockQuarantineInterface*>(nullptr),
330 : quarantine);
331 E : BlockQuarantineInterface::ObjectVector blocks_to_free;
332 E : quarantine->Empty(&blocks_to_free);
333 : BlockQuarantineInterface::ObjectVector::iterator iter_block =
334 E : blocks_to_free.begin();
335 E : for (; iter_block != blocks_to_free.end(); ++iter_block) {
336 E : const CompactBlockInfo& compact = *iter_block;
337 E : BlockInfo expanded = {};
338 E : ConvertBlockInfo(compact, &expanded);
339 E : CHECK(heap_manager_->FreePotentiallyCorruptBlock(&expanded));
340 E : }
341 E : }
342 :
343 : // Returns the underlying heap ID.
344 E : HeapId Id() { return heap_id_; }
345 :
346 : // Determines if the address @p mem corresponds to a block in the quarantine
347 : // of this heap.
348 E : bool InQuarantine(const void* mem) {
349 : // As we'll cast a ShardedBlockQuarantine directly into a TestQuarantine,
350 : // TestQuarantine shouldn't define any new field; it should only act as an
351 : // interface allowing access to some private fields.
352 : static_assert(sizeof(TestQuarantine) ==
353 : sizeof(TestBlockHeapManager::ShardedBlockQuarantine),
354 : "TestQuarantine isn't an interface.");
355 : TestQuarantine* test_quarantine =
356 E : reinterpret_cast<TestQuarantine*>(GetQuarantine());
357 E : EXPECT_NE(static_cast<TestQuarantine*>(nullptr), test_quarantine);
358 : // Search through all of the shards.
359 E : for (size_t i = 0; i < test_quarantine->kShardingFactor; ++i) {
360 : // Search through all blocks in each shard.
361 E : TestQuarantine::Node* current_node = test_quarantine->heads_[i];
362 E : while (current_node != nullptr) {
363 : const uint8* body =
364 : reinterpret_cast<const uint8*>(current_node->object.header) +
365 E : current_node->object.header_size;
366 E : if (body == mem) {
367 E : EXPECT_TRUE(
368 : current_node->object.header->state == QUARANTINED_BLOCK ||
369 : current_node->object.header->state == QUARANTINED_FLOODED_BLOCK);
370 E : return true;
371 : }
372 E : current_node = current_node->next;
373 E : }
374 E : }
375 :
376 E : return false;
377 E : }
378 :
379 : // Returns the heap supported features.
380 E : uint32 GetHeapFeatures() {
381 E : return heap_manager_->GetHeapFromId(heap_id_)->GetHeapFeatures();
382 E : }
383 :
384 : private:
385 : // The heap manager owning the underlying heap.
386 : TestBlockHeapManager* heap_manager_;
387 :
388 : // The underlying heap.
389 : HeapId heap_id_;
390 :
391 : // The allocation functor.
392 : scoped_ptr<AllocateFromHeapManagerHelper> alloc_functor_;
393 : };
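// Typical usage of ScopedHeap in the tests below (a sketch):
//
//   ScopedHeap heap(heap_manager_);
//   void* mem = heap.Allocate(100);
//   EXPECT_TRUE(heap.Free(mem));  // The block may land in the quarantine.
//   heap.FlushQuarantine();       // Actually frees any quarantined blocks.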
394 :
395 : // A test fixture class for testing the BlockHeapManager class.
396 : class BlockHeapManagerTest : public testing::TestWithAsanRuntime {
397 : public:
398 : typedef TestBlockHeapManager::ShardedBlockQuarantine ShardedBlockQuarantine;
399 : typedef testing::TestWithAsanRuntime Super;
400 :
401 : BlockHeapManagerTest()
402 : : TestWithAsanRuntime(&test_runtime_), heap_manager_(),
403 E : test_zebra_block_heap_(nullptr) {
404 E : }
405 :
406 E : void SetUp() override {
407 E : Super::SetUp();
408 : heap_manager_ = reinterpret_cast<TestBlockHeapManager*>(
409 E : test_runtime_.heap_manager_.get());
410 :
411 E : override_manager_.OverrideRegistry(RegistryCache::kRegistryRootKey);
412 :
413 : // Set the error callback that the manager will use.
414 : heap_manager_->SetHeapErrorCallback(
415 E : base::Bind(&BlockHeapManagerTest::OnHeapError, base::Unretained(this)));
416 :
417 : ::common::AsanParameters params;
418 E : ::common::SetDefaultAsanParameters(&params);
419 E : heap_manager_->SetParameters(params);
420 E : }
421 :
422 E : void TearDown() override {
423 E : heap_manager_ = nullptr;
424 E : Super::TearDown();
425 E : }
426 :
427 E : void OnHeapError(AsanErrorInfo* error) {
428 E : errors_.push_back(*error);
429 E : }
430 :
431 : // Calculates the Asan size for an allocation of @p user_size bytes.
432 E : size_t GetAllocSize(size_t user_size) {
433 E : BlockLayout layout = {};
434 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, user_size, 0,
435 : heap_manager_->parameters().trailer_padding_size + sizeof(BlockTrailer),
436 : &layout));
437 E : return layout.block_size;
438 E : }
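// For reference, the layout planned by BlockPlanLayout is roughly as follows
// (a sketch; see block.h for the authoritative definition):
//
//   +--------+----------------+------+-----------------+---------+
//   | header | header padding | body | trailer padding | trailer |
//   +--------+----------------+------+-----------------+---------+
//
// so the returned block_size is at least user_size plus the sizes of the
// header, the trailer and any padding imposed by the requested alignments.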
439 :
440 E : void EnableTestZebraBlockHeap() {
441 : // Erase previous ZebraBlockHeap.
442 E : if (heap_manager_->zebra_block_heap_ != 0) {
443 i : heap_manager_->heaps_.erase(heap_manager_->zebra_block_heap_);
444 i : delete heap_manager_->zebra_block_heap_;
445 : }
446 : // Plug in a mock ZebraBlockHeap, disabled by default.
447 : test_zebra_block_heap_ = new TestZebraBlockHeap(
448 E : runtime_->memory_notifier());
449 E : heap_manager_->zebra_block_heap_ = test_zebra_block_heap_;
450 : TestBlockHeapManager::HeapMetadata heap_metadata =
451 E : { test_zebra_block_heap_, false };
452 : auto result = heap_manager_->heaps_.insert(std::make_pair(
453 E : test_zebra_block_heap_, heap_metadata));
454 E : heap_manager_->zebra_block_heap_id_ = heap_manager_->GetHeapId(result);
455 :
456 : // Turn on the zebra_block_heap_enabled flag.
457 E : ::common::AsanParameters params = heap_manager_->parameters();
458 E : params.enable_zebra_block_heap = true;
459 E : heap_manager_->set_parameters(params);
460 E : }
461 :
462 E : void EnableLargeBlockHeap(size_t large_allocation_threshold) {
463 E : ::common::AsanParameters params = heap_manager_->parameters();
464 E : params.enable_large_block_heap = true;
465 E : params.large_allocation_threshold = large_allocation_threshold;
466 E : heap_manager_->set_parameters(params);
467 E : CHECK_NE(0u, heap_manager_->large_block_heap_id_);
468 E : }
469 :
470 : // Verifies that [alloc, alloc + size) is accessible, and that
471 : // [alloc - 1] and [alloc + size] are poisoned.
472 E : void VerifyAllocAccess(void* alloc, size_t size) {
473 E : uint8* mem = reinterpret_cast<uint8*>(alloc);
474 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem - 1));
475 E : ASSERT_TRUE(runtime_->shadow()->IsLeftRedzone(mem - 1));
476 E : for (size_t i = 0; i < size; ++i)
477 E : ASSERT_TRUE(runtime_->shadow()->IsAccessible(mem + i));
478 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem + size));
479 E : }
480 :
481 : // Verifies that [alloc-1, alloc+size] is poisoned.
482 E : void VerifyFreedAccess(void* alloc, size_t size) {
483 E : uint8* mem = reinterpret_cast<uint8*>(alloc);
484 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem - 1));
485 E : ASSERT_TRUE(runtime_->shadow()->IsLeftRedzone(mem - 1));
486 E : for (size_t i = 0; i < size; ++i) {
487 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem + i));
488 E : ASSERT_EQ(runtime_->shadow()->GetShadowMarkerForAddress(mem + i),
489 : kHeapFreedMarker);
490 E : }
491 E : ASSERT_FALSE(runtime_->shadow()->IsAccessible(mem + size));
492 E : }
493 :
494 : void QuarantineAltersBlockContents(
495 : float quarantine_flood_fill_rate,
496 : size_t iterations,
497 : size_t min_flood_filled,
498 E : size_t max_flood_filled) {
499 E : const size_t kAllocSize = 13;
500 E : ScopedHeap heap(heap_manager_);
501 : // Ensure that the quarantine is large enough to keep this block.
502 E : ::common::AsanParameters parameters = heap_manager_->parameters();
503 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
504 E : parameters.quarantine_flood_fill_rate = quarantine_flood_fill_rate;
505 E : heap_manager_->set_parameters(parameters);
506 :
507 : // This test gets run repeatedly, and it is expected that some portion of
508 : // the block contents will be flood-filled.
509 E : size_t flood_filled_count = 0;
510 E : for (size_t i = 0; i < iterations; ++i) {
511 : // Allocate a block and fill it with random data.
512 E : void* mem = heap.Allocate(kAllocSize);
513 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
514 E : base::RandBytes(mem, kAllocSize);
515 :
516 : // Hash the contents of the block before being quarantined.
517 E : unsigned char sha1_before[base::kSHA1Length] = {};
518 : base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
519 : kAllocSize,
520 E : sha1_before);
521 :
522 : // Free the block and ensure it gets quarantined.
523 : BlockHeader* header = BlockGetHeaderFromBody(
524 E : reinterpret_cast<BlockBody*>(mem));
525 E : ASSERT_TRUE(heap.Free(mem));
526 E : EXPECT_TRUE(
527 : static_cast<BlockState>(header->state) == QUARANTINED_BLOCK ||
528 : static_cast<BlockState>(header->state) == QUARANTINED_FLOODED_BLOCK);
529 :
530 E : if (static_cast<BlockState>(header->state) == QUARANTINED_BLOCK) {
531 : // If the block is quarantined and not flood-filled then ensure that the
532 : // contents have not changed.
533 E : unsigned char sha1_after[base::kSHA1Length] = {};
534 : base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
535 : kAllocSize,
536 E : sha1_after);
537 E : EXPECT_EQ(0, memcmp(sha1_before, sha1_after, base::kSHA1Length));
538 E : } else {
539 : // If the block is quarantined and flood-filled then ensure that the
540 : // fill has actually happened.
541 E : EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
542 : static_cast<BlockState>(header->state));
545 E : BlockInfo block_info = {};
546 E : EXPECT_TRUE(BlockInfoFromMemory(header, &block_info));
547 E : EXPECT_TRUE(BlockBodyIsFloodFilled(block_info));
548 E : ++flood_filled_count;
549 : }
550 :
551 : // Ensure the quarantine is flushed. Otherwise the next block to be
552 : // allocated might not even make it into the quarantine because a block
553 : // is randomly evicted.
554 E : heap.FlushQuarantine();
555 E : }
556 :
557 E : EXPECT_LE(min_flood_filled, flood_filled_count);
558 E : EXPECT_LE(flood_filled_count, max_flood_filled);
559 E : }
560 :
561 : protected:
562 : // The heap manager used in these tests.
563 : TestBlockHeapManager* heap_manager_;
564 :
565 : // Info about the last errors reported.
566 : std::vector<AsanErrorInfo> errors_;
567 :
568 : // The mock ZebraBlockHeap used in the tests.
569 : TestZebraBlockHeap* test_zebra_block_heap_;
570 :
571 : // The runtime used by those tests.
572 : TestAsanRuntime test_runtime_;
573 :
574 : // Prevent the tests from polluting the registry.
575 : registry_util::RegistryOverrideManager override_manager_;
576 : };
577 :
578 : } // namespace
579 :
580 E : TEST_F(BlockHeapManagerTest, AllocAndFree) {
581 E : const size_t kAllocSize = 17;
582 E : HeapId heap_id = heap_manager_->CreateHeap();
583 E : EXPECT_NE(0u, heap_id);
584 E : void* alloc = heap_manager_->Allocate(heap_id, kAllocSize);
585 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
586 E : EXPECT_LE(kAllocSize, heap_manager_->Size(heap_id, alloc));
587 E : EXPECT_TRUE(heap_manager_->Free(heap_id, alloc));
588 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id));
589 E : }
590 :
591 E : TEST_F(BlockHeapManagerTest, FreeNullPointer) {
592 E : HeapId heap_id = heap_manager_->CreateHeap();
593 E : EXPECT_NE(0u, heap_id);
594 E : EXPECT_TRUE(heap_manager_->Free(heap_id, static_cast<void*>(nullptr)));
595 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id));
596 E : }
597 :
598 E : TEST_F(BlockHeapManagerTest, FreeUnguardedAlloc) {
599 E : const size_t kAllocSize = 100;
600 E : ::common::AsanParameters params = heap_manager_->parameters();
601 E : params.allocation_guard_rate = 0.0;
602 E : heap_manager_->set_parameters(params);
603 :
604 E : ScopedHeap heap(heap_manager_);
605 :
606 E : void* heap_alloc = heap.Allocate(kAllocSize);
607 E : EXPECT_NE(static_cast<void*>(nullptr), heap_alloc);
608 :
609 E : void* process_heap_alloc = ::HeapAlloc(::GetProcessHeap(), 0, kAllocSize);
610 E : EXPECT_NE(static_cast<void*>(nullptr), process_heap_alloc);
611 :
612 : BlockHeapInterface* process_heap = heap_manager_->GetHeapFromId(
613 E : heap_manager_->process_heap());
614 E : void* process_heap_wrapper_alloc = process_heap->Allocate(kAllocSize);
615 E : EXPECT_NE(static_cast<void*>(nullptr), process_heap_wrapper_alloc);
616 :
617 E : EXPECT_TRUE(heap_manager_->Free(heap.Id(), heap_alloc));
618 : EXPECT_TRUE(heap_manager_->Free(heap_manager_->process_heap(),
619 E : process_heap_alloc));
620 : EXPECT_TRUE(heap_manager_->Free(heap_manager_->process_heap(),
621 E : process_heap_wrapper_alloc));
622 E : }
623 :
624 E : TEST_F(BlockHeapManagerTest, PopOnSetQuarantineMaxSize) {
625 E : const size_t kAllocSize = 100;
626 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
627 E : ScopedHeap heap(heap_manager_);
628 E : void* mem = heap.Allocate(kAllocSize);
629 E : ASSERT_FALSE(heap.InQuarantine(mem));
630 :
631 E : ::common::AsanParameters parameters = heap_manager_->parameters();
632 E : parameters.quarantine_size = real_alloc_size;
633 E : heap_manager_->set_parameters(parameters);
634 :
635 E : ASSERT_TRUE(heap.Free(mem));
636 E : ASSERT_TRUE(heap.InQuarantine(mem));
637 :
638 : // We resize the quarantine to a smaller size; the block should pop out.
639 E : parameters.quarantine_size = real_alloc_size - 1;
640 E : heap_manager_->set_parameters(parameters);
641 E : ASSERT_FALSE(heap.InQuarantine(mem));
642 E : }
643 :
644 E : TEST_F(BlockHeapManagerTest, Quarantine) {
645 E : const size_t kAllocSize = 100;
646 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
647 E : const size_t number_of_allocs = 16;
648 E : ScopedHeap heap(heap_manager_);
649 :
650 E : ::common::AsanParameters parameters = heap_manager_->parameters();
651 E : parameters.quarantine_size = real_alloc_size * number_of_allocs;
652 E : heap_manager_->set_parameters(parameters);
653 :
654 : // Allocate a bunch of blocks until exactly one is removed from the
655 : // quarantine.
656 E : std::vector<void*> blocks;
657 E : for (size_t i = 0; i < number_of_allocs + 1; ++i) {
658 E : void* mem = heap.Allocate(kAllocSize);
659 E : ASSERT_TRUE(mem != nullptr);
660 E : heap.Free(mem);
661 E : blocks.push_back(mem);
662 E : if (i < number_of_allocs)
663 E : ASSERT_TRUE(heap.InQuarantine(mem));
664 E : }
665 :
666 E : size_t blocks_in_quarantine = 0;
667 E : for (size_t i = 0; i < blocks.size(); ++i) {
668 E : if (heap.InQuarantine(blocks[i]))
669 E : ++blocks_in_quarantine;
670 E : }
671 E : EXPECT_EQ(number_of_allocs, blocks_in_quarantine);
672 E : }
673 :
674 E : TEST_F(BlockHeapManagerTest, QuarantineLargeBlock) {
675 E : const size_t kLargeAllocSize = 100;
676 E : const size_t kSmallAllocSize = 25;
677 E : size_t real_large_alloc_size = GetAllocSize(kLargeAllocSize);
678 E : size_t real_small_alloc_size = GetAllocSize(kSmallAllocSize);
679 :
680 E : ScopedHeap heap(heap_manager_);
681 E : ::common::AsanParameters parameters = heap_manager_->parameters();
682 E : parameters.quarantine_size = real_large_alloc_size;
683 E : parameters.quarantine_block_size = real_large_alloc_size;
684 E : heap_manager_->set_parameters(parameters);
685 :
686 : // A block larger than the quarantine should not make it in.
687 E : void* mem1 = heap.Allocate(real_large_alloc_size + 1);
688 E : ASSERT_NE(static_cast<void*>(nullptr), mem1);
689 E : EXPECT_TRUE(heap.Free(mem1));
690 E : EXPECT_FALSE(heap.InQuarantine(mem1));
691 E : EXPECT_EQ(0u, heap.GetQuarantine()->GetCount());
692 :
693 : // A smaller block should make it in because the current max block size
694 : // allows it.
695 E : void* mem2 = heap.Allocate(kSmallAllocSize);
696 E : ASSERT_NE(static_cast<void*>(nullptr), mem2);
697 E : EXPECT_TRUE(heap.Free(mem2));
698 E : EXPECT_TRUE(heap.InQuarantine(mem2));
699 :
700 E : parameters.quarantine_block_size = real_small_alloc_size - 1;
701 E : heap_manager_->set_parameters(parameters);
702 :
703 : // A second small block should not make it in since we changed the block size.
704 : // However, the other block should remain in the quarantine.
705 E : void* mem3 = heap.Allocate(kSmallAllocSize);
706 E : ASSERT_NE(static_cast<void*>(nullptr), mem3);
707 E : EXPECT_TRUE(heap.Free(mem3));
708 E : EXPECT_TRUE(heap.InQuarantine(mem2));
709 E : EXPECT_FALSE(heap.InQuarantine(mem3));
710 E : }
711 :
712 E : TEST_F(BlockHeapManagerTest, UnpoisonsQuarantine) {
713 E : const size_t kAllocSize = 100;
714 E : const size_t real_alloc_size = GetAllocSize(kAllocSize);
715 :
716 E : ScopedHeap heap(heap_manager_);
717 E : ::common::AsanParameters parameters = heap_manager_->parameters();
718 E : parameters.quarantine_size = real_alloc_size;
719 E : heap_manager_->set_parameters(parameters);
720 :
721 : // Allocate a memory block and free it immediately; this puts it in the
722 : // quarantine.
723 E : void* mem = heap.Allocate(kAllocSize);
724 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
725 E : ASSERT_TRUE(heap.Free(mem));
726 E : ASSERT_TRUE(heap.InQuarantine(mem));
727 :
728 : // Assert that the shadow memory has been correctly poisoned.
729 : intptr_t mem_start = reinterpret_cast<intptr_t>(BlockGetHeaderFromBody(
730 E : reinterpret_cast<BlockBody*>(mem)));
731 E : ASSERT_EQ(0, (mem_start & 7));
732 E : size_t shadow_start = mem_start >> 3;
733 E : size_t shadow_alloc_size = real_alloc_size >> 3;
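// Note: each shadow byte covers kShadowRatio (8) bytes of memory, hence the
// right shifts by 3 above.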
734 E : for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i)
735 E : ASSERT_NE(kHeapAddressableMarker, runtime_->shadow()->shadow()[i]);
736 :
737 : // Flush the quarantine.
738 E : heap.FlushQuarantine();
739 :
740 : // Assert that the shadow memory has been correctly unpoisoned.
741 E : for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i) {
742 : if ((heap.GetHeapFeatures() &
743 E : HeapInterface::kHeapReportsReservations) != 0) {
744 i : ASSERT_EQ(kAsanReservedMarker, runtime_->shadow()->shadow()[i]);
745 i : } else {
746 E : ASSERT_EQ(kHeapAddressableMarker, runtime_->shadow()->shadow()[i]);
747 : }
748 E : }
749 E : }
750 :
751 E : TEST_F(BlockHeapManagerTest, QuarantineIsShared) {
752 E : const size_t kAllocSize = 100;
753 E : const size_t real_alloc_size = GetAllocSize(kAllocSize);
754 E : ScopedHeap heap_1(heap_manager_);
755 E : ScopedHeap heap_2(heap_manager_);
756 :
757 E : ASSERT_EQ(heap_1.GetQuarantine(), heap_2.GetQuarantine());
758 :
759 E : ::common::AsanParameters parameters = heap_manager_->parameters();
760 E : parameters.quarantine_size = real_alloc_size * 4;
761 E : heap_manager_->set_parameters(parameters);
762 :
763 E : void* heap_1_mem1 = heap_1.Allocate(kAllocSize);
764 E : ASSERT_NE(static_cast<void*>(nullptr), heap_1_mem1);
765 E : void* heap_1_mem2 = heap_1.Allocate(kAllocSize);
766 E : ASSERT_NE(static_cast<void*>(nullptr), heap_1_mem2);
767 E : void* heap_2_mem1 = heap_2.Allocate(kAllocSize);
768 E : ASSERT_NE(static_cast<void*>(nullptr), heap_2_mem1);
769 E : void* heap_2_mem2 = heap_2.Allocate(kAllocSize);
770 E : ASSERT_NE(static_cast<void*>(nullptr), heap_2_mem2);
771 :
772 E : EXPECT_TRUE(heap_1.Free(heap_1_mem1));
773 E : EXPECT_TRUE(heap_1.Free(heap_1_mem2));
774 E : EXPECT_TRUE(heap_2.Free(heap_2_mem1));
775 E : EXPECT_TRUE(heap_2.Free(heap_2_mem2));
776 :
777 E : EXPECT_TRUE(heap_1.InQuarantine(heap_1_mem1));
778 E : EXPECT_TRUE(heap_1.InQuarantine(heap_1_mem2));
779 E : EXPECT_TRUE(heap_2.InQuarantine(heap_2_mem1));
780 E : EXPECT_TRUE(heap_2.InQuarantine(heap_2_mem2));
781 :
782 E : BlockQuarantineInterface* quarantine = heap_1.GetQuarantine();
783 E : EXPECT_EQ(4, quarantine->GetCount());
784 E : heap_2.ReleaseHeap();
785 E : EXPECT_EQ(2, quarantine->GetCount());
786 E : heap_1.ReleaseHeap();
787 E : EXPECT_EQ(0, quarantine->GetCount());
788 E : }
789 :
790 E : TEST_F(BlockHeapManagerTest, AllocZeroBytes) {
791 E : ScopedHeap heap(heap_manager_);
792 E : void* mem1 = heap.Allocate(0);
793 E : ASSERT_NE(static_cast<void*>(nullptr), mem1);
794 E : void* mem2 = heap.Allocate(0);
795 E : ASSERT_NE(static_cast<void*>(nullptr), mem2);
796 E : ASSERT_NE(mem1, mem2);
797 E : ASSERT_TRUE(heap.Free(mem1));
798 E : ASSERT_TRUE(heap.Free(mem2));
799 E : }
800 :
801 E : TEST_F(BlockHeapManagerTest, AllocInvalidBlockSize) {
802 E : ScopedHeap heap(heap_manager_);
803 E : const size_t kInvalidSize = SIZE_MAX;
804 E : void* mem = heap.Allocate(kInvalidSize);
805 E : ASSERT_EQ(static_cast<void*>(nullptr), mem);
806 E : }
807 :
808 E : TEST_F(BlockHeapManagerTest, Size) {
809 E : const size_t kMaxAllocSize = 134584;
810 E : ScopedHeap heap(heap_manager_);
811 E : for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
812 E : void* mem = heap.Allocate(size);
813 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
814 E : ASSERT_EQ(size, heap_manager_->Size(heap.Id(), mem));
815 E : ASSERT_TRUE(heap.Free(mem));
816 E : }
817 E : }
818 :
819 E : TEST_F(BlockHeapManagerTest, AllocsAccessibility) {
820 E : const size_t kMaxAllocSize = 134584;
821 E : ScopedHeap heap(heap_manager_);
822 : // Ensure that the quarantine is large enough to keep the allocated blocks in
823 : // this test.
824 E : ::common::AsanParameters parameters = heap_manager_->parameters();
825 E : parameters.quarantine_size = kMaxAllocSize * 2;
826 E : heap_manager_->set_parameters(parameters);
827 E : for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
828 : // Do an alloc/free and test that access is correctly managed.
829 E : void* mem = heap.Allocate(size);
830 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
831 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(mem, size));
832 E : ASSERT_TRUE(heap.Free(mem));
833 E : ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(mem, size));
834 E : }
835 E : }
836 :
837 E : TEST_F(BlockHeapManagerTest, LockUnlock) {
838 E : ScopedHeap heap(heap_manager_);
839 : // We can't really test these, aside from not crashing.
840 E : ASSERT_NO_FATAL_FAILURE(heap_manager_->Lock(heap.Id()));
841 E : ASSERT_NO_FATAL_FAILURE(heap_manager_->Unlock(heap.Id()));
842 E : }
843 :
844 E : TEST_F(BlockHeapManagerTest, CaptureTID) {
845 E : const size_t kAllocSize = 13;
846 E : ScopedHeap heap(heap_manager_);
847 : // Ensure that the quarantine is large enough to keep this block.
848 E : ::common::AsanParameters parameters = heap_manager_->parameters();
849 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
850 E : heap_manager_->set_parameters(parameters);
851 E : uint8* mem = static_cast<uint8*>(heap.Allocate(kAllocSize));
852 E : BlockBody* body = reinterpret_cast<BlockBody*>(mem);
853 E : ASSERT_TRUE(heap.Free(mem));
854 E : BlockHeader* header = BlockGetHeaderFromBody(body);
855 E : ASSERT_NE(static_cast<BlockHeader*>(nullptr), header);
856 : EXPECT_TRUE(header->state == QUARANTINED_BLOCK ||
857 E : header->state == QUARANTINED_FLOODED_BLOCK);
858 E : BlockInfo block_info = {};
859 E : EXPECT_TRUE(BlockInfoFromMemory(header, &block_info));
860 E : EXPECT_NE(static_cast<BlockTrailer*>(nullptr), block_info.trailer);
861 :
862 E : EXPECT_EQ(block_info.trailer->alloc_tid, ::GetCurrentThreadId());
863 E : EXPECT_EQ(block_info.trailer->free_tid, ::GetCurrentThreadId());
864 E : }
865 :
866 E : TEST_F(BlockHeapManagerTest, QuarantineNeverAltersBlockContents) {
867 : // No blocks should be flood-filled when the feature is disabled.
868 E : EXPECT_NO_FATAL_FAILURE(QuarantineAltersBlockContents(0.0f, 10, 0, 0));
869 E : }
870 :
871 E : TEST_F(BlockHeapManagerTest, QuarantineSometimesAltersBlockContents) {
872 : // 100 fair coin tosses have a stddev of sqrt(100 * 0.5 * 0.5) = 5. The
873 : // flood-filled count will pretty much always be within 3 stddevs of half of
874 : // the tests unless something went terribly wrong.
875 : EXPECT_NO_FATAL_FAILURE(QuarantineAltersBlockContents(
876 E : 0.5f, 100, 50 - 3 * 5, 50 + 3 * 5));
877 E : }
878 :
879 E : TEST_F(BlockHeapManagerTest, QuarantineAlwaysAltersBlockContents) {
880 : // All blocks should be flood-filled.
881 E : EXPECT_NO_FATAL_FAILURE(QuarantineAltersBlockContents(1.0f, 10, 10, 10));
882 E : }
883 :
884 E : TEST_F(BlockHeapManagerTest, SetTrailerPaddingSize) {
885 E : const size_t kAllocSize = 13;
886 E : ScopedHeap heap(heap_manager_);
887 : // Ensure that the quarantine is large enough to keep this block with the
888 : // extra padding.
889 E : ::common::AsanParameters parameters = heap_manager_->parameters();
890 E : parameters.quarantine_size = GetAllocSize(kAllocSize) * 5;
891 E : heap_manager_->set_parameters(parameters);
892 E : size_t original_alloc_size = GetAllocSize(kAllocSize);
893 E : ::common::AsanParameters original_parameter = heap_manager_->parameters();
894 :
895 E : for (size_t padding = 0; padding < 16; ++padding) {
896 E : ::common::AsanParameters new_parameter = original_parameter;
897 : new_parameter.trailer_padding_size =
898 E : original_parameter.trailer_padding_size + padding;
899 E : heap_manager_->set_parameters(new_parameter);
900 E : size_t augmented_alloc_size = GetAllocSize(kAllocSize);
901 E : EXPECT_GE(augmented_alloc_size, original_alloc_size);
902 :
903 E : void* mem = heap.Allocate(kAllocSize);
904 E : ASSERT_TRUE(mem != nullptr);
905 :
906 E : size_t offset = kAllocSize;
907 E : for (; offset < augmented_alloc_size - sizeof(BlockHeader);
908 E : ++offset) {
909 : EXPECT_FALSE(runtime_->shadow()->IsAccessible(
910 E : reinterpret_cast<const uint8*>(mem) + offset));
911 E : }
912 E : ASSERT_TRUE(heap.Free(mem));
913 E : }
914 E : heap_manager_->set_parameters(original_parameter);
915 E : }
916 :
917 E : TEST_F(BlockHeapManagerTest, BlockChecksumUpdatedWhenEnterQuarantine) {
918 E : const size_t kAllocSize = 100;
919 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
920 E : ScopedHeap heap(heap_manager_);
921 :
922 E : ::common::AsanParameters parameters = heap_manager_->parameters();
923 E : parameters.quarantine_size = real_alloc_size;
924 E : heap_manager_->set_parameters(parameters);
925 :
926 E : void* mem = heap.Allocate(kAllocSize);
927 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
928 E : BlockInfo block_info = {};
929 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(mem, &block_info));
930 E : EXPECT_TRUE(BlockChecksumIsValid(block_info));
931 E : heap.Free(mem);
932 E : EXPECT_TRUE(BlockChecksumIsValid(block_info));
933 E : ASSERT_TRUE(heap.InQuarantine(mem));
934 E : }
935 :
936 : static const size_t kChecksumRepeatCount = 10;
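// Rationale for the repetition in the corruption tests below: a single
// attempt can be masked by a checksum collision with some small probability
// p, so the chance that all kChecksumRepeatCount independent attempts
// collide is roughly p^10, which is negligible.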
937 :
938 E : TEST_F(BlockHeapManagerTest, CorruptAsEntersQuarantine) {
939 E : const size_t kAllocSize = 100;
940 E : ::common::AsanParameters parameters = heap_manager_->parameters();
941 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
942 E : heap_manager_->set_parameters(parameters);
943 :
944 E : ScopedHeap heap(heap_manager_);
945 : // This can fail because of a checksum collision. However, we run it a
946 : // handful of times to keep the chances as small as possible.
947 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
948 E : heap.FlushQuarantine();
949 E : void* mem = heap.Allocate(kAllocSize);
950 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
951 E : reinterpret_cast<int*>(mem)[-1] = rand();
952 E : EXPECT_TRUE(heap.Free(mem));
953 :
954 : // Try again for all but the last attempt if this appears to have failed.
955 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
956 i : continue;
957 :
958 E : ASSERT_EQ(1u, errors_.size());
959 E : ASSERT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
960 E : ASSERT_EQ(mem, errors_[0].location);
961 :
962 E : break;
963 i : }
964 E : }
965 :
966 E : TEST_F(BlockHeapManagerTest, CorruptAsExitsQuarantine) {
967 E : const size_t kAllocSize = 100;
968 E : ::common::AsanParameters parameters = heap_manager_->parameters();
969 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
970 E : heap_manager_->set_parameters(parameters);
971 :
972 E : ScopedHeap heap(heap_manager_);
973 : // This can fail because of a checksum collision. However, we run it a
974 : // handful of times to keep the chances as small as possible.
975 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
976 E : heap.FlushQuarantine();
977 E : void* mem = heap.Allocate(kAllocSize);
978 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
979 E : EXPECT_TRUE(heap.Free(mem));
980 E : EXPECT_TRUE(errors_.empty());
981 :
982 : // Change some of the block content and then flush the quarantine. The block
983 : // hash should now be invalid, which should cause an error to be fired.
984 E : reinterpret_cast<int32*>(mem)[0] = rand();
985 E : heap.FlushQuarantine();
986 :
987 : // Try again for all but the last attempt if this appears to have failed.
988 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
989 i : continue;
990 :
991 E : EXPECT_EQ(1u, errors_.size());
992 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
993 : EXPECT_EQ(
994 : reinterpret_cast<BlockHeader*>(mem) - 1,
995 E : reinterpret_cast<BlockHeader*>(errors_[0].location));
996 :
997 E : break;
998 i : }
999 E : }
1000 :
1001 E : TEST_F(BlockHeapManagerTest, CorruptAsExitsQuarantineOnHeapDestroy) {
1002 E : const size_t kAllocSize = 100;
1003 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1004 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
1005 E : heap_manager_->set_parameters(parameters);
1006 :
1007 : // This can fail because of a checksum collision. However, we run it a
1008 : // handful of times to keep the chances as small as possible.
1009 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
1010 E : void* mem = nullptr;
1011 : {
1012 E : ScopedHeap heap(heap_manager_);
1013 E : heap.FlushQuarantine();
1014 E : mem = heap.Allocate(kAllocSize);
1015 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1016 E : EXPECT_TRUE(heap.Free(mem));
1017 E : EXPECT_TRUE(errors_.empty());
1018 :
1019 : // Change some of the block content to invalidate the block's hash.
1020 E : reinterpret_cast<int32*>(mem)[0] = rand();
1021 E : }
1022 :
1023 : // The destructor of |heap| should be called and all the quarantined blocks
1024 : // belonging to this heap should be freed, which should trigger an error as
1025 : // the block is now corrupt.
1026 :
1027 : // Try again for all but the last attempt if this appears to have failed.
1028 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
1029 i : continue;
1030 :
1031 E : EXPECT_EQ(1u, errors_.size());
1032 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
1033 : EXPECT_EQ(reinterpret_cast<BlockHeader*>(mem) - 1,
1034 E : reinterpret_cast<BlockHeader*>(errors_[0].location));
1035 :
1036 E : break;
1037 i : }
1038 E : }
1039 :
1040 E : TEST_F(BlockHeapManagerTest, CorruptHeapOnTrimQuarantine) {
1041 E : const size_t kAllocSize = 100;
1042 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1043 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
1044 E : heap_manager_->set_parameters(parameters);
1045 :
1046 : // This can fail because of a checksum collision. However, we run it a
1047 : // handful of times to keep the chances as small as possible.
1048 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
1049 E : void* mem = nullptr;
1050 : {
1051 E : ScopedHeap heap(heap_manager_);
1052 E : heap.FlushQuarantine();
1053 E : mem = heap.Allocate(kAllocSize);
1054 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1055 E : EXPECT_TRUE(heap.Free(mem));
1056 E : EXPECT_TRUE(errors_.empty());
1057 :
1058 : // Change some of the block content to invalidate the block's hash.
1059 E : reinterpret_cast<int32*>(mem)[0] = rand();
1060 E : }
1061 :
1062 : // The destructor of |heap| should be called and all the quarantined blocks
1063 : // belonging to this heap should be freed, which should trigger an error as
1064 : // the block is now corrupt.
1065 :
1066 : // Try again for all but the last attempt if this appears to have failed.
1067 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
1068 i : continue;
1069 :
1070 E : EXPECT_EQ(1u, errors_.size());
1071 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
1072 : EXPECT_EQ(reinterpret_cast<BlockHeader*>(mem) - 1,
1073 E : reinterpret_cast<BlockHeader*>(errors_[0].location));
1074 :
1075 E : break;
1076 i : }
1077 E : }
1078 :
1079 : // Prevent this test from being optimized; otherwise the loop that does the
1080 : // block allocations might get unrolled and the allocations won't share the
1081 : // same allocation stack trace.
1082 : #pragma optimize("", off)
1083 E : TEST_F(BlockHeapManagerTest, CorruptionIsReportedOnlyOnce) {
1084 E : const size_t kAllocSize = 100;
1085 E : const size_t kAllocs = 100;
1086 E : ASSERT_GT(kAllocs, kChecksumRepeatCount);
1087 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1088 E : parameters.quarantine_size = kAllocs * GetAllocSize(kAllocSize);
1089 E : parameters.prevent_duplicate_corruption_crashes = true;
1090 E : heap_manager_->set_parameters(parameters);
1091 :
1092 E : ScopedHeap heap(heap_manager_);
1093 E : std::vector<void*> allocs(kAllocs);
1094 :
1095 : // Allocate and free a lot of blocks with an identical stack id and corrupt
1096 : // them while they're in the quarantine.
1097 E : for (size_t i = 0; i < kAllocs; ++i) {
1098 E : void* mem = heap.Allocate(kAllocSize);
1099 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1100 E : EXPECT_TRUE(heap.Free(mem));
1101 E : EXPECT_TRUE(errors_.empty());
1102 :
1103 : // Change some of the block content to corrupt it.
1104 E : reinterpret_cast<int32*>(mem)[0] ^= 0xFFFFFFFF;
1105 E : }
1106 :
1107 : // Empty the quarantine and free all the blocks that were in it. An error
1108 : // should be reported only for the first corrupt block.
1109 E : BlockQuarantineInterface::ObjectVector blocks;
1110 E : heap.GetQuarantine()->Empty(&blocks);
1111 E : bool first_corrupt_block_has_been_found = false;
1112 E : size_t i = 0;
1113 E : for (auto block : blocks) {
1114 E : errors_.clear();
1115 E : BlockInfo block_info = {};
1116 E : ConvertBlockInfo(block, &block_info);
1117 E : heap_manager_->FreePotentiallyCorruptBlock(&block_info);
1118 E : if (!first_corrupt_block_has_been_found && i < kChecksumRepeatCount) {
1119 E : if (!errors_.empty()) {
1120 E : EXPECT_EQ(1u, errors_.size());
1121 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
1122 E : first_corrupt_block_has_been_found = true;
1123 : }
1124 E : } else {
1125 E : EXPECT_TRUE(errors_.empty());
1126 : }
1127 E : ++i;
1128 E : }
1129 E : }
1130 : #pragma optimize("", on)
1131 :
1132 E : TEST_F(BlockHeapManagerTest, DoubleFree) {
1133 E : const size_t kAllocSize = 100;
1134 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1135 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
1136 E : heap_manager_->set_parameters(parameters);
1137 :
1138 E : ScopedHeap heap(heap_manager_);
1139 E : void* mem = heap.Allocate(kAllocSize);
1140 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
1141 E : EXPECT_TRUE(heap.Free(mem));
1142 E : EXPECT_FALSE(heap.Free(mem));
1143 :
1144 E : EXPECT_EQ(1u, errors_.size());
1145 E : EXPECT_EQ(DOUBLE_FREE, errors_[0].error_type);
1146 E : EXPECT_EQ(mem, errors_[0].location);
1147 E : }
1148 :
1149 E : TEST_F(BlockHeapManagerTest, SubsampledAllocationGuards) {
1150 E : ::common::AsanParameters parameters = heap_manager_->parameters();
1151 E : parameters.allocation_guard_rate = 0.5;
1152 E : heap_manager_->set_parameters(parameters);
1153 E : ScopedHeap heap(heap_manager_);
1154 :
1155 E : size_t guarded_allocations = 0;
1156 E : size_t unguarded_allocations = 0;
1157 :
1158 : // Make a large number of allocations.
1159 E : const size_t kAllocationCount = 10000;
1160 : const size_t kAllocationSizes[] = {
1161 E : 1, 2, 4, 8, 14, 30, 128, 237, 500, 1000, 2036 };
1162 E : std::vector<void*> allocations;
1163 E : for (size_t i = 0; i < kAllocationCount; ++i) {
1164 E : size_t alloc_size = kAllocationSizes[i % arraysize(kAllocationSizes)];
1165 E : void* alloc = heap.Allocate(alloc_size);
1166 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1167 :
1168 E : for (size_t i = 0; i < alloc_size; ++i) {
1169 : EXPECT_TRUE(runtime_->shadow()->IsAccessible(
1170 E : reinterpret_cast<uint8*>(alloc) + i));
1171 E : }
1172 :
1173 : // Determine if the allocation has guards or not.
1174 : BlockHeader* header = BlockGetHeaderFromBody(
1175 E : reinterpret_cast<BlockBody*>(alloc));
1176 E : if (header == nullptr) {
1177 E : ++unguarded_allocations;
1178 E : } else {
1179 E : ++guarded_allocations;
1180 : }
1181 :
1182 : if ((heap.GetHeapFeatures() &
1183 E : HeapInterface::kHeapSupportsGetAllocationSize) != 0) {
1184 : if ((heap.GetHeapFeatures() &
1185 E : HeapInterface::kHeapGetAllocationSizeIsUpperBound) != 0) {
1186 i : EXPECT_LE(alloc_size, heap_manager_->Size(heap.Id(), alloc));
1187 i : } else {
1188 E : EXPECT_EQ(alloc_size, heap_manager_->Size(heap.Id(), alloc));
1189 : }
1190 : }
1191 :
1192 : // Delete half of the allocations immediately, and keep half of them
1193 : // around for longer. This puts more stress on the quarantine itself.
1195 E : if (base::RandDouble() < 0.5) {
1196 E : EXPECT_TRUE(heap.Free(alloc));
1197 E : } else {
1198 E : allocations.push_back(alloc);
1199 : }
1200 E : }
1201 :
1202 : // Free the outstanding allocations.
1203 E : for (size_t i = 0; i < allocations.size(); ++i)
1204 E : EXPECT_TRUE(heap.Free(allocations[i]));
1205 :
1206 : // Clear the quarantine. This should free up the remaining instrumented
1207 : // but quarantined blocks.
1208 E : EXPECT_NO_FATAL_FAILURE(heap.FlushQuarantine());
1209 :
1210 : // This could theoretically fail, but that would imply an extremely bad
1211 : // implementation of the underlying random number generator. There are 10000
1212 : // allocations. Since this is effectively a fair coin toss we expect a
1213 : // standard deviation of 0.5 * sqrt(10000) = 50. A 10% margin is
1214 : // 1000 / 50 = 20 standard deviations. For |z| > 20, the p-value is 5.5e-89,
1215 : // or 89 nines of confidence. That should keep any flake largely at bay.
1216 : // Thus, if this fails it's pretty much certain the implementation is at
1217 : // fault.
1218 E : EXPECT_LT(4 * kAllocationCount / 10, guarded_allocations);
1219 E : EXPECT_GT(6 * kAllocationCount / 10, guarded_allocations);
1220 E : }
1221 :
1222 : // Ensures that the ZebraBlockHeap overrides the provided heap.
1223 E : TEST_F(BlockHeapManagerTest, ZebraHeapIdInTrailerAfterAllocation) {
1224 E : EnableTestZebraBlockHeap();
1225 E : ScopedHeap heap(heap_manager_);
1226 E : const size_t kAllocSize = 0x100;
1227 E : void* alloc = heap.Allocate(kAllocSize);
1228 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1229 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1230 :
1231 : // Get the heap_id from the block trailer.
1232 E : BlockInfo block_info = {};
1233 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1234 :
1235 : {
1236 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1237 : // The heap_id stored in the block trailer should match the zebra heap id.
1238 : EXPECT_EQ(heap_manager_->zebra_block_heap_id_,
1239 E : block_info.trailer->heap_id);
1240 E : }
1241 :
1242 E : EXPECT_TRUE(heap.Free(alloc));
1243 E : }
1244 :
1245 : // Ensures that the provided heap is used when the ZebraBlockHeap cannot handle
1246 : // the allocation.
1247 E : TEST_F(BlockHeapManagerTest, DefaultHeapIdInTrailerWhenZebraHeapIsFull) {
1248 E : EnableTestZebraBlockHeap();
1249 E : ScopedHeap heap(heap_manager_);
1250 E : const size_t kAllocSize = 0x100;
1251 : // Refuse allocations on the ZebraBlockHeap.
1252 E : test_zebra_block_heap_->set_refuse_allocations(true);
1253 :
1254 E : void* alloc = heap.Allocate(kAllocSize);
1255 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1256 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1257 :
1258 : // Get the heap_id from the block trailer.
1259 E : BlockInfo block_info = {};
1260 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1261 : {
1262 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1263 : // The heap_id stored in the block trailer should match the provided heap.
1264 E : EXPECT_EQ(heap.Id(), block_info.trailer->heap_id);
1265 E : }
1266 E : EXPECT_TRUE(heap.Free(alloc));
1267 E : }
1268 :
1269 : // Allocations larger than the page size (4KB) will not be served by the zebra
1270 : // heap.
1271 E : TEST_F(BlockHeapManagerTest, AllocStress) {
1272 E : EnableTestZebraBlockHeap();
1273 E : ScopedHeap heap(heap_manager_);
1274 E : for (size_t i = 0; i < 3000; ++i) {
1275 : // Sometimes allocate more than one page, to ensure that allocations get
1276 : // spread across the ZebraBlockHeap and normal heaps.
1277 E : const size_t kAllocSize = (i * 997) % (9 * 1024);
1278 E : void* alloc = heap.Allocate(kAllocSize);
1279 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1280 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1281 : // Free should succeed, even if the block is quarantined.
1282 E : EXPECT_TRUE(heap.Free(alloc));
1283 E : }
1284 E : }
1285 :
1286 : // The BlockHeapManager correctly quarantines the memory after free.
1287 E : TEST_F(BlockHeapManagerTest, QuarantinedAfterFree) {
1288 E : EnableTestZebraBlockHeap();
1289 E : ScopedHeap heap(heap_manager_);
1290 : // Always quarantine if possible.
1291 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1292 :
1293 E : const size_t kAllocSize = 0x100;
1294 E : void* alloc = heap.Allocate(kAllocSize);
1295 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1296 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1297 : // Free should succeed, even if the block is quarantined.
1298 E : EXPECT_TRUE(heap.Free(alloc));
1299 : // The block should be quarantined and poisoned.
1300 E : ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(alloc, kAllocSize));
1301 E : BlockInfo block_info = {};
1302 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1303 :
1304 : {
1305 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1306 : EXPECT_TRUE(block_info.header->state == QUARANTINED_BLOCK ||
1307 E : block_info.header->state == QUARANTINED_FLOODED_BLOCK);
1308 E : }
1309 E : }
1310 :
1311 : // set_parameters should set the zebra_block_heap_quarantine_ratio flag
1312 : // correctly.
1313 E : TEST_F(BlockHeapManagerTest, set_parametersSetsZebraBlockHeapQuarantineRatio) {
1314 E : EnableTestZebraBlockHeap();
1315 E : float new_ratio = 1.0f / 8;
1316 E : ::common::AsanParameters params = heap_manager_->parameters();
1317 E : params.zebra_block_heap_quarantine_ratio = new_ratio;
1318 E : heap_manager_->set_parameters(params);
1319 E : EXPECT_EQ(new_ratio, test_zebra_block_heap_->quarantine_ratio());
1320 E : }
1321 :
1322 : // Test for double free errors using the zebra heap.
1323 E : TEST_F(BlockHeapManagerTest, DoubleFreeOnZebraHeap) {
1324 E : EnableTestZebraBlockHeap();
1325 E : ScopedHeap heap(heap_manager_);
1326 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1327 :
1328 E : const size_t kAllocSize = 0xFF;
1329 E : void* alloc = heap.Allocate(kAllocSize);
1330 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1331 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1332 :
1333 E : EXPECT_TRUE(heap.Free(alloc));
1334 E : EXPECT_FALSE(heap.Free(alloc));
1335 :
1336 E : EXPECT_EQ(1u, errors_.size());
1337 E : EXPECT_EQ(DOUBLE_FREE, errors_[0].error_type);
1338 E : EXPECT_EQ(alloc, errors_[0].location);
1339 E : }
1340 :
1341 E : TEST_F(BlockHeapManagerTest, AllocatedBlockIsProtected) {
1342 E : EnableTestZebraBlockHeap();
1343 E : ScopedHeap heap(heap_manager_);
1344 :
1345 E : const size_t kAllocSize = 0xFF;
1346 E : void* alloc = heap.Allocate(kAllocSize);
1347 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1348 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1349 :
1350 E : BlockInfo block_info = {};
1351 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1352 :
1353 : // Test the block protections before being quarantined.
1354 : // The whole block should be unpoisoned in the shadow memory.
1355 E : for (size_t i = 0; i < block_info.body_size; ++i)
1356 E : EXPECT_TRUE(runtime_->shadow()->IsAccessible(block_info.RawBody() + i));
1357 :
1358 : // Ensure that the block left redzone is page-protected.
1359 E : for (size_t i = 0; i < block_info.left_redzone_pages_size; ++i)
1360 i : EXPECT_TRUE(IsNotAccessible(block_info.left_redzone_pages + i));
1361 :
1362 : // Ensure that the block right redzone is page-protected.
1363 E : for (size_t i = 0; i < block_info.right_redzone_pages_size; ++i)
1364 E : EXPECT_TRUE(IsNotAccessible(block_info.right_redzone_pages + i));
1365 :
1366 : // The block body should be accessible.
1367 E : for (size_t i = 0; i < block_info.body_size; ++i)
1368 E : EXPECT_TRUE(IsAccessible(block_info.RawBody() + i));
1369 :
1370 : {
1371 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1372 E : EXPECT_EQ(ALLOCATED_BLOCK, block_info.header->state);
1373 E : }
1374 :
1375 E : EXPECT_TRUE(heap.Free(alloc));
1376 E : }
1377 :
1378 E : TEST_F(BlockHeapManagerTest, QuarantinedBlockIsProtected) {
1379 E : EnableTestZebraBlockHeap();
1380 E : ScopedHeap heap(heap_manager_);
1381 : // Always quarantine if possible.
1382 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1383 :
1384 E : for (size_t i = 0; i < 20; ++i) {
1385 E : const size_t kAllocSize = 0xFF + i;
1386 E : void* alloc = heap.Allocate(kAllocSize);
1387 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1388 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1389 :
1390 E : BlockInfo block_info = {};
1391 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1392 :
1393 : // The block is freed and quarantined.
1394 E : EXPECT_TRUE(heap.Free(alloc));
1395 :
1396 : // Test the block protections after being quarantined.
1397 : // The whole block should be poisoned in the shadow memory.
1398 E : for (size_t i = 0; i < block_info.body_size; ++i) {
1399 E : EXPECT_FALSE(runtime_->shadow()->IsAccessible(block_info.RawBody() + i));
1400 E : }
1401 :
1402 : // Ensure that the block left redzone is page-protected.
1403 E : for (size_t i = 0; i < block_info.left_redzone_pages_size; ++i)
1404 i : EXPECT_TRUE(IsNotAccessible(block_info.left_redzone_pages + i));
1405 :
1406 : // Ensure that the block right redzone is page-protected.
1407 E : for (size_t i = 0; i < block_info.right_redzone_pages_size; ++i)
1408 E : EXPECT_TRUE(IsNotAccessible(block_info.right_redzone_pages + i));
1409 :
1410 : // Ensure that the block body is page-protected.
1411 E : for (size_t i = 0; i < block_info.body_size; ++i)
1412 E : EXPECT_TRUE(IsNotAccessible(block_info.RawBody() + i));
1413 :
1414 : {
1415 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1416 : EXPECT_TRUE(block_info.header->state == QUARANTINED_BLOCK ||
1417 E : block_info.header->state == QUARANTINED_FLOODED_BLOCK);
1418 E : }
1419 E : }
1420 E : }
1421 :
1422 E : TEST_F(BlockHeapManagerTest, NonQuarantinedBlockIsMarkedAsFreed) {
1423 E : EnableTestZebraBlockHeap();
1424 E : ScopedHeap heap(heap_manager_);
1425 :   // Disable the zebra heap quarantine.
1426 E : test_zebra_block_heap_->set_refuse_push(true);
1427 :
1428 E : const size_t kAllocSize = 0x100;
1429 E : void* alloc = heap.Allocate(kAllocSize);
1430 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1431 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1432 :
1433 E : BlockInfo block_info = {};
1434 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1435 :
1436 : // The block is freed but not quarantined.
1437 E : EXPECT_TRUE(heap.Free(alloc));
1438 :
1439 : // The whole block should be unpoisoned in the shadow memory, and its
1440 : // associated pages unprotected.
1441 E : for (size_t i = 0; i < block_info.block_size; ++i) {
1442 :     ASSERT_TRUE(runtime_->shadow()->IsAccessible(
1443 E :         block_info.RawBlock() + i));
1444 : ASSERT_FALSE(runtime_->shadow()->PageIsProtected(
1445 E : block_info.RawBlock() + i));
1446 E : }
1447 :
1448 E : EXPECT_EQ(FREED_BLOCK, block_info.header->state);
1449 E : }
1450 :
1451 E : TEST_F(BlockHeapManagerTest, ZebraBlockHeapQuarantineRatioIsRespected) {
1452 E : EnableTestZebraBlockHeap();
1453 E : ScopedHeap heap(heap_manager_);
1454 : // Set a non-standard quarantine ratio.
1455 E : float quarantine_ratio = 0.37f;
1456 E : test_zebra_block_heap_->set_quarantine_ratio(quarantine_ratio);
1457 :
1458 E : const size_t kAllocations = 2000;
1459 :
1460 E : size_t zebra_heap_size = test_zebra_block_heap_->slab_count_;
1461 E : const size_t max_quarantine_size = zebra_heap_size * quarantine_ratio;
1462 :
1463 :   // All allocations have a maximum size of 1KB, so they are all served by
1464 :   // the zebra heap.
1465 E : for (size_t i = 0; i < kAllocations; ++i) {
1466 E : const size_t kAllocSize = (0x100 + i) % 1024;
1467 E : void* alloc = heap.Allocate(kAllocSize);
1468 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1469 :
1470 E : BlockInfo block_info = {};
1471 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1472 E : EXPECT_TRUE(heap.Free(alloc));
1473 :
1474 : // After Free the quarantine should be trimmed, enforcing the quarantine
1475 : // size upper bound.
1476 E : EXPECT_LE(test_zebra_block_heap_->GetCount(), max_quarantine_size);
1477 :
1478 : {
1479 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1480 : EXPECT_TRUE(block_info.header->state == QUARANTINED_BLOCK ||
1481 E : block_info.header->state == QUARANTINED_FLOODED_BLOCK);
1482 E : }
1483 E : }
1484 E : }
1485 :
1486 : // Ensures that the LargeBlockHeap overrides the provided heap if the allocation
1487 : // size exceeds the threshold.
1488 E : TEST_F(BlockHeapManagerTest, LargeBlockHeapUsedForLargeAllocations) {
1489 E : EnableLargeBlockHeap(GetPageSize());
1490 :
1491 :   // Disable targeted heaps as they interfere with this test.
1492 E :   ::common::AsanParameters params = heap_manager_->parameters();
1493 E :   heap_manager_->set_parameters(params);
1494 :
1495 E : ScopedHeap heap(heap_manager_);
1496 :
1497 E : const size_t kAllocSize = GetPageSize() + 0x100;
1498 E : void* alloc = heap.Allocate(kAllocSize);
1499 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1500 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1501 :
1502 : // Get the heap_id from the block trailer.
1503 E : BlockInfo block_info = {};
1504 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1505 :
1506 : {
1507 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1508 : // The heap_id stored in the block trailer should match the large block
1509 : // heap id.
1510 : EXPECT_EQ(heap_manager_->large_block_heap_id_,
1511 E : block_info.trailer->heap_id);
1512 E : }
1513 :
1514 E : EXPECT_TRUE(heap.Free(alloc));
1515 E : }
1516 :
1517 : // Ensures that the LargeBlockHeap is not used for a small allocation.
1518 E : TEST_F(BlockHeapManagerTest, LargeBlockHeapNotUsedForSmallAllocations) {
1519 E : EnableLargeBlockHeap(GetPageSize());
1520 E : ScopedHeap heap(heap_manager_);
1521 :
1522 E : const size_t kAllocSize = 0x100;
1523 E : void* alloc = heap.Allocate(kAllocSize);
1524 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1525 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1526 :
1527 : // Get the heap_id from the block trailer.
1528 E : BlockInfo block_info = {};
1529 E : EXPECT_TRUE(runtime_->shadow()->BlockInfoFromShadow(alloc, &block_info));
1530 :
1531 : {
1532 E : ScopedBlockAccess block_access(block_info, runtime_->shadow());
1533 : // The provided heap ID should be the one in the block trailer.
1534 E : EXPECT_EQ(heap.Id(), block_info.trailer->heap_id);
1535 E : }
1536 :
1537 E : EXPECT_TRUE(heap.Free(alloc));
1538 E : }
1539 :
1540 E : TEST_F(BlockHeapManagerTest, AllocationFilterFlag) {
1541 E : EXPECT_NE(TLS_OUT_OF_INDEXES, heap_manager_->allocation_filter_flag_tls_);
1542 E : heap_manager_->set_allocation_filter_flag(true);
1543 E : EXPECT_TRUE(heap_manager_->allocation_filter_flag());
1544 E : heap_manager_->set_allocation_filter_flag(false);
1545 E : EXPECT_FALSE(heap_manager_->allocation_filter_flag());
1546 E : heap_manager_->set_allocation_filter_flag(true);
1547 E : EXPECT_TRUE(heap_manager_->allocation_filter_flag());
1548 E : }
1549 :
1550 : namespace {
1551 :
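 : // Counts the number of heaps in the null-terminated array |heaps|.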
1552 E : size_t CountLockedHeaps(HeapInterface** heaps) {
1553 E : size_t i = 0;
1554 E : while (heaps[i] != nullptr) {
1555 E : ++i;
1556 E : }
1557 E : return i;
1558 E : }
1559 :
1560 : } // namespace
1561 :
1562 E : TEST_F(BlockHeapManagerTest, BestEffortLockAllNoLocksHeld) {
1563 E : heap_manager_->BestEffortLockAll();
1564 : EXPECT_EQ(CountLockedHeaps(heap_manager_->locked_heaps_),
1565 E : heap_manager_->heaps_.size());
1566 E : heap_manager_->UnlockAll();
1567 E : }
1568 :
1569 : namespace {
1570 :
1571 : // A helper thread runner that acquires a HeapInterface lock and holds it
1572 : // until it is signaled to release it.
1573 : class GrabHeapLockRunner : public base::DelegateSimpleThread::Delegate {
1574 : public:
1575 E : explicit GrabHeapLockRunner(HeapInterface* heap)
1576 : : heap_(heap), cv_(&cv_lock_), acquired_(false), release_(false) {
1577 E : DCHECK_NE(static_cast<HeapInterface*>(nullptr), heap);
1578 E : }
1579 :
1580 E : virtual void Run() {
1581 E : DCHECK_NE(static_cast<HeapInterface*>(nullptr), heap_);
1582 E : heap_->Lock();
1583 E : SignalAcquired();
1584 E : WaitRelease();
1585 E : heap_->Unlock();
1586 E : }
1587 :
1588 : // Waits until |acquired| is true.
1589 E : void WaitAcquired() {
1590 E : while (true) {
1591 E : base::AutoLock auto_lock(cv_lock_);
1592 E : if (acquired_)
1593 E : return;
1594 i : cv_.Wait();
1595 i : }
1596 E : }
1597 :
1598 : // To be called externally to notify this runner that the lock may be
1599 : // released and the thread torn down.
1600 E : void SignalRelease() {
1601 E : base::AutoLock auto_lock(cv_lock_);
1602 E : release_ = true;
1603 E : cv_.Broadcast();
1604 E : }
1605 :
1606 : private:
1607 : // Notifies external observers that the lock has been acquired.
1608 E : void SignalAcquired() {
1609 E : base::AutoLock auto_lock(cv_lock_);
1610 E : acquired_ = true;
1611 E : cv_.Broadcast();
1612 E : }
1613 :
1614 : // Waits until |release| is true.
1615 E : void WaitRelease() {
1616 E : while (true) {
1617 E : base::AutoLock auto_lock(cv_lock_);
1618 E : if (release_)
1619 E : return;
1620 E : cv_.Wait();
1621 E : }
1622 E : }
1623 :
1624 : HeapInterface* heap_;
1625 : base::Lock cv_lock_;
1626 : base::ConditionVariable cv_;
1627 : bool acquired_;
1628 : bool release_;
1629 :
1630 : DISALLOW_COPY_AND_ASSIGN(GrabHeapLockRunner);
1631 : };
1632 :
1633 : } // namespace
1634 :
1635 E : TEST_F(BlockHeapManagerTest, BestEffortLockAllOneHeapLockHeld) {
1636 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1637 E : GrabHeapLockRunner runner(heap_manager_->heaps_.begin()->first);
1638 E : base::DelegateSimpleThread thread(&runner, "GrabHeapLockRunner");
1639 E : thread.Start();
1640 E : runner.WaitAcquired();
1641 E : heap_manager_->BestEffortLockAll();
1642 :
1643 : // Expect all but one heap lock to have been acquired.
1644 : EXPECT_EQ(CountLockedHeaps(heap_manager_->locked_heaps_),
1645 E : heap_manager_->heaps_.size() - 1);
1646 E : heap_manager_->UnlockAll();
1647 E : runner.SignalRelease();
1648 E : thread.Join();
1649 E : }
1650 :
1651 : // These functions are tested explicitly because the AsanRuntime reaches in
1652 : // to use them.
1653 :
1654 E : TEST_F(BlockHeapManagerTest, IsValidHeapIdUnlocked) {
1655 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1656 E : EXPECT_FALSE(heap_manager_->IsValidHeapIdUnlocked(0xDEADBEEF, false));
1657 E : for (auto& hq_pair : heap_manager_->heaps_) {
1658 E : TestBlockHeapManager::HeapQuarantinePair* hq = &hq_pair;
1659 : TestBlockHeapManager::HeapId heap_id =
1660 E : reinterpret_cast<TestBlockHeapManager::HeapId>(hq);
1661 E : EXPECT_TRUE(heap_manager_->IsValidHeapIdUnlocked(heap_id, false));
1662 E : }
1663 E : }
1664 :
1665 E : TEST_F(BlockHeapManagerTest, GetHeapTypeUnlocked) {
1666 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1667 E : for (auto& hq_pair : heap_manager_->heaps_) {
1668 E : TestBlockHeapManager::HeapQuarantinePair* hq = &hq_pair;
1669 : TestBlockHeapManager::HeapId heap_id =
1670 E : reinterpret_cast<TestBlockHeapManager::HeapId>(hq);
1671 E : EXPECT_NE(kUnknownHeapType, heap_manager_->GetHeapTypeUnlocked(heap_id));
1672 E : }
1673 E : }
1674 :
1675 E : TEST_F(BlockHeapManagerTest, ComputeRelativeStackId) {
1676 : // This test is done here and not in stack_capture_unittest, as the latter
1677 : // doesn't have the provision for faking the module address and would
1678 : // therefore ignore all the frames.
1679 E : common::StackCapture stack;
1680 E : stack.InitFromStack();
1681 :
1682 E : EXPECT_NE(0U, stack.relative_stack_id());
1683 E : }
1684 :
1685 : namespace {
1686 :
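 : // Returns true if every shadow byte covering |alloc| is marked as
 : // addressable, as expected immediately after a successful allocation.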
1687 : bool ShadowIsConsistentPostAlloc(
1688 : Shadow* shadow, const void* alloc, size_t size) {
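 :   // Map the allocation's address range onto shadow byte indices; each
 :   // shadow byte describes 2^kShadowRatioLog bytes of memory.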
1689 : uintptr_t index = reinterpret_cast<uintptr_t>(alloc);
1690 : index >>= kShadowRatioLog;
1691 : uintptr_t index_end = index + (size >> kShadowRatioLog);
1692 : for (size_t i = index; i < index_end; ++i) {
1693 : if (shadow->shadow()[i] != ShadowMarker::kHeapAddressableMarker)
1694 : return false;
1695 : }
1696 : return true;
1697 : }
1698 :
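 : // Returns true if the shadow bytes covering |alloc| are in a consistent
 : // state after the allocation has been freed.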
1699 : bool ShadowIsConsistentPostFree(
1700 : Shadow* shadow, const void* alloc, size_t size) {
1701 : DCHECK_NE(static_cast<Shadow*>(nullptr), shadow);
1702 : uintptr_t index = reinterpret_cast<uintptr_t>(alloc);
1703 : index >>= kShadowRatioLog;
1704 : uintptr_t index_end = index + (size >> kShadowRatioLog);
1705 :
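 :   // The marker of the first shadow byte determines the expected state of
 :   // the whole range; it must be one of the valid post-free markers.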
1706 : uint8 m = shadow->shadow()[index];
1707 : if (m != ShadowMarker::kHeapAddressableMarker &&
1708 : m != ShadowMarker::kAsanReservedMarker &&
1709 : m != ShadowMarker::kHeapFreedMarker) {
1710 : return false;
1711 : }
1712 :
1713 :   // We expect green memory only for large allocations, which are directly
1714 :   // mapped. Small allocations should be returned to a common pool and
1715 :   // marked as reserved by the CtMalloc heap.
1716 : if (m == ShadowMarker::kHeapAddressableMarker && size < 1 * 1024 * 1024)
1717 : return false;
1718 :
1719 : for (size_t i = index; i < index_end; ++i) {
1720 :     if (shadow->shadow()[i] != m)
1721 : return false;
1722 : }
1723 : return true;
1724 : }
1725 :
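 : // Test fixture that sets up a dedicated DebugShadow and verifies that it
 : // is clean both before and after each test runs.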
1726 : class BlockHeapManagerIntegrationTest : public testing::Test {
1727 : public:
1728 : BlockHeapManagerIntegrationTest()
1729 : : shadow_() {
1730 : }
1731 :
1732 : void SetUp() override {
1733 : shadow_.SetUp();
1734 : ASSERT_TRUE(shadow_.IsClean());
1735 : }
1736 :
1737 : void TearDown() override {
1738 : ASSERT_TRUE(shadow_.IsClean());
1739 : shadow_.TearDown();
1740 : }
1741 :
1742 : testing::DebugShadow shadow_;
1743 : };
1744 :
1745 : } // namespace
1746 :
1747 : } // namespace heap_managers
1748 : } // namespace asan
1749 : } // namespace agent
|