1 : // Copyright 2014 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/agent/asan/heap_managers/block_heap_manager.h"
16 :
17 : #include <vector>
18 :
19 : #include "base/bind.h"
20 : #include "base/compiler_specific.h"
21 : #include "base/rand_util.h"
22 : #include "base/sha1.h"
23 : #include "base/debug/alias.h"
24 : #include "base/synchronization/condition_variable.h"
25 : #include "base/synchronization/lock.h"
26 : #include "base/threading/simple_thread.h"
27 : #include "gmock/gmock.h"
28 : #include "gtest/gtest.h"
29 : #include "syzygy/agent/asan/asan_rtl_impl.h"
30 : #include "syzygy/agent/asan/asan_runtime.h"
31 : #include "syzygy/agent/asan/block.h"
32 : #include "syzygy/agent/asan/heap.h"
33 : #include "syzygy/agent/asan/page_protection_helpers.h"
34 : #include "syzygy/agent/asan/stack_capture_cache.h"
35 : #include "syzygy/agent/asan/unittest_util.h"
36 : #include "syzygy/agent/asan/heaps/ctmalloc_heap.h"
37 : #include "syzygy/agent/asan/heaps/internal_heap.h"
38 : #include "syzygy/agent/asan/heaps/large_block_heap.h"
39 : #include "syzygy/agent/asan/heaps/simple_block_heap.h"
40 : #include "syzygy/agent/asan/heaps/win_heap.h"
41 : #include "syzygy/agent/asan/heaps/zebra_block_heap.h"
42 : #include "syzygy/agent/asan/memory_notifiers/shadow_memory_notifier.h"
43 : #include "syzygy/common/asan_parameters.h"
44 :
45 : namespace agent {
46 : namespace asan {
47 : namespace heap_managers {
48 :
49 : namespace {
50 :
51 : using heaps::ZebraBlockHeap;
52 : using testing::IsAccessible;
53 : using testing::IsNotAccessible;
54 :
55 : typedef BlockHeapManager::HeapId HeapId;
56 :
57 E : testing::DummyHeap dummy_heap;
58 E : agent::asan::memory_notifiers::ShadowMemoryNotifier shadow_notifier;
59 :
60 : // A fake ZebraBlockHeap to simplify unit testing. This wrapper exposes
61 : // switches to enable/disable the quarantine and to accept/refuse
62 : // allocations.
63 : class TestZebraBlockHeap : public heaps::ZebraBlockHeap {
64 : public:
65 : using ZebraBlockHeap::set_quarantine_ratio;
66 : using ZebraBlockHeap::quarantine_ratio;
67 : using ZebraBlockHeap::slab_count_;
68 :
69 : // Constructor.
70 E : TestZebraBlockHeap()
71 : : ZebraBlockHeap(1024 * 1024, &shadow_notifier, &dummy_heap) {
72 E : refuse_allocations_ = false;
73 E : refuse_push_ = false;
74 E : }
75 :
76 : // Virtual destructor.
77 E : virtual ~TestZebraBlockHeap() { }
78 :
79 : // Wrapper that allows easily disabling allocations.
80 : virtual void* AllocateBlock(size_t size,
81 : size_t min_left_redzone_size,
82 : size_t min_right_redzone_size,
83 E : BlockLayout* layout) OVERRIDE {
84 E : if (refuse_allocations_)
85 E : return nullptr;
86 : return ZebraBlockHeap::AllocateBlock(size,
87 : min_left_redzone_size,
88 : min_right_redzone_size,
89 E : layout);
90 E : }
91 :
92 : // Wrapper that allows easily disabling the insertion of new blocks in the
93 : // quarantine.
94 E : virtual bool Push(const CompactBlockInfo& info) OVERRIDE {
95 E : if (refuse_push_)
96 E : return false;
97 E : return ZebraBlockHeap::Push(info);
98 E : }
99 :
100 : // Enable/Disable future allocations.
101 E : void set_refuse_allocations(bool value) {
102 E : refuse_allocations_ = value;
103 E : }
104 :
105 : // Enable/Disable the insertion of blocks in the quarantine.
106 E : void set_refuse_push(bool value) {
107 E : refuse_push_ = value;
108 E : }
109 :
110 : protected:
111 : bool refuse_allocations_;
112 : bool refuse_push_;
113 :
114 : private:
115 : DISALLOW_COPY_AND_ASSIGN(TestZebraBlockHeap);
116 : };
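
// A minimal usage sketch of the switches above (illustrative only; the real
// manager wiring is done by EnableTestZebraBlockHeap() further below):
//
//   TestZebraBlockHeap zebra;
//   zebra.set_refuse_allocations(true);  // AllocateBlock() returns nullptr.
//   zebra.set_refuse_push(true);         // Push() refuses quarantine inserts.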
117 :
118 : // A derived class to expose protected members for unit-testing.
119 : class TestBlockHeapManager : public BlockHeapManager {
120 : public:
121 : using BlockHeapManager::HeapQuarantinePair;
122 :
123 : using BlockHeapManager::FreePotentiallyCorruptBlock;
124 : using BlockHeapManager::GetHeapId;
125 : using BlockHeapManager::GetHeapFromId;
126 : using BlockHeapManager::GetHeapTypeUnlocked;
127 : using BlockHeapManager::GetQuarantineFromId;
128 : using BlockHeapManager::HeapMetadata;
129 : using BlockHeapManager::HeapQuarantineMap;
130 : using BlockHeapManager::IsValidHeapIdUnlocked;
131 : using BlockHeapManager::SetHeapErrorCallback;
132 : using BlockHeapManager::ShardedBlockQuarantine;
133 : using BlockHeapManager::TrimQuarantine;
134 :
135 : using BlockHeapManager::allocation_filter_flag_tls_;
136 : using BlockHeapManager::heaps_;
137 : using BlockHeapManager::large_block_heap_id_;
138 : using BlockHeapManager::locked_heaps_;
139 : using BlockHeapManager::parameters_;
140 : using BlockHeapManager::rate_targeted_heaps_;
141 : using BlockHeapManager::rate_targeted_heaps_count_;
142 : using BlockHeapManager::targeted_heaps_info_;
143 : using BlockHeapManager::zebra_block_heap_;
144 : using BlockHeapManager::zebra_block_heap_id_;
145 :
146 : using BlockHeapManager::kRateTargetedHeapCount;
147 : using BlockHeapManager::kDefaultRateTargetedHeapsMinBlockSize;
148 :
149 : // A derived class to expose protected members for unit-testing. This has to
150 : // be nested within this one because ShardedBlockQuarantine accesses some
151 : // protected fields of BlockHeapManager.
152 : //
153 : // This class should only expose existing members or add new functions; no
154 : // new data member should be added.
155 : class TestQuarantine : public ShardedBlockQuarantine {
156 : public:
157 : using ShardedBlockQuarantine::Node;
158 : using ShardedBlockQuarantine::kShardingFactor;
159 : using ShardedBlockQuarantine::heads_;
160 : };
161 :
162 : // Constructor.
163 E : explicit TestBlockHeapManager(StackCaptureCache* stack_cache)
164 : : BlockHeapManager(stack_cache) {
165 E : }
166 :
167 : // Removes the heap with the given ID.
168 E : void RemoveHeapById(HeapId heap_id) {
169 E : if (heap_id == 0)
170 E : return;
171 E : BlockHeapInterface* heap = GetHeapFromId(heap_id);
172 E : delete heap;
173 E : EXPECT_EQ(1, heaps_.erase(heap));
174 E : }
175 :
176 : // Wrapper for the set_parameters method. This also takes care of
177 : // reinitializing the variables that are usually initialized in the
178 : // constructor of a BlockHeapManager.
179 E : void SetParameters(const ::common::AsanParameters& params) {
180 E : bool ctmalloc_changed = false;
181 :
182 : // Set the parameters.
183 : {
184 E : base::AutoLock lock(lock_);
185 E : ctmalloc_changed = params.enable_ctmalloc != parameters_.enable_ctmalloc;
186 E : parameters_ = params;
187 E : }
188 :
189 : // Reinitialize the internal and special heaps if necessary.
190 E : if (ctmalloc_changed) {
191 : // Since the zebra and large block heaps use the internal heap they
192 : // must also be reset.
193 E : RemoveHeapById(large_block_heap_id_);
194 E : RemoveHeapById(zebra_block_heap_id_);
195 E : large_block_heap_id_ = 0;
196 E : zebra_block_heap_id_ = 0;
197 E : for (size_t i = 0; i < kRateTargetedHeapCount; ++i) {
198 E : RemoveHeapById(rate_targeted_heaps_[i]);
199 E : rate_targeted_heaps_[i] = 0;
200 E : rate_targeted_heaps_count_[i] = 0;
201 E : }
202 :
203 E : internal_heap_.reset();
204 E : internal_win_heap_.reset();
205 E : InitInternalHeap();
206 E : InitRateTargetedHeaps();
207 : }
208 :
209 E : PropagateParameters();
210 :
211 : // Reinitialize the process heap if necessary.
212 E : if (ctmalloc_changed) {
213 E : EXPECT_EQ(1, underlying_heaps_map_.erase(process_heap_));
214 E : EXPECT_EQ(1, heaps_.erase(process_heap_));
215 E : delete process_heap_;
216 E : process_heap_ = nullptr;
217 E : if (process_heap_underlying_heap_) {
218 E : delete process_heap_underlying_heap_;
219 E : process_heap_underlying_heap_ = nullptr;
220 : }
221 E : InitProcessHeap();
222 : }
223 E : }
224 : };
225 :
226 : // A derived class to expose protected members for unit-testing.
227 : class TestAsanRuntime : public agent::asan::AsanRuntime {
228 : public:
229 : using agent::asan::AsanRuntime::heap_manager_;
230 : };
231 :
232 : // A utility class for manipulating a heap. This automatically deletes the heap
233 : // and its contents in the destructor and provides some utility functions.
234 : class ScopedHeap {
235 : public:
236 : typedef TestBlockHeapManager::TestQuarantine TestQuarantine;
237 :
238 : // Constructor.
239 E : explicit ScopedHeap(TestBlockHeapManager* heap_manager)
240 : : heap_manager_(heap_manager) {
241 E : heap_id_ = heap_manager->CreateHeap();
242 E : EXPECT_NE(0u, heap_id_);
243 E : }
244 :
245 : // Destructor. Destroys the heap; this flushes its quarantine and deletes
246 : // all the structures associated with this heap.
247 E : ~ScopedHeap() {
248 E : ReleaseHeap();
249 E : }
250 :
251 E : void ReleaseHeap() {
252 E : if (heap_id_ != 0) {
253 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id_));
254 E : heap_id_ = 0;
255 : }
256 E : }
257 :
258 : // Retrieves the quarantine associated with this heap.
259 E : BlockQuarantineInterface* GetQuarantine() {
260 E : return heap_manager_->GetQuarantineFromId(heap_id_);
261 E : }
262 :
263 : // Allocate a block of @p size bytes.
264 E : void* Allocate(size_t size) {
265 E : return heap_manager_->Allocate(heap_id_, size);
266 E : }
267 :
268 : // Free the block @p mem.
269 E : bool Free(void* mem) {
270 E : return heap_manager_->Free(heap_id_, mem);
271 E : }
272 :
273 : // Flush the quarantine of this heap.
274 E : void FlushQuarantine() {
275 E : BlockQuarantineInterface* quarantine = GetQuarantine();
276 E : EXPECT_NE(static_cast<BlockQuarantineInterface*>(nullptr),
277 : quarantine);
278 E : BlockQuarantineInterface::ObjectVector blocks_to_free;
279 E : quarantine->Empty(&blocks_to_free);
280 : BlockQuarantineInterface::ObjectVector::iterator iter_block =
281 E : blocks_to_free.begin();
282 E : for (; iter_block != blocks_to_free.end(); ++iter_block) {
283 E : const CompactBlockInfo& compact = *iter_block;
284 E : BlockInfo expanded = {};
285 E : ConvertBlockInfo(compact, &expanded);
286 E : CHECK(heap_manager_->FreePotentiallyCorruptBlock(&expanded));
287 E : }
288 E : }
289 :
290 : // Returns the underlying heap ID.
291 E : HeapId Id() { return heap_id_; }
292 :
293 : // Determines if the address @p mem corresponds to a block in the quarantine
294 : // of this heap.
295 E : bool InQuarantine(const void* mem) {
296 : // As we cast a ShardedBlockQuarantine directly into a TestQuarantine,
297 : // this class must not define any new fields; it should only act as an
298 : // interface allowing access to some protected fields.
299 : COMPILE_ASSERT(
300 : sizeof(TestQuarantine) ==
301 : sizeof(TestBlockHeapManager::ShardedBlockQuarantine),
302 : test_quarantine_is_not_an_interface);
303 : TestQuarantine* test_quarantine =
304 E : reinterpret_cast<TestQuarantine*>(GetQuarantine());
305 E : EXPECT_NE(static_cast<TestQuarantine*>(nullptr), test_quarantine);
306 : // Search through all of the shards.
307 E : for (size_t i = 0; i < test_quarantine->kShardingFactor; ++i) {
308 : // Search through all blocks in each shard.
309 E : TestQuarantine::Node* current_node = test_quarantine->heads_[i];
310 E : while (current_node != nullptr) {
311 : const uint8* body = current_node->object.block +
312 E : current_node->object.header_size;
313 E : if (body == mem) {
314 : const BlockHeader* header = reinterpret_cast<BlockHeader*>(
315 E : current_node->object.block);
316 E : EXPECT_EQ(QUARANTINED_BLOCK, header->state);
317 E : return true;
318 : }
319 E : current_node = current_node->next;
320 E : }
321 E : }
322 :
323 E : return false;
324 E : }
325 :
326 : // Returns the features supported by the heap.
327 E : uint32 GetHeapFeatures() {
328 E : return heap_manager_->GetHeapFromId(heap_id_)->GetHeapFeatures();
329 E : }
330 :
331 : private:
332 : // The heap manager owning the underlying heap.
333 : TestBlockHeapManager* heap_manager_;
334 :
335 : // The underlying heap.
336 : HeapId heap_id_;
337 : };
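
// A minimal usage sketch of ScopedHeap (illustrative only): the destructor
// releases the heap, which flushes its quarantine, so no explicit cleanup
// is needed.
//
//   ScopedHeap heap(heap_manager_);
//   void* mem = heap.Allocate(42);
//   EXPECT_TRUE(heap.Free(mem));
//   // heap.InQuarantine(mem) holds if the quarantine is large enough.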
338 :
339 : // A value-parameterized test class for testing the BlockHeapManager class.
340 : //
341 : // The parameter value is used to enable/disable the usage of the CTMalloc heap.
342 : class BlockHeapManagerTest
343 : : public testing::TestWithAsanRuntime,
344 : public testing::WithParamInterface<bool> {
345 : public:
346 : typedef TestBlockHeapManager::ShardedBlockQuarantine ShardedBlockQuarantine;
347 : typedef testing::TestWithAsanRuntime Super;
348 :
349 : BlockHeapManagerTest()
350 : : TestWithAsanRuntime(&test_runtime_), heap_manager_(),
351 E : test_zebra_block_heap_(nullptr) {
352 E : }
353 :
354 E : virtual void SetUp() OVERRIDE {
355 E : Super::SetUp();
356 : heap_manager_ = reinterpret_cast<TestBlockHeapManager*>(
357 E : test_runtime_.heap_manager_.get());
358 :
359 : // Set the error callback that the manager will use.
360 : heap_manager_->SetHeapErrorCallback(
361 E : base::Bind(&BlockHeapManagerTest::OnHeapError, base::Unretained(this)));
362 :
363 : ::common::AsanParameters params;
364 E : ::common::SetDefaultAsanParameters(&params);
365 E : params.enable_ctmalloc = GetParam();
366 E : heap_manager_->SetParameters(params);
367 E : }
368 :
369 E : virtual void TearDown() OVERRIDE {
370 E : heap_manager_ = nullptr;
371 E : Super::TearDown();
372 E : }
373 :
374 E : void OnHeapError(AsanErrorInfo* error) {
375 E : errors_.push_back(*error);
376 E : }
377 :
378 : // Calculates the Asan size for an allocation of @p user_size bytes.
379 E : size_t GetAllocSize(size_t user_size) {
380 E : BlockLayout layout = {};
381 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, user_size, 0,
382 : heap_manager_->parameters().trailer_padding_size + sizeof(BlockTrailer),
383 : &layout));
384 E : return layout.block_size;
385 E : }
386 :
387 E : void EnableTestZebraBlockHeap() {
388 : // Erase previous ZebraBlockHeap.
389 E : if (heap_manager_->zebra_block_heap_ != 0) {
390 i : heap_manager_->heaps_.erase(heap_manager_->zebra_block_heap_);
391 i : delete heap_manager_->zebra_block_heap_;
392 : }
393 : // Plug in a mock ZebraBlockHeap, disabled by default.
394 E : test_zebra_block_heap_ = new TestZebraBlockHeap();
395 E : heap_manager_->zebra_block_heap_ = test_zebra_block_heap_;
396 : TestBlockHeapManager::HeapMetadata heap_metadata =
397 E : { test_zebra_block_heap_, false };
398 : auto result = heap_manager_->heaps_.insert(std::make_pair(
399 E : test_zebra_block_heap_, heap_metadata));
400 E : heap_manager_->zebra_block_heap_id_ = heap_manager_->GetHeapId(result);
401 :
402 : // Turn on the zebra_block_heap_enabled flag.
403 E : ::common::AsanParameters params = heap_manager_->parameters();
404 E : params.enable_zebra_block_heap = true;
405 E : heap_manager_->set_parameters(params);
406 E : }
407 :
408 E : void EnableLargeBlockHeap(size_t large_allocation_threshold) {
409 E : ::common::AsanParameters params = heap_manager_->parameters();
410 E : params.enable_large_block_heap = true;
411 E : params.large_allocation_threshold = large_allocation_threshold;
412 E : heap_manager_->set_parameters(params);
413 E : CHECK_NE(0u, heap_manager_->large_block_heap_id_);
414 E : }
415 :
416 : // Verifies that [alloc, alloc + size) is accessible, and that
417 : // [alloc - 1] and [alloc + size] are poisoned.
418 E : void VerifyAllocAccess(void* alloc, size_t size) {
419 E : uint8* mem = reinterpret_cast<uint8*>(alloc);
420 E : ASSERT_FALSE(Shadow::IsAccessible(mem - 1));
421 E : ASSERT_TRUE(Shadow::IsLeftRedzone(mem - 1));
422 E : for (size_t i = 0; i < size; ++i)
423 E : ASSERT_TRUE(Shadow::IsAccessible(mem + i));
424 E : ASSERT_FALSE(Shadow::IsAccessible(mem + size));
425 E : }
426 :
427 : // Verifies that [alloc - 1, alloc + size] is poisoned.
428 E : void VerifyFreedAccess(void* alloc, size_t size) {
429 E : uint8* mem = reinterpret_cast<uint8*>(alloc);
430 E : ASSERT_FALSE(Shadow::IsAccessible(mem - 1));
431 E : ASSERT_TRUE(Shadow::IsLeftRedzone(mem - 1));
432 E : for (size_t i = 0; i < size; ++i) {
433 E : ASSERT_FALSE(Shadow::IsAccessible(mem + i));
434 E : ASSERT_EQ(Shadow::GetShadowMarkerForAddress(mem + i),
435 : kHeapFreedMarker);
436 E : }
437 E : ASSERT_FALSE(Shadow::IsAccessible(mem + size));
438 E : }
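
// The block layout that the two helpers above assume (not to scale):
//
//   [ left redzone ][ body: size bytes ][ right redzone ]
//       poisoned        accessible          poisoned
//
// After a free, the body is also poisoned and carries the kHeapFreedMarker
// shadow marker.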
439 :
440 : protected:
441 : // The heap manager used in these tests.
442 : TestBlockHeapManager* heap_manager_;
443 :
444 : // Info about the last errors reported.
445 : std::vector<AsanErrorInfo> errors_;
446 :
447 : // The mock ZebraBlockHeap used in the tests.
448 : TestZebraBlockHeap* test_zebra_block_heap_;
449 :
450 : // The runtime used by these tests.
451 : TestAsanRuntime test_runtime_;
452 : };
453 :
454 : } // namespace
455 :
456 : // Instantiate the test cases; the bool parameter toggles CTMalloc usage.
457 : INSTANTIATE_TEST_CASE_P(BlockHeapManagerTests,
458 : BlockHeapManagerTest,
459 E : ::testing::Bool());
460 :
461 E : TEST_P(BlockHeapManagerTest, AllocAndFree) {
462 E : const size_t kAllocSize = 17;
463 E : HeapId heap_id = heap_manager_->CreateHeap();
464 E : EXPECT_NE(0u, heap_id);
465 E : void* alloc = heap_manager_->Allocate(heap_id, kAllocSize);
466 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
467 E : EXPECT_LE(kAllocSize, heap_manager_->Size(heap_id, alloc));
468 E : EXPECT_TRUE(heap_manager_->Free(heap_id, alloc));
469 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id));
470 E : }
471 :
472 E : TEST_P(BlockHeapManagerTest, FreeNullPointer) {
473 E : HeapId heap_id = heap_manager_->CreateHeap();
474 E : EXPECT_NE(0u, heap_id);
475 E : EXPECT_TRUE(heap_manager_->Free(heap_id, static_cast<void*>(nullptr)));
476 E : EXPECT_TRUE(heap_manager_->DestroyHeap(heap_id));
477 E : }
478 :
479 E : TEST_P(BlockHeapManagerTest, FreeUnguardedAlloc) {
480 E : const size_t kAllocSize = 100;
481 E : ::common::AsanParameters params = heap_manager_->parameters();
482 E : params.allocation_guard_rate = 0.0;
483 E : heap_manager_->set_parameters(params);
484 :
485 E : ScopedHeap heap(heap_manager_);
486 :
487 E : void* heap_alloc = heap.Allocate(kAllocSize);
488 E : EXPECT_NE(static_cast<void*>(nullptr), heap_alloc);
489 :
490 E : void* process_heap_alloc = ::HeapAlloc(::GetProcessHeap(), 0, kAllocSize);
491 E : EXPECT_NE(static_cast<void*>(nullptr), process_heap_alloc);
492 :
493 : BlockHeapInterface* process_heap = heap_manager_->GetHeapFromId(
494 E : heap_manager_->process_heap());
495 E : void* process_heap_wrapper_alloc = process_heap->Allocate(kAllocSize);
496 E : EXPECT_NE(static_cast<void*>(nullptr), process_heap_wrapper_alloc);
497 :
498 E : EXPECT_TRUE(heap_manager_->Free(heap.Id(), heap_alloc));
499 : EXPECT_TRUE(heap_manager_->Free(heap_manager_->process_heap(),
500 E : process_heap_alloc));
501 : EXPECT_TRUE(heap_manager_->Free(heap_manager_->process_heap(),
502 E : process_heap_wrapper_alloc));
503 E : }
504 :
505 E : TEST_P(BlockHeapManagerTest, PopOnSetQuarantineMaxSize) {
506 E : const size_t kAllocSize = 100;
507 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
508 E : ScopedHeap heap(heap_manager_);
509 E : void* mem = heap.Allocate(kAllocSize);
510 E : ASSERT_FALSE(heap.InQuarantine(mem));
511 :
512 E : ::common::AsanParameters parameters = heap_manager_->parameters();
513 E : parameters.quarantine_size = real_alloc_size;
514 E : heap_manager_->set_parameters(parameters);
515 :
516 E : ASSERT_TRUE(heap.Free(mem));
517 E : ASSERT_TRUE(heap.InQuarantine(mem));
518 :
519 : // Resize the quarantine to a smaller size; the block should pop out.
520 E : parameters.quarantine_size = real_alloc_size - 1;
521 E : heap_manager_->set_parameters(parameters);
522 E : ASSERT_FALSE(heap.InQuarantine(mem));
523 E : }
524 :
525 E : TEST_P(BlockHeapManagerTest, Quarantine) {
526 E : const size_t kAllocSize = 100;
527 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
528 E : const size_t number_of_allocs = 16;
529 E : ScopedHeap heap(heap_manager_);
530 :
531 E : ::common::AsanParameters parameters = heap_manager_->parameters();
532 E : parameters.quarantine_size = real_alloc_size * number_of_allocs;
533 E : heap_manager_->set_parameters(parameters);
534 :
535 : // Allocate a bunch of blocks until exactly one is removed from the
536 : // quarantine.
537 E : std::vector<void*> blocks;
538 E : for (size_t i = 0; i < number_of_allocs + 1; ++i) {
539 E : void* mem = heap.Allocate(kAllocSize);
540 E : ASSERT_TRUE(mem != nullptr);
541 E : heap.Free(mem);
542 E : blocks.push_back(mem);
543 E : if (i < number_of_allocs)
544 E : ASSERT_TRUE(heap.InQuarantine(mem));
545 E : }
546 :
547 E : size_t blocks_in_quarantine = 0;
548 E : for (size_t i = 0; i < blocks.size(); ++i) {
549 E : if (heap.InQuarantine(blocks[i]))
550 E : ++blocks_in_quarantine;
551 E : }
552 E : EXPECT_EQ(number_of_allocs, blocks_in_quarantine);
553 E : }
554 :
555 E : TEST_P(BlockHeapManagerTest, QuarantineLargeBlock) {
556 E : const size_t kLargeAllocSize = 100;
557 E : const size_t kSmallAllocSize = 25;
558 E : size_t real_large_alloc_size = GetAllocSize(kLargeAllocSize);
559 E : size_t real_small_alloc_size = GetAllocSize(kSmallAllocSize);
560 :
561 E : ScopedHeap heap(heap_manager_);
562 E : ::common::AsanParameters parameters = heap_manager_->parameters();
563 E : parameters.quarantine_size = real_large_alloc_size;
564 E : parameters.quarantine_block_size = real_large_alloc_size;
565 E : heap_manager_->set_parameters(parameters);
566 :
567 : // A block larger than the quarantine should not make it in.
568 E : void* mem1 = heap.Allocate(real_large_alloc_size + 1);
569 E : ASSERT_NE(static_cast<void*>(nullptr), mem1);
570 E : EXPECT_TRUE(heap.Free(mem1));
571 E : EXPECT_FALSE(heap.InQuarantine(mem1));
572 E : EXPECT_EQ(0u, heap.GetQuarantine()->GetCount());
573 :
574 : // A smaller block should make it in because our current max block size
575 : // allows it.
576 E : void* mem2 = heap.Allocate(kSmallAllocSize);
577 E : ASSERT_NE(static_cast<void*>(nullptr), mem2);
578 E : EXPECT_TRUE(heap.Free(mem2));
579 E : EXPECT_TRUE(heap.InQuarantine(mem2));
580 :
581 E : parameters.quarantine_block_size = real_small_alloc_size - 1;
582 E : heap_manager_->set_parameters(parameters);
583 :
584 : // A second small block should not make it in since we changed the block size.
585 : // However, the other block should remain in the quarantine.
586 E : void* mem3 = heap.Allocate(kSmallAllocSize);
587 E : ASSERT_NE(static_cast<void*>(nullptr), mem3);
588 E : EXPECT_TRUE(heap.Free(mem3));
589 E : EXPECT_TRUE(heap.InQuarantine(mem2));
590 E : EXPECT_FALSE(heap.InQuarantine(mem3));
591 E : }
592 :
593 E : TEST_P(BlockHeapManagerTest, UnpoisonsQuarantine) {
594 E : const size_t kAllocSize = 100;
595 E : const size_t real_alloc_size = GetAllocSize(kAllocSize);
596 :
597 E : ScopedHeap heap(heap_manager_);
598 E : ::common::AsanParameters parameters = heap_manager_->parameters();
599 E : parameters.quarantine_size = real_alloc_size;
600 E : heap_manager_->set_parameters(parameters);
601 :
602 : // Allocate a memory block and immediately free it; this puts it in the
603 : // quarantine.
604 E : void* mem = heap.Allocate(kAllocSize);
605 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
606 E : ASSERT_TRUE(heap.Free(mem));
607 E : ASSERT_TRUE(heap.InQuarantine(mem));
608 :
609 : // Assert that the shadow memory has been correctly poisoned.
610 E : intptr_t mem_start = reinterpret_cast<intptr_t>(BlockGetHeaderFromBody(mem));
611 E : ASSERT_EQ(0, (mem_start & 7));
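// One shadow byte maps to kShadowRatio (8) bytes of memory, hence the
// 8-byte alignment check above and the right-shifts by 3 below.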
612 E : size_t shadow_start = mem_start >> 3;
613 E : size_t shadow_alloc_size = real_alloc_size >> 3;
614 E : for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i)
615 E : ASSERT_NE(kHeapAddressableMarker, Shadow::shadow()[i]);
616 :
617 : // Flush the quarantine.
618 E : heap.FlushQuarantine();
619 :
620 : // Assert that the quarantine has been correctly unpoisoned.
621 E : for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i) {
622 : if ((heap.GetHeapFeatures() &
623 E : HeapInterface::kHeapReportsReservations) != 0) {
624 E : ASSERT_EQ(kAsanReservedMarker, Shadow::shadow()[i]);
625 E : } else {
626 E : ASSERT_EQ(kHeapAddressableMarker, Shadow::shadow()[i]);
627 : }
628 E : }
629 E : }
630 :
631 E : TEST_P(BlockHeapManagerTest, QuarantineIsShared) {
632 E : const size_t kAllocSize = 100;
633 E : const size_t real_alloc_size = GetAllocSize(kAllocSize);
634 E : ScopedHeap heap_1(heap_manager_);
635 E : ScopedHeap heap_2(heap_manager_);
636 :
637 E : ASSERT_EQ(heap_1.GetQuarantine(), heap_2.GetQuarantine());
638 :
639 E : ::common::AsanParameters parameters = heap_manager_->parameters();
640 E : parameters.quarantine_size = real_alloc_size * 4;
641 E : heap_manager_->set_parameters(parameters);
642 :
643 E : void* heap_1_mem1 = heap_1.Allocate(kAllocSize);
644 E : ASSERT_NE(static_cast<void*>(nullptr), heap_1_mem1);
645 E : void* heap_1_mem2 = heap_1.Allocate(kAllocSize);
646 E : ASSERT_NE(static_cast<void*>(nullptr), heap_1_mem2);
647 E : void* heap_2_mem1 = heap_2.Allocate(kAllocSize);
648 E : ASSERT_NE(static_cast<void*>(nullptr), heap_2_mem1);
649 E : void* heap_2_mem2 = heap_2.Allocate(kAllocSize);
650 E : ASSERT_NE(static_cast<void*>(nullptr), heap_2_mem2);
651 :
652 E : EXPECT_TRUE(heap_1.Free(heap_1_mem1));
653 E : EXPECT_TRUE(heap_1.Free(heap_1_mem2));
654 E : EXPECT_TRUE(heap_2.Free(heap_2_mem1));
655 E : EXPECT_TRUE(heap_2.Free(heap_2_mem2));
656 :
657 E : EXPECT_TRUE(heap_1.InQuarantine(heap_1_mem1));
658 E : EXPECT_TRUE(heap_1.InQuarantine(heap_1_mem2));
659 E : EXPECT_TRUE(heap_2.InQuarantine(heap_2_mem1));
660 E : EXPECT_TRUE(heap_2.InQuarantine(heap_2_mem2));
661 :
662 E : BlockQuarantineInterface* quarantine = heap_1.GetQuarantine();
663 E : EXPECT_EQ(4, quarantine->GetCount());
664 E : heap_2.ReleaseHeap();
665 E : EXPECT_EQ(2, quarantine->GetCount());
666 E : heap_1.ReleaseHeap();
667 E : EXPECT_EQ(0, quarantine->GetCount());
668 E : }
669 :
670 E : TEST_P(BlockHeapManagerTest, AllocZeroBytes) {
671 E : ScopedHeap heap(heap_manager_);
672 E : void* mem1 = heap.Allocate(0);
673 E : ASSERT_NE(static_cast<void*>(nullptr), mem1);
674 E : void* mem2 = heap.Allocate(0);
675 E : ASSERT_NE(static_cast<void*>(nullptr), mem2);
676 E : ASSERT_NE(mem1, mem2);
677 E : ASSERT_TRUE(heap.Free(mem1));
678 E : ASSERT_TRUE(heap.Free(mem2));
679 E : }
680 :
681 E : TEST_P(BlockHeapManagerTest, AllocInvalidBlockSize) {
682 E : ScopedHeap heap(heap_manager_);
683 E : const size_t kInvalidSize = 0xffffffff;
684 E : void* mem = heap.Allocate(kInvalidSize);
685 E : ASSERT_EQ(static_cast<void*>(nullptr), mem);
686 E : }
687 :
688 E : TEST_P(BlockHeapManagerTest, Size) {
689 E : const size_t kMaxAllocSize = 134584;
690 E : ScopedHeap heap(heap_manager_);
691 E : for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
692 E : void* mem = heap.Allocate(size);
693 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
694 E : ASSERT_EQ(size, heap_manager_->Size(heap.Id(), mem));
695 E : ASSERT_TRUE(heap.Free(mem));
696 E : }
697 E : }
698 :
699 E : TEST_P(BlockHeapManagerTest, AllocsAccessibility) {
700 E : const size_t kMaxAllocSize = 134584;
701 E : ScopedHeap heap(heap_manager_);
702 : // Ensure that the quarantine is large enough to keep the allocated blocks in
703 : // this test.
704 E : ::common::AsanParameters parameters = heap_manager_->parameters();
705 E : parameters.quarantine_size = kMaxAllocSize * 2;
706 E : heap_manager_->set_parameters(parameters);
707 E : for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
708 : // Do an alloc/free and test that access is correctly managed.
709 E : void* mem = heap.Allocate(size);
710 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
711 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(mem, size));
712 E : ASSERT_TRUE(heap.Free(mem));
713 E : ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(mem, size));
714 E : }
715 E : }
716 :
717 E : TEST_P(BlockHeapManagerTest, LockUnlock) {
718 E : ScopedHeap heap(heap_manager_);
719 : // We can't really test these, aside from not crashing.
720 E : ASSERT_NO_FATAL_FAILURE(heap_manager_->Lock(heap.Id()));
721 E : ASSERT_NO_FATAL_FAILURE(heap_manager_->Unlock(heap.Id()));
722 E : }
723 :
724 E : TEST_P(BlockHeapManagerTest, CaptureTID) {
725 E : const size_t kAllocSize = 13;
726 E : ScopedHeap heap(heap_manager_);
727 : // Ensure that the quarantine is large enough to keep this block.
728 E : ::common::AsanParameters parameters = heap_manager_->parameters();
729 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
730 E : heap_manager_->set_parameters(parameters);
731 E : uint8* mem = static_cast<uint8*>(heap.Allocate(kAllocSize));
732 E : ASSERT_TRUE(heap.Free(mem));
733 : EXPECT_EQ(QUARANTINED_BLOCK,
734 E : static_cast<BlockState>(BlockGetHeaderFromBody(mem)->state));
735 :
736 E : BlockHeader* header = BlockGetHeaderFromBody(mem);
737 E : ASSERT_NE(static_cast<BlockHeader*>(nullptr), header);
738 E : BlockInfo block_info = {};
739 E : EXPECT_TRUE(BlockInfoFromMemory(header, &block_info));
740 E : EXPECT_NE(static_cast<BlockTrailer*>(nullptr), block_info.trailer);
741 :
742 E : EXPECT_EQ(block_info.trailer->alloc_tid, ::GetCurrentThreadId());
743 E : EXPECT_EQ(block_info.trailer->free_tid, ::GetCurrentThreadId());
744 E : }
745 :
746 E : TEST_P(BlockHeapManagerTest, QuarantineDoesntAlterBlockContents) {
747 E : const size_t kAllocSize = 13;
748 E : ScopedHeap heap(heap_manager_);
749 : // Ensure that the quarantine is large enough to keep this block.
750 E : ::common::AsanParameters parameters = heap_manager_->parameters();
751 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
752 E : heap_manager_->set_parameters(parameters);
753 E : void* mem = heap.Allocate(kAllocSize);
754 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
755 E : base::RandBytes(mem, kAllocSize);
756 :
757 E : unsigned char sha1_before[base::kSHA1Length] = {};
758 : base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
759 : kAllocSize,
760 E : sha1_before);
761 :
762 E : BlockHeader* header = BlockGetHeaderFromBody(mem);
763 :
764 E : ASSERT_TRUE(heap.Free(mem));
765 E : EXPECT_EQ(QUARANTINED_BLOCK, static_cast<BlockState>(header->state));
766 :
767 E : unsigned char sha1_after[base::kSHA1Length] = {};
768 : base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
769 : kAllocSize,
770 E : sha1_after);
771 :
772 E : EXPECT_EQ(0, memcmp(sha1_before, sha1_after, base::kSHA1Length));
773 E : }
774 :
775 E : TEST_P(BlockHeapManagerTest, SetTrailerPaddingSize) {
776 E : const size_t kAllocSize = 13;
777 E : ScopedHeap heap(heap_manager_);
778 : // Ensure that the quarantine is large enough to keep this block with the
779 : // extra padding.
780 E : ::common::AsanParameters parameters = heap_manager_->parameters();
781 E : parameters.quarantine_size = GetAllocSize(kAllocSize) * 5;
782 E : heap_manager_->set_parameters(parameters);
783 E : size_t original_alloc_size = GetAllocSize(kAllocSize);
784 E : ::common::AsanParameters original_parameter = heap_manager_->parameters();
785 :
786 E : for (size_t padding = 0; padding < 16; ++padding) {
787 E : ::common::AsanParameters new_parameter = original_parameter;
788 : new_parameter.trailer_padding_size =
789 E : original_parameter.trailer_padding_size + padding;
790 E : heap_manager_->set_parameters(new_parameter);
791 E : size_t augmented_alloc_size = GetAllocSize(kAllocSize);
792 E : EXPECT_GE(augmented_alloc_size, original_alloc_size);
793 :
794 E : void* mem = heap.Allocate(kAllocSize);
795 E : ASSERT_TRUE(mem != nullptr);
796 :
797 E : size_t offset = kAllocSize;
798 E : for (; offset < augmented_alloc_size - sizeof(BlockHeader);
799 E : ++offset) {
800 : EXPECT_FALSE(Shadow::IsAccessible(
801 E : reinterpret_cast<const uint8*>(mem) + offset));
802 E : }
803 E : ASSERT_TRUE(heap.Free(mem));
804 E : }
805 E : heap_manager_->set_parameters(original_parameter);
806 E : }
807 :
808 E : TEST_P(BlockHeapManagerTest, BlockChecksumUpdatedWhenEnterQuarantine) {
809 E : const size_t kAllocSize = 100;
810 E : size_t real_alloc_size = GetAllocSize(kAllocSize);
811 E : ScopedHeap heap(heap_manager_);
812 :
813 E : ::common::AsanParameters parameters = heap_manager_->parameters();
814 E : parameters.quarantine_size = real_alloc_size;
815 E : heap_manager_->set_parameters(parameters);
816 :
817 E : void* mem = heap.Allocate(kAllocSize);
818 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
819 E : BlockInfo block_info = {};
820 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(mem, &block_info));
821 E : EXPECT_TRUE(BlockChecksumIsValid(block_info));
822 E : heap.Free(mem);
823 E : EXPECT_TRUE(BlockChecksumIsValid(block_info));
824 E : ASSERT_TRUE(heap.InQuarantine(mem));
825 E : }
826 :
827 : static const size_t kChecksumRepeatCount = 10;
828 :
829 E : TEST_P(BlockHeapManagerTest, CorruptAsEntersQuarantine) {
830 E : const size_t kAllocSize = 100;
831 E : ::common::AsanParameters parameters = heap_manager_->parameters();
832 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
833 E : heap_manager_->set_parameters(parameters);
834 :
835 E : ScopedHeap heap(heap_manager_);
836 : // This can fail because of a checksum collision. However, we run it a
837 : // handful of times to keep the chances as small as possible.
838 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
839 E : heap.FlushQuarantine();
840 E : void* mem = heap.Allocate(kAllocSize);
841 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
842 E : reinterpret_cast<int*>(mem)[-1] = rand();
843 E : EXPECT_TRUE(heap.Free(mem));
844 :
845 : // If no error was reported, try again, unless this was the last attempt.
846 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
847 i : continue;
848 :
849 E : ASSERT_EQ(1u, errors_.size());
850 E : ASSERT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
851 E : ASSERT_EQ(mem, errors_[0].location);
852 :
853 E : break;
854 i : }
855 E : }
856 :
857 E : TEST_P(BlockHeapManagerTest, CorruptAsExitsQuarantine) {
858 E : const size_t kAllocSize = 100;
859 E : ::common::AsanParameters parameters = heap_manager_->parameters();
860 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
861 E : heap_manager_->set_parameters(parameters);
862 :
863 E : ScopedHeap heap(heap_manager_);
864 : // This can fail because of a checksum collision. However, we run it a
865 : // handful of times to keep the chances as small as possible.
866 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
867 E : heap.FlushQuarantine();
868 E : void* mem = heap.Allocate(kAllocSize);
869 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
870 E : EXPECT_TRUE(heap.Free(mem));
871 E : EXPECT_TRUE(errors_.empty());
872 :
873 : // Change some of the block content and then flush the quarantine. The
874 : // block's hash should now be invalid, causing an error to be fired.
875 E : reinterpret_cast<int32*>(mem)[0] = rand();
876 E : heap.FlushQuarantine();
877 :
878 : // If no error was reported, try again, unless this was the last attempt.
879 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
880 i : continue;
881 :
882 E : EXPECT_EQ(1u, errors_.size());
883 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
884 : EXPECT_EQ(
885 : reinterpret_cast<BlockHeader*>(mem) - 1,
886 E : reinterpret_cast<BlockHeader*>(errors_[0].location));
887 :
888 E : break;
889 i : }
890 E : }
891 :
892 E : TEST_P(BlockHeapManagerTest, CorruptAsExitsQuarantineOnHeapDestroy) {
893 E : const size_t kAllocSize = 100;
894 E : ::common::AsanParameters parameters = heap_manager_->parameters();
895 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
896 E : heap_manager_->set_parameters(parameters);
897 :
898 : // This can fail because of a checksum collision. However, we run it a
899 : // handful of times to keep the chances as small as possible.
900 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
901 E : void* mem = nullptr;
902 : {
903 E : ScopedHeap heap(heap_manager_);
904 E : heap.FlushQuarantine();
905 E : mem = heap.Allocate(kAllocSize);
906 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
907 E : EXPECT_TRUE(heap.Free(mem));
908 E : EXPECT_TRUE(errors_.empty());
909 :
910 : // Change some of the block content to invalidate the block's hash.
911 E : reinterpret_cast<int32*>(mem)[0] = rand();
912 E : }
913 :
914 : // The destructor of |heap| should be called and all the quarantined blocks
915 : // belonging to this heap should be freed, which should trigger an error as
916 : // the block is now corrupt.
917 :
918 : // If no error was reported, try again, unless this was the last attempt.
919 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
920 i : continue;
921 :
922 E : EXPECT_EQ(1u, errors_.size());
923 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
924 : EXPECT_EQ(reinterpret_cast<BlockHeader*>(mem) - 1,
925 E : reinterpret_cast<BlockHeader*>(errors_[0].location));
926 :
927 E : break;
928 i : }
929 E : }
930 :
931 E : TEST_P(BlockHeapManagerTest, CorruptHeapOnTrimQuarantine) {
932 E : const size_t kAllocSize = 100;
933 E : ::common::AsanParameters parameters = heap_manager_->parameters();
934 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
935 E : heap_manager_->set_parameters(parameters);
936 :
937 : // This can fail because of a checksum collision. However, we run it a
938 : // handful of times to keep the chances as small as possible.
939 E : for (size_t i = 0; i < kChecksumRepeatCount; ++i) {
940 E : void* mem = nullptr;
941 : {
942 E : ScopedHeap heap(heap_manager_);
943 E : heap.FlushQuarantine();
944 E : mem = heap.Allocate(kAllocSize);
945 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
946 E : EXPECT_TRUE(heap.Free(mem));
947 E : EXPECT_TRUE(errors_.empty());
948 :
949 : // Change some of the block content to invalidate the block's hash.
950 E : reinterpret_cast<int32*>(mem)[0] = rand();
951 E : }
952 :
953 : // The destructor of |heap| should be called and all the quarantined blocks
954 : // belonging to this heap should be freed, which should trigger an error as
955 : // the block is now corrupt.
956 :
957 : // If no error was reported, try again, unless this was the last attempt.
958 E : if (errors_.empty() && i + 1 < kChecksumRepeatCount)
959 i : continue;
960 :
961 E : EXPECT_EQ(1u, errors_.size());
962 E : EXPECT_EQ(CORRUPT_BLOCK, errors_[0].error_type);
963 : EXPECT_EQ(reinterpret_cast<BlockHeader*>(mem) - 1,
964 E : reinterpret_cast<BlockHeader*>(errors_[0].location));
965 :
966 E : break;
967 i : }
968 E : }
969 :
970 E : TEST_P(BlockHeapManagerTest, DoubleFree) {
971 E : const size_t kAllocSize = 100;
972 E : ::common::AsanParameters parameters = heap_manager_->parameters();
973 E : parameters.quarantine_size = GetAllocSize(kAllocSize);
974 E : heap_manager_->set_parameters(parameters);
975 :
976 E : ScopedHeap heap(heap_manager_);
977 E : void* mem = heap.Allocate(kAllocSize);
978 E : ASSERT_NE(static_cast<void*>(nullptr), mem);
979 E : EXPECT_TRUE(heap.Free(mem));
980 E : EXPECT_FALSE(heap.Free(mem));
981 :
982 E : EXPECT_EQ(1u, errors_.size());
983 E : EXPECT_EQ(DOUBLE_FREE, errors_[0].error_type);
984 E : EXPECT_EQ(mem, errors_[0].location);
985 E : }
986 :
987 E : TEST_P(BlockHeapManagerTest, SubsampledAllocationGuards) {
988 E : ::common::AsanParameters parameters = heap_manager_->parameters();
989 E : parameters.allocation_guard_rate = 0.5;
990 E : heap_manager_->set_parameters(parameters);
991 E : ScopedHeap heap(heap_manager_);
992 :
993 E : size_t guarded_allocations = 0;
994 E : size_t unguarded_allocations = 0;
995 :
996 : // Make a handful of allocations.
997 E : const size_t kAllocationCount = 10000;
998 : const size_t kAllocationSizes[] = {
999 E : 1, 2, 4, 8, 14, 30, 128, 237, 500, 1000, 2036 };
1000 E : std::vector<void*> allocations;
1001 E : for (size_t i = 0; i < kAllocationCount; ++i) {
1002 E : size_t alloc_size = kAllocationSizes[i % arraysize(kAllocationSizes)];
1003 E : void* alloc = heap.Allocate(alloc_size);
1004 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1005 :
1006 E : for (size_t i = 0; i < alloc_size; ++i)
1007 E : EXPECT_TRUE(Shadow::IsAccessible(reinterpret_cast<uint8*>(alloc) + i));
1008 :
1009 : // Determine if the allocation has guards or not.
1010 E : BlockHeader* header = BlockGetHeaderFromBody(alloc);
1011 E : if (header == nullptr) {
1012 E : ++unguarded_allocations;
1013 E : } else {
1014 E : ++guarded_allocations;
1015 : }
1016 :
1017 : if ((heap.GetHeapFeatures() &
1018 E : HeapInterface::kHeapSupportsGetAllocationSize) != 0) {
1019 : if ((heap.GetHeapFeatures() &
1020 E : HeapInterface::kHeapGetAllocationSizeIsUpperBound) != 0) {
1021 E : EXPECT_LE(alloc_size, heap_manager_->Size(heap.Id(), alloc));
1022 E : } else {
1023 E : EXPECT_EQ(alloc_size, heap_manager_->Size(heap.Id(), alloc));
1024 : }
1025 : }
1026 :
1027 : // Delete half of the allocations immediately, and keep half of them
1028 : // around for longer. This puts more of a stress test on the quarantine
1029 : // itself.
1030 E : if (base::RandDouble() < 0.5) {
1031 E : EXPECT_TRUE(heap.Free(alloc));
1032 E : } else {
1033 E : allocations.push_back(alloc);
1034 : }
1035 E : }
1036 :
1037 : // Free the outstanding allocations.
1038 E : for (size_t i = 0; i < allocations.size(); ++i)
1039 E : EXPECT_TRUE(heap.Free(allocations[i]));
1040 :
1041 : // Clear the quarantine. This should free up the remaining instrumented
1042 : // but quarantined blocks.
1043 E : EXPECT_NO_FATAL_FAILURE(heap.FlushQuarantine());
1044 :
1045 : // This could theoretically fail, but that would imply an extremely bad
1046 : // implementation of the underlying random number generator. There are 10000
1047 : // allocations. Since this is effectively a fair coin toss we expect a
1048 : // standard deviation of 0.5 * sqrt(10000) = 50. A 10% margin is
1049 : // 1000 / 50 = 20 standard deviations. For |z| > 20, the p-value is 5.5e-89,
1050 : // or 89 nines of confidence. That should keep any flake largely at bay.
1051 : // Thus, if this fails it's pretty much certain the implementation is at
1052 : // fault.
1053 E : EXPECT_LT(4 * kAllocationCount / 10, guarded_allocations);
1054 E : EXPECT_GT(6 * kAllocationCount / 10, guarded_allocations);
1055 E : }
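
// For reference, the arithmetic behind the bound above, assuming a fair
// Bernoulli(0.5) guard decision per allocation:
//   E[guarded] = 0.5 * 10000 = 5000
//   stddev     = sqrt(10000 * 0.5 * 0.5) = 50
//   bounds     = (4000, 6000) = E[guarded] -/+ 20 stddev.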
1056 :
1057 : // Ensures that the ZebraBlockHeap overrides the provided heap.
1058 E : TEST_P(BlockHeapManagerTest, ZebraHeapIdInTrailerAfterAllocation) {
1059 E : EnableTestZebraBlockHeap();
1060 E : ScopedHeap heap(heap_manager_);
1061 E : const size_t kAllocSize = 0x100;
1062 E : void* alloc = heap.Allocate(kAllocSize);
1063 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1064 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1065 :
1066 : // Get the heap_id from the block trailer.
1067 E : BlockInfo block_info = {};
1068 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1069 :
1070 : {
1071 E : ScopedBlockAccess block_access(block_info);
1072 : // The heap_id stored in the block trailer should match the zebra heap id.
1073 : EXPECT_EQ(heap_manager_->zebra_block_heap_id_,
1074 E : block_info.trailer->heap_id);
1075 E : }
1076 :
1077 E : EXPECT_TRUE(heap.Free(alloc));
1078 E : }
1079 :
1080 : // Ensures that the provided heap is used when the ZebraBlockHeap cannot handle
1081 : // the allocation.
1082 E : TEST_P(BlockHeapManagerTest, DefaultHeapIdInTrailerWhenZebraHeapIsFull) {
1083 E : EnableTestZebraBlockHeap();
1084 E : ScopedHeap heap(heap_manager_);
1085 E : const size_t kAllocSize = 0x100;
1086 : // Refuse allocations on the ZebraBlockHeap.
1087 E : test_zebra_block_heap_->set_refuse_allocations(true);
1088 :
1089 E : void* alloc = heap.Allocate(kAllocSize);
1090 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1091 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1092 :
1093 : // Get the heap_id from the block trailer.
1094 E : BlockInfo block_info = {};
1095 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1096 : {
1097 E : ScopedBlockAccess block_access(block_info);
1098 : // The heap_id stored in the block trailer should match the provided heap.
1099 E : EXPECT_EQ(heap.Id(), block_info.trailer->heap_id);
1100 E : }
1101 E : EXPECT_TRUE(heap.Free(alloc));
1102 E : }
1103 :
1104 : // Allocations larger than the page size (4KB) will not be served by the zebra
1105 : // heap.
1106 E : TEST_P(BlockHeapManagerTest, AllocStress) {
1107 E : EnableTestZebraBlockHeap();
1108 E : ScopedHeap heap(heap_manager_);
1109 E : for (size_t i = 0; i < 3000; ++i) {
1110 : // Sometimes allocate more than one page, to ensure that allocations get
1111 : // spread across the ZebraBlockHeap and normal heaps.
1112 E : const size_t kAllocSize = (i * 997) % (9 * 1024);
1113 E : void* alloc = heap.Allocate(kAllocSize);
1114 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1115 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1116 : // Free should succeed, even if the block is quarantined.
1117 E : EXPECT_TRUE(heap.Free(alloc));
1118 E : }
1119 E : }
1120 :
1121 E : TEST_P(BlockHeapManagerTest, AllocFromRateTargetedHeap) {
1122 E : ScopedHeap heap(heap_manager_);
1123 E : std::vector<void*> alloc_to_free;
1124 : const size_t kAllocSize1 =
1125 E : TestBlockHeapManager::kDefaultRateTargetedHeapsMinBlockSize[0];
1126 : const size_t kAllocSize2 =
1127 E : TestBlockHeapManager::kDefaultRateTargetedHeapsMinBlockSize[1];
1128 E : const size_t kIterations = 10;
1129 :
1130 : // The first iteration is used to initialize the allocation-site frequency
1131 : // min and max.
1132 E : for (size_t c = 0; c < kIterations + 1; ++c) {
1133 E : void* alloc = heap.Allocate(kAllocSize1);
1134 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1135 E : alloc = heap.Allocate(kAllocSize2);
1136 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1137 E : alloc_to_free.push_back(alloc);
1138 E : for (size_t i = 0; i < 10; ++i) {
1139 E : void* alloc = heap.Allocate(kAllocSize1);
1140 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1141 E : alloc_to_free.push_back(alloc);
1142 E : alloc = heap.Allocate(kAllocSize2);
1143 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1144 E : alloc_to_free.push_back(alloc);
1145 E : }
1146 E : for (size_t i = 0; i < 100; ++i) {
1147 E : void* alloc = heap.Allocate(kAllocSize1);
1148 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1149 E : alloc_to_free.push_back(alloc);
1150 E : alloc = heap.Allocate(kAllocSize2);
1151 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1152 E : alloc_to_free.push_back(alloc);
1153 E : }
1154 E : for (size_t i = 0; i < 1000; ++i) {
1155 E : void* alloc = heap.Allocate(kAllocSize1);
1156 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1157 E : alloc_to_free.push_back(alloc);
1158 E : alloc = heap.Allocate(kAllocSize2);
1159 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1160 E : alloc_to_free.push_back(alloc);
1161 E : }
1162 : // Free the allocations made during the first iteration; the allocation-
1163 : // site frequencies should now be correctly initialized and the
1164 : // appropriate heap will be used for each allocation.
1165 E : if (c == 0) {
1166 E : for (const auto& alloc : alloc_to_free)
1167 E : heap.Free(alloc);
1168 E : alloc_to_free.clear();
1169 E : for (size_t i = 0; i < TestBlockHeapManager::kRateTargetedHeapCount; ++i)
1170 E : heap_manager_->rate_targeted_heaps_count_[i] = 0;
1171 : }
1172 E : }
1173 E : for (size_t i = 1, j = 0; i < 10000; i *= 10, ++j) {
1174 : EXPECT_EQ(kIterations * i * 2,
1175 E : heap_manager_->rate_targeted_heaps_count_[j]);
1176 E : }
1177 E : for (const auto& alloc : alloc_to_free)
1178 E : heap.Free(alloc);
1179 E : }
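
// For reference, the expectations above follow from the loop structure: in
// each measured iteration the rate-i call sites perform i allocations for
// each of the two sizes (2 * i total), hence kIterations * i * 2 per rate
// bucket.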
1180 :
1181 : // The BlockHeapManager correctly quarantines the memory after free.
1182 E : TEST_P(BlockHeapManagerTest, QuarantinedAfterFree) {
1183 E : EnableTestZebraBlockHeap();
1184 E : ScopedHeap heap(heap_manager_);
1185 : // Always quarantine if possible.
1186 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1187 :
1188 E : const size_t kAllocSize = 0x100;
1189 E : void* alloc = heap.Allocate(kAllocSize);
1190 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1191 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1192 : // Free should succeed, even if the block is quarantined.
1193 E : EXPECT_TRUE(heap.Free(alloc));
1194 : // The block should be quarantined and poisoned.
1195 E : ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(alloc, kAllocSize));
1196 E : BlockInfo block_info = {};
1197 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1198 :
1199 : {
1200 E : ScopedBlockAccess block_access(block_info);
1201 E : EXPECT_EQ(QUARANTINED_BLOCK, block_info.header->state);
1202 E : }
1203 E : }
1204 :
1205 : // set_parameters should propagate the zebra_block_heap_quarantine_ratio
1206 : // parameter correctly.
1207 E : TEST_P(BlockHeapManagerTest, set_parametersSetsZebraBlockHeapQuarantineRatio) {
1208 E : EnableTestZebraBlockHeap();
1209 E : float new_ratio = 1.0f / 8;
1210 E : ::common::AsanParameters params = heap_manager_->parameters();
1211 E : params.zebra_block_heap_quarantine_ratio = new_ratio;
1212 E : heap_manager_->set_parameters(params);
1213 E : EXPECT_EQ(new_ratio, test_zebra_block_heap_->quarantine_ratio());
1214 E : }
1215 :
1216 : // Test for double free errors using the zebra heap.
1217 E : TEST_P(BlockHeapManagerTest, DoubleFreeOnZebraHeap) {
1218 E : EnableTestZebraBlockHeap();
1219 E : ScopedHeap heap(heap_manager_);
1220 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1221 :
1222 E : const size_t kAllocSize = 0xFF;
1223 E : void* alloc = heap.Allocate(kAllocSize);
1224 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1225 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1226 :
1227 E : EXPECT_TRUE(heap.Free(alloc));
1228 E : EXPECT_FALSE(heap.Free(alloc));
1229 :
1230 E : EXPECT_EQ(1u, errors_.size());
1231 E : EXPECT_EQ(DOUBLE_FREE, errors_[0].error_type);
1232 E : EXPECT_EQ(alloc, errors_[0].location);
1233 E : }
1234 :
1235 E : TEST_P(BlockHeapManagerTest, AllocatedBlockIsProtected) {
1236 E : EnableTestZebraBlockHeap();
1237 E : ScopedHeap heap(heap_manager_);
1238 :
1239 E : const size_t kAllocSize = 0xFF;
1240 E : void* alloc = heap.Allocate(kAllocSize);
1241 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1242 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1243 :
1244 E : BlockInfo block_info = {};
1245 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1246 :
1247 : // Test the block protections before being quarantined.
1248 : // The whole block should be unpoisoned in the shadow memory.
1249 E : for (size_t i = 0; i < block_info.body_size; ++i)
1250 E : EXPECT_TRUE(Shadow::IsAccessible(block_info.body + i));
1251 :
1252 : // Ensure that the block left redzone is page-protected.
1253 E : for (size_t i = 0; i < block_info.left_redzone_pages_size; ++i)
1254 i : EXPECT_TRUE(IsNotAccessible(block_info.left_redzone_pages + i));
1255 :
1256 : // Ensure that the block right redzone is page-protected.
1257 E : for (size_t i = 0; i < block_info.right_redzone_pages_size; ++i)
1258 E : EXPECT_TRUE(IsNotAccessible(block_info.right_redzone_pages + i));
1259 :
1260 : // The block body should be accessible.
1261 E : for (size_t i = 0; i < block_info.body_size; ++i)
1262 E : EXPECT_TRUE(IsAccessible(block_info.body + i));
1263 :
1264 : {
1265 E : ScopedBlockAccess block_access(block_info);
1266 E : EXPECT_EQ(ALLOCATED_BLOCK, block_info.header->state);
1267 E : }
1268 :
1269 E : EXPECT_TRUE(heap.Free(alloc));
1270 E : }
1271 :
1272 E : TEST_P(BlockHeapManagerTest, QuarantinedBlockIsProtected) {
1273 E : EnableTestZebraBlockHeap();
1274 E : ScopedHeap heap(heap_manager_);
1275 : // Always quarantine if possible.
1276 E : test_zebra_block_heap_->set_quarantine_ratio(1.0);
1277 :
1278 E : for (size_t i = 0; i < 20; ++i) {
1279 E : const size_t kAllocSize = 0xFF + i;
1280 E : void* alloc = heap.Allocate(kAllocSize);
1281 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1282 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1283 :
1284 E : BlockInfo block_info = {};
1285 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1286 :
1287 : // The block is freed and quarantined.
1288 E : EXPECT_TRUE(heap.Free(alloc));
1289 :
1290 : // Test the block protections after being quarantined.
1291 : // The whole block should be poisoned in the shadow memory.
1292 E : for (size_t i = 0; i < block_info.body_size; ++i)
1293 E : EXPECT_FALSE(Shadow::IsAccessible(block_info.body + i));
1294 :
1295 : // Ensure that the block left redzone is page-protected.
1296 E : for (size_t i = 0; i < block_info.left_redzone_pages_size; ++i)
1297 i : EXPECT_TRUE(IsNotAccessible(block_info.left_redzone_pages + i));
1298 :
1299 : // Ensure that the block right redzone is page-protected.
1300 E : for (size_t i = 0; i < block_info.right_redzone_pages_size; ++i)
1301 E : EXPECT_TRUE(IsNotAccessible(block_info.right_redzone_pages + i));
1302 :
1303 : // Ensure that the block body is page-protected.
1304 E : for (size_t i = 0; i < block_info.body_size; ++i)
1305 E : EXPECT_TRUE(IsNotAccessible(block_info.body + i));
1306 :
1307 : {
1308 E : ScopedBlockAccess block_access(block_info);
1309 E : EXPECT_EQ(QUARANTINED_BLOCK, block_info.header->state);
1310 E : }
1311 E : }
1312 E : }
1313 :
1314 E : TEST_P(BlockHeapManagerTest, NonQuarantinedBlockIsMarkedAsFreed) {
1315 E : EnableTestZebraBlockHeap();
1316 E : ScopedHeap heap(heap_manager_);
1317 : // Disable the zebra heap quarantine.
1318 E : test_zebra_block_heap_->set_refuse_push(true);
1319 :
1320 E : const size_t kAllocSize = 0x100;
1321 E : void* alloc = heap.Allocate(kAllocSize);
1322 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1323 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1324 :
1325 E : BlockInfo block_info = {};
1326 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1327 :
1328 : // The block is freed but not quarantined.
1329 E : EXPECT_TRUE(heap.Free(alloc));
1330 :
1331 : // The whole block should be unpoisoned in the shadow memory, and its
1332 : // associated pages unprotected.
1333 E : for (size_t i = 0; i < block_info.block_size; ++i) {
1334 E : ASSERT_TRUE(Shadow::IsAccessible(block_info.block + i));
1335 E : ASSERT_FALSE(Shadow::PageIsProtected(block_info.block + i));
1336 E : }
1337 :
1338 E : EXPECT_EQ(FREED_BLOCK, block_info.header->state);
1339 E : }
1340 :
1341 E : TEST_P(BlockHeapManagerTest, ZebraBlockHeapQuarantineRatioIsRespected) {
1342 E : EnableTestZebraBlockHeap();
1343 E : ScopedHeap heap(heap_manager_);
1344 : // Set a non-standard quarantine ratio.
1345 E : float quarantine_ratio = 0.37f;
1346 E : test_zebra_block_heap_->set_quarantine_ratio(quarantine_ratio);
1347 :
1348 E : const size_t kAllocations = 2000;
1349 :
1350 E : size_t zebra_heap_size = test_zebra_block_heap_->slab_count_;
1351 E : const size_t max_quarantine_size = zebra_heap_size * quarantine_ratio;
1352 :
1353 : // All allocations have a maximum size of 1KB, so all of them are served by
1354 : // the zebra heap.
1355 E : for (size_t i = 0; i < kAllocations; ++i) {
1356 E : const size_t kAllocSize = (0x100 + i) % 1024;
1357 E : void* alloc = heap.Allocate(kAllocSize);
1358 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1359 :
1360 E : BlockInfo block_info = {};
1361 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1362 E : EXPECT_TRUE(heap.Free(alloc));
1363 :
1364 : // After Free the quarantine should be trimmed, enforcing the quarantine
1365 : // size upper bound.
1366 E : EXPECT_LE(test_zebra_block_heap_->GetCount(), max_quarantine_size);
1367 :
1368 : {
1369 E : ScopedBlockAccess block_access(block_info);
1370 E : EXPECT_EQ(QUARANTINED_BLOCK, block_info.header->state);
1371 E : }
1372 E : }
1373 E : }
1374 :
1375 : // Ensures that the LargeBlockHeap overrides the provided heap if the allocation
1376 : // size exceeds the threshold.
1377 E : TEST_P(BlockHeapManagerTest, LargeBlockHeapUsedForLargeAllocations) {
1378 E : EnableLargeBlockHeap(GetPageSize());
1379 :
1380 : // Disable targeted heaps as it interferes with this test.
1381 E : ::common::AsanParameters params = heap_manager_->parameters();
1382 E : params.enable_rate_targeted_heaps = false;
1383 E : heap_manager_->SetParameters(params);
1384 :
1385 E : ScopedHeap heap(heap_manager_);
1386 :
1387 E : const size_t kAllocSize = GetPageSize() + 0x100;
1388 E : void* alloc = heap.Allocate(kAllocSize);
1389 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1390 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1391 :
1392 : // Get the heap_id from the block trailer.
1393 E : BlockInfo block_info = {};
1394 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1395 :
1396 : {
1397 E : ScopedBlockAccess block_access(block_info);
1398 : // The heap_id stored in the block trailer should match the large block
1399 : // heap id.
1400 : EXPECT_EQ(heap_manager_->large_block_heap_id_,
1401 E : block_info.trailer->heap_id);
1402 E : }
1403 :
1404 E : EXPECT_TRUE(heap.Free(alloc));
1405 E : }
1406 :
1407 : // Ensures that the LargeBlockHeap is not used for a small allocation.
1408 E : TEST_P(BlockHeapManagerTest, LargeBlockHeapNotUsedForSmallAllocations) {
1409 E : EnableLargeBlockHeap(GetPageSize());
1410 E : ScopedHeap heap(heap_manager_);
1411 :
1412 E : const size_t kAllocSize = 0x100;
1413 E : void* alloc = heap.Allocate(kAllocSize);
1414 E : EXPECT_NE(static_cast<void*>(nullptr), alloc);
1415 E : ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(alloc, kAllocSize));
1416 :
1417 : // Get the heap_id from the block trailer.
1418 E : BlockInfo block_info = {};
1419 E : EXPECT_TRUE(Shadow::BlockInfoFromShadow(alloc, &block_info));
1420 :
1421 : {
1422 E : ScopedBlockAccess block_access(block_info);
1423 : // The provided heap ID should be the one in the block trailer.
1424 E : EXPECT_EQ(heap.Id(), block_info.trailer->heap_id);
1425 E : }
1426 :
1427 E : EXPECT_TRUE(heap.Free(alloc));
1428 E : }
1429 :
1430 E : TEST_P(BlockHeapManagerTest, AllocationFilterFlag) {
1431 E : EXPECT_NE(TLS_OUT_OF_INDEXES, heap_manager_->allocation_filter_flag_tls_);
1432 E : heap_manager_->set_allocation_filter_flag(true);
1433 E : EXPECT_TRUE(heap_manager_->allocation_filter_flag());
1434 E : heap_manager_->set_allocation_filter_flag(false);
1435 E : EXPECT_FALSE(heap_manager_->allocation_filter_flag());
1436 E : heap_manager_->set_allocation_filter_flag(true);
1437 E : EXPECT_TRUE(heap_manager_->allocation_filter_flag());
1438 E : }
1439 :
1440 : namespace {
1441 :
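 : // Counts the non-null entries at the head of |heaps|, a null-terminated
 : // array of heap interfaces (as populated in |locked_heaps_| by
 : // BlockHeapManager::BestEffortLockAll).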
1442 E : size_t CountLockedHeaps(HeapInterface** heaps) {
1443 E : size_t i = 0;
1444 E : while (heaps[i] != nullptr) {
1445 E : ++i;
1446 E : }
1447 E : return i;
1448 E : }
1449 :
1450 : } // namespace
1451 :
1452 E : TEST_P(BlockHeapManagerTest, BestEffortLockAllNoLocksHeld) {
1453 E : heap_manager_->BestEffortLockAll();
1454 : EXPECT_EQ(CountLockedHeaps(heap_manager_->locked_heaps_),
1455 E : heap_manager_->heaps_.size());
1456 E : heap_manager_->UnlockAll();
1457 E : }
1458 :
1459 : namespace {
1460 :
1461 : // A helper thread runner for acquiring a HeapInterface lock for a certain
1462 : // amount of time.
1463 : class GrabHeapLockRunner : public base::DelegateSimpleThread::Delegate {
1464 : public:
1465 E : explicit GrabHeapLockRunner(HeapInterface* heap)
1466 : : heap_(heap), cv_(&cv_lock_), acquired_(false), release_(false) {
1467 E : DCHECK_NE(static_cast<HeapInterface*>(nullptr), heap);
1468 E : }
1469 :
1470 E : virtual void Run() {
1471 E : DCHECK_NE(static_cast<HeapInterface*>(nullptr), heap_);
1472 E : heap_->Lock();
1473 E : SignalAcquired();
1474 E : WaitRelease();
1475 E : heap_->Unlock();
1476 E : }
1477 :
1478 : // Waits until |acquired_| is true.
1479 E : void WaitAcquired() {
1480 E : while (true) {
1481 E : base::AutoLock auto_lock(cv_lock_);
1482 E : if (acquired_)
1483 E : return;
1484 i : cv_.Wait();
1485 i : }
1486 E : }
1487 :
1488 : // To be called externally to notify this runner that the lock may be
1489 : // released and the thread torn down.
1490 E : void SignalRelease() {
1491 E : base::AutoLock auto_lock(cv_lock_);
1492 E : release_ = true;
1493 E : cv_.Broadcast();
1494 E : }
1495 :
1496 : private:
1497 : // Notifies external observers that the lock has been acquired.
1498 E : void SignalAcquired() {
1499 E : base::AutoLock auto_lock(cv_lock_);
1500 E : acquired_ = true;
1501 E : cv_.Broadcast();
1502 E : }
1503 :
1504 : // Waits until |release_| is true.
1505 E : void WaitRelease() {
1506 E : while (true) {
1507 E : base::AutoLock auto_lock(cv_lock_);
1508 E : if (release_)
1509 E : return;
1510 E : cv_.Wait();
1511 E : }
1512 E : }
1513 :
1514 : HeapInterface* heap_;
1515 : base::Lock cv_lock_;
1516 : base::ConditionVariable cv_;
1517 : bool acquired_;
1518 : bool release_;
1519 :
1520 : DISALLOW_COPY_AND_ASSIGN(GrabHeapLockRunner);
1521 : };
1522 :
1523 : } // namespace
1524 :
1525 E : TEST_P(BlockHeapManagerTest, BestEffortLockAllOneHeapLockHeld) {
1526 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1527 E : GrabHeapLockRunner runner(heap_manager_->heaps_.begin()->first);
1528 E : base::DelegateSimpleThread thread(&runner, "GrabHeapLockRunner");
1529 E : thread.Start();
1530 E : runner.WaitAcquired();
1531 E : heap_manager_->BestEffortLockAll();
1532 :
1533 : // Expect all but one heap lock to have been acquired.
1534 : EXPECT_EQ(CountLockedHeaps(heap_manager_->locked_heaps_),
1535 E : heap_manager_->heaps_.size() - 1);
1536 E : heap_manager_->UnlockAll();
1537 E : runner.SignalRelease();
1538 E : thread.Join();
1539 E : }
1540 :
1541 : // These functions are tested explicitly because the AsanRuntime reaches in
1542 : // to use them.
1543 :
1544 E : TEST_P(BlockHeapManagerTest, IsValidHeapIdUnlocked) {
1545 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1546 E : EXPECT_FALSE(heap_manager_->IsValidHeapIdUnlocked(0xDEADBEEF, false));
1547 E : for (auto& hq_pair : heap_manager_->heaps_) {
1548 E : TestBlockHeapManager::HeapQuarantinePair* hq = &hq_pair;
1549 : TestBlockHeapManager::HeapId heap_id =
1550 E : reinterpret_cast<TestBlockHeapManager::HeapId>(hq);
1551 E : EXPECT_TRUE(heap_manager_->IsValidHeapIdUnlocked(heap_id, false));
1552 E : }
1553 E : }
1554 :
1555 E : TEST_P(BlockHeapManagerTest, GetHeapTypeUnlocked) {
1556 E : ASSERT_FALSE(heap_manager_->heaps_.empty());
1557 E : for (auto& hq_pair : heap_manager_->heaps_) {
1558 E : TestBlockHeapManager::HeapQuarantinePair* hq = &hq_pair;
1559 : TestBlockHeapManager::HeapId heap_id =
1560 E : reinterpret_cast<TestBlockHeapManager::HeapId>(hq);
1561 E : EXPECT_NE(kUnknownHeapType, heap_manager_->GetHeapTypeUnlocked(heap_id));
1562 E : }
1563 E : }
1564 :
1565 : namespace {
1566 :
1567 E : bool ShadowIsConsistentPostAlloc(const void* alloc, size_t size) {
1568 E : uintptr_t index = reinterpret_cast<uintptr_t>(alloc);
1569 E : index >>= kShadowRatioLog;
1570 E : uintptr_t index_end = index + (size >> kShadowRatioLog);
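 : // Each shadow byte covers kShadowRatio (1 << kShadowRatioLog) bytes of
 : // application memory, so shifting the address and size right by
 : // kShadowRatioLog yields the range of shadow bytes the allocation fully
 : // covers (a trailing partial shadow byte, if any, is skipped).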
1571 E : for (size_t i = index; i < index_end; ++i) {
1572 E : if (Shadow::shadow()[i] != ShadowMarker::kHeapAddressableMarker)
1573 i : return false;
1574 E : }
1575 E : return true;
1576 E : }
1577 :
1578 E : bool ShadowIsConsistentPostFree(const void* alloc, size_t size) {
1579 E : uintptr_t index = reinterpret_cast<uintptr_t>(alloc);
1580 E : index >>= kShadowRatioLog;
1581 E : uintptr_t index_end = index + (size >> kShadowRatioLog);
1582 :
1583 E : uint8 m = Shadow::shadow()[index];
1584 : if (m != ShadowMarker::kHeapAddressableMarker &&
1585 : m != ShadowMarker::kAsanReservedMarker &&
1586 E : m != ShadowMarker::kHeapFreedMarker) {
1587 i : return false;
1588 : }
1589 :
1590 : // We expect green memory only for large allocations, which are directly
1591 : // mapped. Small allocations should be returned to a common pool and
1592 : // marked as reserved. Note that this behavior is specific to the
1593 : // CtMalloc heap.
1594 E : if (m == ShadowMarker::kHeapAddressableMarker && size < 1 * 1024 * 1024)
1595 i : return false;
1596 :
1597 E : for (size_t i = index; i < index_end; ++i) {
1598 E : if (Shadow::shadow()[i] != m)
1599 i : return false;
1600 E : }
1601 E : return true;
1602 E : }
1603 :
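 : // Exposes the otherwise protected Shadow::Reset so that the integration
 : // test below can start from a clean shadow.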
1604 : class TestShadow : public Shadow {
1605 : public:
1606 : using Shadow::Reset;
1607 : };
1608 :
1609 : } // namespace
1610 :
1611 : // A stress test of the CtMalloc heap integration.
1612 E : TEST(BlockHeapManagerIntegrationTest, CtMallocStressTest) {
1613 E : TestShadow::Reset();
1614 E : TestShadow::SetUp();
1615 E : ASSERT_TRUE(TestShadow::IsClean());
1616 :
1617 : // Set up a configuration that disables most features but specifically
1618 : // enables CtMalloc. A 50% allocation guard rate mixes guarded and
1619 : // unguarded allocations to increase the complexity.
1620 : ::common::AsanParameters p;
1621 E : ::common::SetDefaultAsanParameters(&p);
1622 E : p.check_heap_on_failure = false;
1623 E : p.enable_ctmalloc = true;
1624 E : p.enable_zebra_block_heap = false;
1625 E : p.enable_large_block_heap = false;
1626 E : p.enable_allocation_filter = false;
1627 E : p.enable_rate_targeted_heaps = false;
1628 E : p.allocation_guard_rate = 0.5;
1629 E : p.zebra_block_heap_size = 0;
1630 E : p.zebra_block_heap_quarantine_ratio = 0.0;
1631 :
1632 : // Initialize a block heap manager.
1633 E : AsanLogger al;
1634 E : scoped_ptr<StackCaptureCache> scc(new StackCaptureCache(&al));
1635 E : scoped_ptr<TestBlockHeapManager> bhm(new TestBlockHeapManager(scc.get()));
1636 E : bhm->set_parameters(p);
1637 E : bhm->Init();
1638 E : BlockHeapManager::HeapId hid = bhm->CreateHeap();
1639 :
1640 : // The target number of allocations to be alive on average.
1641 : static const size_t kAlive = 1000;
1642 :
1643 : // The minimum and maximum allocation sizes, in bits.
1644 : // CtMalloc can serve allocations of up to 1MB (20 bits) from standard
1645 : // super pages; anything larger is served by a custom page allocated
1646 : // directly from the OS. A 23-bit (8MB) maximum exercises both paths.
1647 : static const size_t kMinAllocBits = 5;
1648 : static const size_t kMaxAllocBits = 23;
1649 :
1650 : // The number of operations to perform.
1651 : static const size_t kOpCount = 1000000;
1652 :
1653 : // Let's stress test the heap.
1654 E : std::vector<std::pair<void*, size_t>> allocs;
1655 E : size_t net_allocs = 0;
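 : // Reserve the expected high-water mark up front so that growth of the
 : // bookkeeping vector doesn't perturb the stress test.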
1656 E : allocs.reserve(2 * kAlive);
1657 E : for (size_t i = 0; i < kOpCount; ++i) {
1658 : // Draw a random number in [0, 2 * kAlive).
1659 E : size_t op = ::rand() % (2 * kAlive);
1660 :
1661 : // If the number of live allocations is low then most values of |op|
1662 : // trigger an allocation and only a minority trigger a free. If the
1663 : // number of allocations is exactly at the target then the odds are
1664 : // 50/50, and if it is well above the target then a free becomes more
1665 : // likely. Combined, this feedback keeps the average number of live
1666 : // allocations close to the target.
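 : // For example, with 500 live allocations the chance of a free is
 : // 500 / 2000 == 25%; with 1500 live allocations it is 75%.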
1667 E : op = op < allocs.size() ? 1 : 0;
1668 :
1669 : // If one more allocation would leave more live objects than there are
1670 : // operations left to clean them up, force a free. This ensures that
1671 : // the loop exits with all allocations cleaned up.
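 : // Concretely: kOpCount - (i + 1) operations remain after this one, and
 : // allocating now would leave allocs.size() + 1 live blocks, so a free
 : // is forced once allocs.size() + 1 >= kOpCount - (i + 1).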
1672 E : if (i + 1 + allocs.size() + 1 >= kOpCount)
1673 E : op = 1;
1674 :
1675 : // Perform an allocation.
1676 E : if (op == 0) {
1677 : // Want each smaller size to be twice as likely as the next higher
1678 : // size. Use the bits in the random number as coin tosses that decide
1679 : // whether to stay at the current size or move to a bigger size.
1680 E : size_t bits = ::rand();
1681 E : size_t size = 1 << kMinAllocBits;
1682 E : while (bits & 1 && size < (1 << kMaxAllocBits)) {
1683 E : size <<= 1;
1684 E : bits >>= 1;
1685 E : }
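 : // E.g. ::rand() == 0b1011 tosses heads twice and then tails, doubling
 : // 32 to 128 bytes; ignoring the cap, P(size == 2^(kMinAllocBits + k))
 : // is 2^-(k + 1).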
1686 :
1687 : // Perform the allocation.
1688 E : void* alloc = bhm->Allocate(hid, size);
1689 E : DCHECK(alloc != nullptr);
1690 E : allocs.push_back(std::make_pair(alloc, size));
1691 E : net_allocs += size;
1692 :
1693 : // Check that the shadow memory is green.
1694 E : ASSERT_TRUE(ShadowIsConsistentPostAlloc(alloc, size));
1695 E : } else {
1696 E : if (allocs.empty())
1697 E : continue;
1698 E : size_t j = ::rand() % allocs.size();
1699 E : void* alloc = allocs[j].first;
1700 E : size_t size = allocs[j].second;
1701 :
1702 E : bhm->Free(hid, alloc);
1703 E : allocs[j] = allocs.back();
1704 E : allocs.resize(allocs.size() - 1);
1705 E : net_allocs -= size;
1706 :
1707 : // Check that the shadow memory is green or reserved. This is
1708 : // smart enough to check for the appropriate condition based on the
1709 : // allocation size.
1710 E : ASSERT_TRUE(ShadowIsConsistentPostFree(alloc, size));
1711 : }
1712 E : }
1713 :
1714 : // All of the allocations should have been cleaned up.
1715 E : ASSERT_TRUE(allocs.empty());
1716 :
1717 : // Force everything to be cleaned up.
1718 E : bhm.reset();
1719 E : scc.reset();
1720 :
1721 : // Ensure that the shadow memory has been completely cleaned up.
1722 E : ASSERT_TRUE(TestShadow::IsClean());
1723 E : }
1724 :
1725 : } // namespace heap_managers
1726 : } // namespace asan
1727 : } // namespace agent