// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/block.h"

#include <set>

#include "windows.h"

#include "base/memory/scoped_ptr.h"
#include "gtest/gtest.h"
#include "syzygy/agent/asan/page_protection_helpers.h"
#include "syzygy/agent/asan/runtime.h"
#include "syzygy/agent/asan/unittest_util.h"

namespace agent {
namespace asan {

namespace {

using testing::_;

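// Helper that builds a BlockLayout with the given field values, so that
// expected layouts can be compared against those computed by BlockPlanLayout.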
BlockLayout BuildBlockLayout(size_t block_alignment,
                             size_t block_size,
                             size_t header_size,
                             size_t header_padding_size,
                             size_t body_size,
                             size_t trailer_padding_size,
                             size_t trailer_size) {
  BlockLayout layout = { block_alignment, block_size, header_size,
      header_padding_size, body_size, trailer_padding_size, trailer_size };
  return layout;
}

// Checks that the given block is valid, and initialized as expected.
void IsValidBlockImpl(const BlockInfo& block, bool just_initialized) {
  EXPECT_EQ(0u, block.block_size % kShadowRatio);

  // Validate the layout of the block.
  EXPECT_TRUE(block.header != nullptr);
  EXPECT_EQ(0u, block.block_size % kShadowRatio);
  EXPECT_EQ(0u, block.header_padding_size % kShadowRatio);
  EXPECT_EQ(block.RawHeader() + sizeof(BlockHeader),
            block.RawHeaderPadding());
  EXPECT_EQ(block.RawHeaderPadding() + block.header_padding_size,
            block.RawBody());
  EXPECT_EQ(block.RawBody() + block.body_size,
            block.RawTrailerPadding());
  EXPECT_EQ(block.RawTrailerPadding() + block.trailer_padding_size,
            block.RawTrailer());
  EXPECT_EQ(block.RawHeader() + block.block_size,
            block.RawTrailer() + sizeof(BlockTrailer));

  // Validate the actual contents of the various parts of the block.

  // Check the header.
  EXPECT_EQ(kBlockHeaderMagic, block.header->magic);
  EXPECT_FALSE(block.header->is_nested);
  EXPECT_LT(0u, block.header->body_size);
  EXPECT_EQ(block.header->body_size, block.body_size);
  if (just_initialized) {
    EXPECT_EQ(0u, block.header->checksum);
    EXPECT_EQ(NULL, block.header->alloc_stack);
    EXPECT_EQ(NULL, block.header->free_stack);
    EXPECT_EQ(ALLOCATED_BLOCK, block.header->state);
  }

  // By default we assume that blocks are not nested.
  EXPECT_FALSE(block.header->is_nested);
  EXPECT_EQ(static_cast<bool>(block.header->is_nested), block.is_nested);

  // Check the header padding.
  if (block.header->has_header_padding) {
    EXPECT_LE(kShadowRatio, block.header_padding_size);
    EXPECT_EQ(block.header_padding_size,
              *reinterpret_cast<const uint32*>(block.header_padding));
    EXPECT_EQ(block.header_padding_size,
              *reinterpret_cast<const uint32*>(block.RawHeaderPadding() +
                  block.header_padding_size - sizeof(uint32)));
    for (size_t i = sizeof(uint32);
         i < block.header_padding_size - sizeof(uint32);
         ++i) {
      EXPECT_EQ(kBlockHeaderPaddingByte, block.RawHeaderPadding(i));
    }
  }

  // Check the trailer padding.
  size_t start_of_trailer_iteration = 0;
  if (block.header->has_excess_trailer_padding) {
    start_of_trailer_iteration = 4;
    EXPECT_EQ(block.trailer_padding_size,
              *reinterpret_cast<const uint32*>(block.trailer_padding));
  }
  for (size_t i = start_of_trailer_iteration; i < block.trailer_padding_size;
       ++i) {
    EXPECT_EQ(kBlockTrailerPaddingByte, block.RawTrailerPadding(i));
  }

  // Check the trailer.
  EXPECT_NE(0u, block.trailer->alloc_tid);
  EXPECT_GE(::GetTickCount(), block.trailer->alloc_ticks);
  if (just_initialized) {
    EXPECT_EQ(0u, block.trailer->free_tid);
    EXPECT_EQ(0u, block.trailer->free_ticks);
  }
}

void IsValidInitializedBlock(const BlockInfo& block) {
  IsValidBlockImpl(block, true);
}

void IsValidBlock(const BlockInfo& block) {
  IsValidBlockImpl(block, false);
}

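// Test fixture that provides the exception callback mock (inherited from
// testing::OnExceptionCallbackTest) and a test shadow memory for the tests
// that exercise page protections.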
class BlockTest : public testing::OnExceptionCallbackTest {
 public:
  using Super = testing::OnExceptionCallbackTest;

  void SetUp() override {
    Super::SetUp();
    shadow_.SetUp();
  }

  void TearDown() override {
    shadow_.TearDown();
    Super::TearDown();
  }

  Shadow shadow_;
};

}  // namespace

bool operator==(const BlockLayout& bl1, const BlockLayout& bl2) {
  return ::memcmp(&bl1, &bl2, sizeof(BlockLayout)) == 0;
}

bool operator==(const BlockInfo& bi1, const BlockInfo& bi2) {
  return ::memcmp(&bi1, &bi2, sizeof(BlockInfo)) == 0;
}

TEST_F(BlockTest, BlockPlanLayout) {
  BlockLayout layout = {};

  // Zero-sized allocations should work fine.
  EXPECT_TRUE(BlockPlanLayout(8, 8, 0, 0, 0, &layout));
  EXPECT_EQ(BuildBlockLayout(8, 40, 16, 0, 0, 4, 20), layout);
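  // (That is: a 16-byte header, an empty body, 4 bytes of trailer padding and
  // a 20-byte trailer, for a 40-byte block that is a multiple of kShadowRatio.)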

  EXPECT_TRUE(BlockPlanLayout(8, 8, 60, 32, 32, &layout));
  EXPECT_EQ(BuildBlockLayout(8, 128, 16, 16, 60, 16, 20), layout);

  EXPECT_TRUE(BlockPlanLayout(8, 8, 60, 0, 0, &layout));
  EXPECT_EQ(BuildBlockLayout(8, 96, 16, 0, 60, 0, 20), layout);

  EXPECT_TRUE(BlockPlanLayout(8, 8, 64, 0, 0, &layout));
  EXPECT_EQ(BuildBlockLayout(8, 104, 16, 0, 64, 4, 20), layout);

  EXPECT_TRUE(BlockPlanLayout(8, 8, 61, 0, 0, &layout));
  EXPECT_EQ(BuildBlockLayout(8, 104, 16, 0, 61, 7, 20), layout);

  // Plan a layout that would use guard pages.
  EXPECT_TRUE(BlockPlanLayout(4096, 8, 100, 4096, 4096, &layout));
  EXPECT_EQ(BuildBlockLayout(4096, 3 * 4096, 16, 8072, 100, 4080, 20), layout);

  // Plan a layout with an invalid size; this should fail.
  EXPECT_FALSE(BlockPlanLayout(8, 8, 0xffffffff, 0, 0, &layout));
}

TEST_F(BlockTest, EndToEnd) {
  BlockLayout layout = {};
  BlockInfo block_info = {};

  EXPECT_TRUE(BlockPlanLayout(8, 8, 4, 0, 0, &layout));
  scoped_ptr<uint8> block_data(new uint8[layout.block_size]);
  ASSERT_TRUE(block_data != NULL);
  ::memset(block_data.get(), 0, layout.block_size);
  BlockInitialize(layout, block_data.get(), false, &block_info);
  EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
  block_data.reset(NULL);

  EXPECT_TRUE(BlockPlanLayout(8, 8, 61, 0, 0, &layout));
  block_data.reset(new uint8[layout.block_size]);
  ASSERT_TRUE(block_data != NULL);
  ::memset(block_data.get(), 0, layout.block_size);
  BlockInitialize(layout, block_data.get(), false, &block_info);
  EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
  block_data.reset(NULL);

  EXPECT_TRUE(BlockPlanLayout(8, 8, 60, 32, 32, &layout));
  block_data.reset(new uint8[layout.block_size]);
  ASSERT_TRUE(block_data != NULL);
  ::memset(block_data.get(), 0, layout.block_size);
  BlockInitialize(layout, block_data.get(), false, &block_info);
  EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
  block_data.reset(NULL);

  // Do an allocation that uses entire pages.
  EXPECT_TRUE(BlockPlanLayout(4096, 8, 100, 4096, 4096, &layout));
  void* data = ::VirtualAlloc(NULL, layout.block_size, MEM_COMMIT,
                              PAGE_READWRITE);
  ASSERT_TRUE(data != NULL);
  ::memset(data, 0, layout.block_size);
  BlockInitialize(layout, data, false, &block_info);
  EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
  ASSERT_EQ(TRUE, ::VirtualFree(data, 0, MEM_RELEASE));
}

TEST_F(BlockTest, GetHeaderFromBody) {
  // Plan two layouts, one with header padding and another without.
  BlockLayout layout1 = {};
  BlockLayout layout2 = {};
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout1));
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 32, 0, &layout2));

  scoped_ptr<uint8> data(new uint8[layout2.block_size]);
  ::memset(data.get(), 0, layout2.block_size);

  // First try navigating a block without header padding.
  BlockInfo info = {};
  BlockInitialize(layout1, data.get(), false, &info);
  // This should succeed as expected.
  EXPECT_EQ(info.header, BlockGetHeaderFromBody(info.body));
  // This fails because of invalid alignment.
  EXPECT_TRUE(BlockGetHeaderFromBody(
      reinterpret_cast<BlockBody*>(info.RawBody() + 1)) == nullptr);
  // This fails because the pointer is not at the beginning of the body.
  EXPECT_TRUE(BlockGetHeaderFromBody(
      reinterpret_cast<BlockBody*>(info.RawBody() + kShadowRatio)) == nullptr);
  // This fails because of invalid header magic.
  ++info.header->magic;
  EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
  // This fails because the header indicates there's padding.
  --info.header->magic;
  info.header->has_header_padding = 1;
  EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);

  // Now navigate a block with header padding.
  BlockInitialize(layout2, data.get(), false, &info);
  // This should succeed as expected.
  EXPECT_EQ(info.header, BlockGetHeaderFromBody(info.body));
  // This fails because of invalid alignment.
  EXPECT_TRUE(BlockGetHeaderFromBody(
      reinterpret_cast<BlockBody*>(info.RawBody() + 1)) == nullptr);
  // This fails because the pointer is not at the beginning of the body.
  EXPECT_TRUE(BlockGetHeaderFromBody(
      reinterpret_cast<BlockBody*>(info.RawBody() + kShadowRatio)) == nullptr);
  // This fails because of invalid header magic.
  ++info.header->magic;
  EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
  // This fails because the header indicates there's no padding.
  --info.header->magic;
  info.header->has_header_padding = 0;
  EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
  // This fails because the padding length is invalid.
  info.header->has_header_padding = 1;
  uint32* head = reinterpret_cast<uint32*>(info.header_padding);
  uint32* tail = head + (info.header_padding_size / sizeof(uint32)) - 1;
  ++(*tail);
  EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
  // This fails because the padding lengths don't agree.
  --(*tail);
  ++(*head);
  EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
}

TEST_F(BlockTest, GetHeaderFromBodyProtectedMemory) {
  BlockLayout layout = {};
  EXPECT_TRUE(BlockPlanLayout(4096, 4096, 4096, 4096, 4096, &layout));
  void* alloc = ::VirtualAlloc(NULL, layout.block_size, MEM_COMMIT,
                               PAGE_READWRITE);
  ASSERT_TRUE(alloc != NULL);
  BlockInfo block_info = {};
  BlockInitialize(layout, alloc, false, &block_info);

  BlockProtectRedzones(block_info, &shadow_);
  EXPECT_CALL(*this, OnExceptionCallback(_));
  EXPECT_TRUE(BlockGetHeaderFromBody(block_info.body) == NULL);
  testing::Mock::VerifyAndClearExpectations(this);
  BlockProtectNone(block_info, &shadow_);

  ASSERT_EQ(TRUE, ::VirtualFree(alloc, 0, MEM_RELEASE));
}

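// Converts an expanded BlockInfo to the compact representation and back, and
// verifies that no information is lost in the round trip.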
TEST_F(BlockTest, ConvertBlockInfo) {
  BlockLayout layout = {};
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout));

  scoped_ptr<uint8> data(new uint8[layout.block_size]);
  ::memset(data.get(), 0, layout.block_size);

  BlockInfo expanded = {};
  BlockInitialize(layout, data.get(), false, &expanded);

  CompactBlockInfo compact = {};
  ConvertBlockInfo(expanded, &compact);
  EXPECT_EQ(layout.block_size, compact.block_size);
  EXPECT_EQ(layout.header_size + layout.header_padding_size,
            compact.header_size);
  EXPECT_EQ(layout.trailer_size + layout.trailer_padding_size,
            compact.trailer_size);
  EXPECT_FALSE(compact.is_nested);

  BlockInfo expanded2 = {};
  ConvertBlockInfo(compact, &expanded2);
  EXPECT_EQ(0, ::memcmp(&expanded, &expanded2, sizeof(expanded)));
}

TEST_F(BlockTest, BlockInfoFromMemory) {
  // Plan two layouts, one with header padding and another without.
  BlockLayout layout1 = {};
  BlockLayout layout2 = {};
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout1));
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 32, 0, &layout2));

  scoped_ptr<uint8> data(new uint8[layout2.block_size]);
  ::memset(data.get(), 0, layout2.block_size);

  // First recover a block without header padding.
  BlockInfo info = {};
  BlockInitialize(layout1, data.get(), false, &info);
  BlockInfo info_recovered = {};
  EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
  EXPECT_EQ(info, info_recovered);
  // This fails because the header is not aligned.
  EXPECT_FALSE(BlockInfoFromMemory(
      reinterpret_cast<BlockHeader*>(info.RawHeader() + 1),
      &info_recovered));
  // This fails because the magic is invalid.
  ++info.header->magic;
  EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
  --info.header->magic;
  // This fails because the header indicates there's padding yet there is
  // none.
  info.header->has_header_padding = 1;
  EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));

  // Now recover a block with header padding.
  BlockInitialize(layout2, data.get(), false, &info);
  EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
  EXPECT_EQ(info, info_recovered);
  // This fails because the magic is invalid.
  ++info.header->magic;
  EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
  --info.header->magic;
  // This fails because the header padding lengths don't match.
  uint32* head = reinterpret_cast<uint32*>(info.header_padding);
  uint32* tail = head + (info.header_padding_size / sizeof(uint32)) - 1;
  ++(*tail);
  EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
  --(*tail);

  // Finally ensure that we can recover information about blocks of various
  // sizes.
  const size_t kAllocSize = 3 * GetPageSize();
  void* alloc = ::VirtualAlloc(NULL, kAllocSize, MEM_COMMIT, PAGE_READWRITE);
  ASSERT_TRUE(alloc != NULL);
  for (size_t block_size = 0; block_size < kShadowRatio * 2; ++block_size) {
    BlockLayout layout = {};
    EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, block_size, 0, 0,
                                &layout));
    ASSERT_LE(layout.block_size, kAllocSize);
    BlockInitialize(layout, alloc, false, &info);
    EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
    EXPECT_EQ(info.body_size, info_recovered.body_size);
    EXPECT_EQ(info, info_recovered);

    EXPECT_TRUE(BlockPlanLayout(4096, 4096, block_size, 4096, 4096,
                                &layout));
    ASSERT_LE(layout.block_size, kAllocSize);
    BlockInitialize(layout, alloc, false, &info);
    EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
    EXPECT_EQ(info.body_size, info_recovered.body_size);
    EXPECT_EQ(info, info_recovered);
  }
  ::VirtualFree(alloc, 0, MEM_RELEASE);
}

TEST_F(BlockTest, BlockInfoFromMemoryInvalidPadding) {
  BlockLayout layout = {};
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10,
                              4 * sizeof(BlockHeader), 0, &layout));

  scoped_ptr<uint8> data(new uint8[layout.block_size]);
  ::memset(data.get(), 0, layout.block_size);

  BlockInfo info = {};
  BlockInitialize(layout, data.get(), false, &info);
  EXPECT_EQ(1, info.header->has_header_padding);
  BlockInfo info_recovered = {};
  EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
  EXPECT_EQ(info, info_recovered);

  // Invalidate the padding size and make sure that the block information can
  // no longer be retrieved.
  size_t* padding_size = reinterpret_cast<size_t*>(info.header + 1);
  EXPECT_GE(*padding_size, 2 * sizeof(uint32));
  for (*padding_size = 0;
       *padding_size < 2 * sizeof(uint32);
       ++(*padding_size)) {
    EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
  }
}

TEST_F(BlockTest, BlockInfoFromMemoryProtectedMemory) {
  BlockLayout layout = {};
  EXPECT_TRUE(BlockPlanLayout(4096, 4096, 4096, 4096, 4096, &layout));
  void* alloc = ::VirtualAlloc(NULL, layout.block_size, MEM_COMMIT,
                               PAGE_READWRITE);
  ASSERT_TRUE(alloc != NULL);
  BlockInfo block_info = {};
  BlockInitialize(layout, alloc, false, &block_info);

  BlockProtectRedzones(block_info, &shadow_);
  BlockInfo recovered_info = {};
  EXPECT_CALL(*this, OnExceptionCallback(_));
  EXPECT_FALSE(BlockInfoFromMemory(block_info.header, &recovered_info));
  testing::Mock::VerifyAndClearExpectations(this);
  BlockProtectNone(block_info, &shadow_);

  ASSERT_EQ(TRUE, ::VirtualFree(alloc, 0, MEM_RELEASE));
}

TEST_F(BlockTest, BlockInfoFromMemoryForNestedBlock) {
  BlockLayout layout = {};
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout));

  scoped_ptr<uint8> data(new uint8[layout.block_size]);
  BlockInfo block_info = {};
  BlockInitialize(layout, data.get(), true, &block_info);

  BlockInfo recovered_info = {};
  EXPECT_TRUE(BlockInfoFromMemory(block_info.header, &recovered_info));

  EXPECT_TRUE(recovered_info.is_nested);
  EXPECT_TRUE(recovered_info.header->is_nested);
}

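// Cycles the header state field through every value it can hold (until it
// wraps back to zero) and checks that the checksum can be calculated for each
// of them.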
TEST_F(BlockTest, ChecksumWorksForAllStates) {
  BlockLayout layout = {};
  EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout));
  scoped_ptr<uint8> data(new uint8[layout.block_size]);
  ::memset(data.get(), 0, layout.block_size);
  BlockInfo info = {};
  BlockInitialize(layout, data.get(), false, &info);
  while (true) {
    BlockCalculateChecksum(info);
    ++info.header->state;
    if (info.header->state == 0)
      break;
  }
}

namespace {

// Given two arrays of data, compares them byte-by-byte to find the first
// byte with altered data. Within that byte determines the mask of bits that
// have been altered. Returns the results via |offset| and |mask|.
void FindModifiedBits(size_t length,
                      const uint8* buffer1,
                      const uint8* buffer2,
                      size_t* offset,
                      uint8* mask) {
  ASSERT_TRUE(buffer1 != NULL);
  ASSERT_TRUE(buffer2 != NULL);
  ASSERT_TRUE(offset != NULL);
  ASSERT_TRUE(mask != NULL);

  for (size_t i = 0; i < length; ++i) {
    if (buffer1[i] != buffer2[i]) {
      *offset = i;
      *mask = buffer1[i] ^ buffer2[i];
      return;
    }
  }

  *offset = 0;
  *mask = 0;
}

// These are initialized by TestChecksumDetectsTampering, but referred to by
// ChecksumDetectsTamperingWithMask as well, hence they are not local to a
// function.
size_t state_offset = SIZE_MAX;
uint8 state_mask = 0;

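// Repeatedly modifies the bits selected by |mask_to_modify| at
// |address_to_modify|, recomputing the block checksum each time, and returns
// true if any of the modifications changes the checksum. Also runs
// BlockAnalyze and checks that the analysis attributes the corruption to the
// expected part of the block. The original byte value is restored before
// returning.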
bool ChecksumDetectsTamperingWithMask(const BlockInfo& block_info,
                                      void* address_to_modify,
                                      uint8 mask_to_modify) {
  uint8* byte_to_modify = reinterpret_cast<uint8*>(address_to_modify);

  // Remember the original contents.
  uint8 original_value = *byte_to_modify;
  uint8 original_bits = original_value & ~mask_to_modify;

  // Since the checksum can collide we check a handful of times to build up
  // some confidence. Because this is sometimes expected to return false, the
  // number of iterations needs to be kept reasonably low to keep the unittest
  // fast.
  bool detected = false;
  BlockSetChecksum(block_info);
  uint32 checksum = block_info.header->checksum;
  for (size_t i = 0; i < 4; ++i) {
    // Modify the value, altering only bits in |mask_to_modify|.
    while (true) {
      ++(*byte_to_modify);
      if (((*byte_to_modify) & ~mask_to_modify) == original_bits)
        break;
    }
    BlockSetChecksum(block_info);
    if (block_info.header->checksum != checksum) {
      // Success, the checksum detected the change!
      // Restore the original checksum so the block analysis can continue.
      block_info.header->checksum = checksum;
      detected = true;
      break;
    }
  }

  // Run a detailed analysis on the block. We expect the results of this to
  // agree with where the block was modified.
  BlockAnalysisResult result = {};
  BlockAnalyze(static_cast<BlockState>(block_info.header->state), block_info,
               &result);
  if (address_to_modify < block_info.body) {
    EXPECT_EQ(kDataIsCorrupt, result.block_state);
    // If the byte being modified holds the block state, then the change is so
    // localized that the analysis will sometimes get it wrong. Seeing this in
    // the wild is quite unlikely.
    // TODO(chrisha): If we ever have individual checksums for the header,
    //     the body and the trailer, then revisit this.
    if (address_to_modify != block_info.RawHeader() + state_offset ||
        mask_to_modify != state_mask) {
      EXPECT_EQ(kDataIsCorrupt, result.header_state);
      EXPECT_EQ(kDataStateUnknown, result.body_state);
      EXPECT_EQ(kDataIsClean, result.trailer_state);
    }
  } else if (address_to_modify >= block_info.trailer_padding) {
    EXPECT_EQ(kDataIsCorrupt, result.block_state);
    EXPECT_EQ(kDataIsClean, result.header_state);
    EXPECT_EQ(kDataStateUnknown, result.body_state);
    EXPECT_EQ(kDataIsCorrupt, result.trailer_state);
  } else {
    // The byte being modified is in the body. Only expect to find tampering
    // if the block is quarantined or freed.
    if (block_info.header->state != ALLOCATED_BLOCK) {
      EXPECT_EQ(kDataIsCorrupt, result.block_state);
      EXPECT_EQ(kDataIsClean, result.header_state);
      EXPECT_EQ(kDataIsCorrupt, result.body_state);
      EXPECT_EQ(kDataIsClean, result.trailer_state);
    } else {
      EXPECT_EQ(kDataIsClean, result.block_state);
      EXPECT_EQ(kDataIsClean, result.header_state);
      EXPECT_EQ(kDataIsClean, result.body_state);
      EXPECT_EQ(kDataIsClean, result.trailer_state);
    }
  }

  // Restore the original value before returning.
  *byte_to_modify = original_value;
  return detected;
}

bool ChecksumDetectsTampering(const BlockInfo& block_info,
                              void* address_to_modify) {
  return ChecksumDetectsTamperingWithMask(block_info, address_to_modify, 0xFF);
}

void TestChecksumDetectsTampering(const BlockInfo& block_info) {
  uint32 checksum = BlockCalculateChecksum(block_info);
  block_info.header->checksum = checksum;
  EXPECT_TRUE(BlockChecksumIsValid(block_info));
  ++block_info.header->checksum;
  EXPECT_FALSE(BlockChecksumIsValid(block_info));
  BlockSetChecksum(block_info);
  EXPECT_EQ(checksum, block_info.header->checksum);

  // A detailed block analysis should find nothing awry.
  BlockAnalysisResult result = {};
  BlockAnalyze(static_cast<BlockState>(block_info.header->state), block_info,
               &result);
  EXPECT_EQ(kDataIsClean, result.block_state);
  EXPECT_EQ(kDataIsClean, result.header_state);
  EXPECT_EQ(kDataIsClean, result.body_state);
  EXPECT_EQ(kDataIsClean, result.trailer_state);

  // Get the offset of the byte and the mask of the bits containing the
  // block state. This is resilient to changes in the BlockHeader layout.
  if (state_offset == SIZE_MAX) {
    BlockHeader header1 = {};
    BlockHeader header2 = {};
    header2.state = UINT_MAX;
    FindModifiedBits(sizeof(BlockHeader),
                     reinterpret_cast<const uint8*>(&header1),
                     reinterpret_cast<const uint8*>(&header2),
                     &state_offset,
                     &state_mask);
  }

  // Header bytes should be tamper proof.
  EXPECT_TRUE(ChecksumDetectsTampering(block_info, block_info.header));
  EXPECT_TRUE(ChecksumDetectsTampering(block_info,
                                       &block_info.header->alloc_stack));
  EXPECT_TRUE(ChecksumDetectsTamperingWithMask(
      block_info,
      block_info.RawHeader() + state_offset,
      state_mask));

  // Header padding should be tamper proof.
  if (block_info.header_padding_size > 0) {
    EXPECT_TRUE(ChecksumDetectsTampering(block_info,
        block_info.RawHeaderPadding() + block_info.header_padding_size / 2));
  }

  // Trailer padding should be tamper proof.
  if (block_info.trailer_padding_size > 0) {
    EXPECT_TRUE(ChecksumDetectsTampering(block_info,
        block_info.RawTrailerPadding() + block_info.trailer_padding_size / 2));
  }

  // Trailer bytes should be tamper proof.
  EXPECT_TRUE(ChecksumDetectsTampering(block_info, block_info.trailer));
  EXPECT_TRUE(ChecksumDetectsTampering(block_info,
                                       &block_info.trailer->heap_id));

  // Expect the checksum to detect body tampering in the quarantined and freed
  // states, but not in the allocated or flooded states.
  bool expected = block_info.header->state == QUARANTINED_BLOCK ||
                  block_info.header->state == FREED_BLOCK;
  EXPECT_EQ(expected, ChecksumDetectsTampering(block_info, block_info.body));
  EXPECT_EQ(expected, ChecksumDetectsTampering(block_info,
      block_info.RawBody() + block_info.body_size / 2));
  EXPECT_EQ(expected, ChecksumDetectsTampering(block_info,
      block_info.RawBody() + block_info.body_size - 1));
}

}  // namespace

TEST_F(BlockTest, ChecksumDetectsTampering) {
  // This test requires a runtime because it makes use of BlockAnalyze.
  // Initialize it with valid values.
  AsanRuntime runtime;
  ASSERT_NO_FATAL_FAILURE(runtime.SetUp(L""));
  HeapManagerInterface::HeapId valid_heap_id = runtime.GetProcessHeap();
  runtime.AddThreadId(::GetCurrentThreadId());
  common::StackCapture capture;
  capture.InitFromStack();
  const common::StackCapture* valid_stack =
      runtime.stack_cache()->SaveStackTrace(capture);

  size_t kSizes[] = { 1, 4, 7, 16, 23, 32, 117, 1000, 4096 };

  // Doing a single allocation makes this test a bit faster.
  size_t kAllocSize = 4 * 4096;
  void* alloc = ::VirtualAlloc(NULL, kAllocSize, MEM_COMMIT, PAGE_READWRITE);
  ASSERT_TRUE(alloc != NULL);

  // We test 9 different sizes, 9 different chunk sizes, 1 to 9 different
  // alignments, and 2 different redzone sizes. This is 810 different
  // combinations. We test each of these block allocations in all 4 possible
  // states. The probe itself tests the block at 7 to 9 different points, and
  // the tests require multiple iterations. Be careful playing with these
  // constants or the unittest time can easily spiral out of control! This
  // currently requires less than half a second, and is strictly CPU bound.
  for (size_t chunk_size = kShadowRatio; chunk_size <= GetPageSize();
       chunk_size *= 2) {
    for (size_t align = kShadowRatio; align <= chunk_size; align *= 2) {
      for (size_t redzone = 0; redzone <= chunk_size; redzone += chunk_size) {
        for (size_t i = 0; i < arraysize(kSizes); ++i) {
          BlockLayout layout = {};
          EXPECT_TRUE(BlockPlanLayout(chunk_size, align, kSizes[i], redzone,
                                      redzone, &layout));
          ASSERT_GT(kAllocSize, layout.block_size);

          BlockInfo block_info = {};
          BlockInitialize(layout, alloc, false, &block_info);
          block_info.header->alloc_stack = valid_stack;
          block_info.trailer->heap_id = valid_heap_id;

          // Test that the checksum detects tampering as expected in each
          // block state.
          block_info.header->state = ALLOCATED_BLOCK;
          ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));

          block_info.header->state = QUARANTINED_BLOCK;
          block_info.header->free_stack = valid_stack;
          block_info.trailer->free_tid = ::GetCurrentThreadId();
          block_info.trailer->free_ticks = ::GetTickCount();
          ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));

          block_info.header->state = QUARANTINED_FLOODED_BLOCK;
          ::memset(block_info.body, kBlockFloodFillByte, block_info.body_size);
          ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));

          block_info.header->state = FREED_BLOCK;
          ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));
        }  // kSizes[i]
      }  // redzone
    }  // align
  }  // chunk_size

  ASSERT_EQ(TRUE, ::VirtualFree(alloc, 0, MEM_RELEASE));
  ASSERT_NO_FATAL_FAILURE(runtime.TearDown());
}

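// BlockBodyIsFloodFilled should only report a block as flood-filled once
// every byte of the body holds kBlockFloodFillByte.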
TEST_F(BlockTest, BlockBodyIsFloodFilled) {
  static char dummy_body[3] = { 0x00, 0x00, 0x00 };
  BlockInfo dummy_info = {};
  dummy_info.body = reinterpret_cast<BlockBody*>(dummy_body);
  dummy_info.body_size = sizeof(dummy_body);
  for (size_t i = 0; i < arraysize(dummy_body); ++i) {
    EXPECT_FALSE(BlockBodyIsFloodFilled(dummy_info));
    dummy_body[i] = kBlockFloodFillByte;
  }
  EXPECT_TRUE(BlockBodyIsFloodFilled(dummy_info));
}

TEST_F(BlockTest, BlockDetermineMostLikelyState) {
  AsanLogger logger;
  Shadow shadow;
  memory_notifiers::ShadowMemoryNotifier notifier(&shadow);
  StackCaptureCache cache(&logger, &notifier);

  {
    testing::FakeAsanBlock block1(&shadow, kShadowRatio, &cache);
    block1.InitializeBlock(1024);
    EXPECT_EQ(ALLOCATED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block1.block_info));
    block1.block_info.header->state = ~block1.block_info.header->state;
    EXPECT_EQ(ALLOCATED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block1.block_info));
    block1.MarkBlockAsQuarantined();
    EXPECT_EQ(QUARANTINED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block1.block_info));
    block1.block_info.header->state = ~block1.block_info.header->state;
    EXPECT_EQ(QUARANTINED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block1.block_info));
  }

  {
    testing::FakeAsanBlock block2(&shadow, kShadowRatio, &cache);
    block2.InitializeBlock(1024);
    EXPECT_EQ(ALLOCATED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block2.block_info));
    block2.block_info.header->state = ~block2.block_info.header->state;
    EXPECT_EQ(ALLOCATED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block2.block_info));
    block2.MarkBlockAsQuarantinedFlooded();
    EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block2.block_info));
    block2.block_info.header->state = ~block2.block_info.header->state;
    EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block2.block_info));
    block2.block_info.RawBody(10) = 0;
    EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block2.block_info));
    ::memset(block2.block_info.body, 0, block2.block_info.body_size);
    EXPECT_EQ(QUARANTINED_BLOCK,
              BlockDetermineMostLikelyState(&shadow, block2.block_info));
  }
}

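// Flips single bits in the header, body and trailer of a quarantined block
// and checks that BlockBitFlipsFixChecksum and BlockBitFlipsRequired can
// account for them within the given maximum number of flips.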
TEST_F(BlockTest, BitFlips) {
  AsanLogger logger;
  Shadow shadow;
  memory_notifiers::ShadowMemoryNotifier notifier(&shadow);
  StackCaptureCache cache(&logger, &notifier);

  testing::FakeAsanBlock block1(&shadow, kShadowRatio, &cache);
  block1.InitializeBlock(100);
  block1.MarkBlockAsQuarantined();
  size_t flips = 0;

  EXPECT_TRUE(
      BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
  flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
  EXPECT_EQ(0u, flips);

  block1.block_info.RawHeader(2) ^= 4;
  EXPECT_FALSE(
      BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
  EXPECT_TRUE(
      BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 1));
  flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
  EXPECT_EQ(1u, flips);

  block1.block_info.RawBody(5) ^= 2;
  EXPECT_FALSE(
      BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
  EXPECT_TRUE(
      BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 2));
  flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
  EXPECT_LT(0u, flips);
  EXPECT_GE(2u, flips);

  block1.block_info.RawTrailer(3) ^= 1;
  EXPECT_FALSE(
      BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
  EXPECT_TRUE(
      BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 3));
  flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
  EXPECT_LT(0u, flips);
  EXPECT_GE(3u, flips);
}

}  // namespace asan
}  // namespace agent
|