1 : // Copyright 2014 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/agent/asan/block.h"
16 :
17 : #include <memory>
18 : #include <set>
19 :
20 : #include "windows.h"
21 :
22 : #include "gtest/gtest.h"
23 : #include "syzygy/agent/asan/page_protection_helpers.h"
24 : #include "syzygy/agent/asan/runtime.h"
25 : #include "syzygy/agent/asan/unittest_util.h"
26 :
27 : namespace agent {
28 : namespace asan {
29 :
30 : namespace {
31 :
32 : using testing::_;
33 :
34 : BlockLayout BuildBlockLayout(size_t block_alignment,
35 : size_t block_size,
36 : size_t header_size,
37 : size_t header_padding_size,
38 : size_t body_size,
39 : size_t trailer_padding_size,
40 E : size_t trailer_size) {
41 E : BlockLayout layout = { block_alignment, block_size, header_size,
42 E : header_padding_size, body_size, trailer_padding_size, trailer_size };
43 E : return layout;
44 E : }
45 :
46 : // Checks that the given block is valid and initialized as expected.
47 E : void IsValidBlockImpl(const BlockInfo& block, bool just_initialized) {
48 E : EXPECT_EQ(0u, block.block_size % kShadowRatio);
49 :
50 : // Validate the layout of the block.
51 E : EXPECT_TRUE(block.header != nullptr);
52 E : EXPECT_EQ(0u, block.block_size % kShadowRatio);
53 E : EXPECT_EQ(0u, block.header_padding_size % kShadowRatio);
54 E : EXPECT_EQ(block.RawHeader() + sizeof(BlockHeader),
55 E : block.RawHeaderPadding());
56 E : EXPECT_EQ(block.RawHeaderPadding() + block.header_padding_size,
57 E : block.RawBody());
58 E : EXPECT_EQ(block.RawBody() + block.body_size,
59 E : block.RawTrailerPadding());
60 E : EXPECT_EQ(block.RawTrailerPadding() + block.trailer_padding_size,
61 E : block.RawTrailer());
62 E : EXPECT_EQ(block.RawHeader() + block.block_size,
63 E : block.RawTrailer() + sizeof(BlockTrailer));
64 :
65 : // Validate the actual contents of the various parts of the block.
66 :
67 : // Check the header.
68 E : EXPECT_EQ(kBlockHeaderMagic, block.header->magic);
69 E : EXPECT_FALSE(block.header->is_nested);
70 E : EXPECT_LT(0u, block.header->body_size);
71 E : EXPECT_EQ(block.header->body_size, block.body_size);
72 E : if (just_initialized) {
73 E : EXPECT_EQ(0u, block.header->checksum);
74 E : EXPECT_EQ(NULL, block.header->alloc_stack);
75 E : EXPECT_EQ(NULL, block.header->free_stack);
76 E : EXPECT_EQ(ALLOCATED_BLOCK, block.header->state);
77 : }
78 :
79 : // By default we assume that blocks are not nested.
80 E : EXPECT_FALSE(block.header->is_nested);
81 E : EXPECT_EQ(static_cast<bool>(block.header->is_nested), block.is_nested);
82 :
83 : // Check the header padding.
84 E : if (block.header->has_header_padding) {
85 E : EXPECT_LE(kShadowRatio, block.header_padding_size);
86 E : EXPECT_EQ(block.header_padding_size,
87 E : *reinterpret_cast<const uint32_t*>(block.header_padding));
88 E : EXPECT_EQ(block.header_padding_size,
89 : *reinterpret_cast<const uint32_t*>(block.RawHeaderPadding() +
90 : block.header_padding_size -
91 E : sizeof(uint32_t)));
92 E : for (size_t i = sizeof(uint32_t);
93 E : i < block.header_padding_size - sizeof(uint32_t); ++i) {
94 E : EXPECT_EQ(kBlockHeaderPaddingByte, block.RawHeaderPadding(i));
95 E : }
96 : }
97 :
98 : // Check the trailer padding.
99 E : size_t start_of_trailer_iteration = 0;
100 E : if (block.header->has_excess_trailer_padding) {
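    // When there is excess trailer padding, its first 4 bytes (a uint32_t)
    // record the padding size, so the filler-byte check below starts past
    // them.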
101 E : start_of_trailer_iteration = 4;
102 E : EXPECT_EQ(block.trailer_padding_size,
103 E : *reinterpret_cast<const uint32_t*>(block.trailer_padding));
104 : }
105 E : for (size_t i = start_of_trailer_iteration; i < block.trailer_padding_size;
106 E : ++i) {
107 E : EXPECT_EQ(kBlockTrailerPaddingByte, block.RawTrailerPadding(i));
108 E : }
109 :
110 : // Check the trailer.
111 E : EXPECT_NE(0u, block.trailer->alloc_tid);
112 E : EXPECT_GE(::GetTickCount(), block.trailer->alloc_ticks);
113 E : if (just_initialized) {
114 E : EXPECT_EQ(0u, block.trailer->free_tid);
115 E : EXPECT_EQ(0u, block.trailer->free_ticks);
116 : }
117 E : }
118 :
119 E : void IsValidInitializedBlock(const BlockInfo& block) {
120 E : IsValidBlockImpl(block, true);
121 E : }
122 :
123 : void IsValidBlock(const BlockInfo& block) {
124 : IsValidBlockImpl(block, false);
125 : }
126 :
127 : class BlockTest : public testing::OnExceptionCallbackTest {
128 : public:
129 : using Super = testing::OnExceptionCallbackTest;
130 :
131 E : void SetUp() override {
132 E : Super::SetUp();
133 E : shadow_.SetUp();
134 E : }
135 :
136 E : void TearDown() override {
137 E : shadow_.TearDown();
138 E : Super::TearDown();
139 E : }
140 :
141 : Shadow shadow_;
142 : };
143 :
144 : } // namespace
145 :
146 E : bool operator==(const BlockLayout& bl1, const BlockLayout& bl2) {
147 E : return ::memcmp(&bl1, &bl2, sizeof(BlockLayout)) == 0;
148 E : }
149 :
150 E : bool operator==(const BlockInfo& bi1, const BlockInfo& bi2) {
151 E : return ::memcmp(&bi1, &bi2, sizeof(BlockInfo)) == 0;
152 E : }
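// Note: these byte-wise comparisons assume any padding bytes in the structs
// compare equal, which holds in these tests because the instances are
// zero-initialized before being filled in.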
153 :
154 E : TEST_F(BlockTest, BlockPlanLayout) {
155 E : BlockLayout layout = {};
156 :
157 : // Zero-sized allocations should work fine.
158 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 0, 0, 0, &layout));
159 E : EXPECT_EQ(BuildBlockLayout(8, 40, 16, 0, 0, 4, 20), layout);
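  // The expected block sizes decompose as header + header padding + body +
  // trailer padding + trailer; here 16 + 0 + 0 + 4 + 20 == 40, with the
  // trailer padding presumably sized so the total is a multiple of the
  // requested 8-byte alignment.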
160 :
161 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 60, 32, 32, &layout));
162 E : EXPECT_EQ(BuildBlockLayout(8, 128, 16, 16, 60, 16, 20), layout);
163 :
164 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 60, 0, 0, &layout));
165 E : EXPECT_EQ(BuildBlockLayout(8, 96, 16, 0, 60, 0, 20), layout);
166 :
167 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 64, 0, 0, &layout));
168 E : EXPECT_EQ(BuildBlockLayout(8, 104, 16, 0, 64, 4, 20), layout);
169 :
170 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 61, 0, 0, &layout));
171 E : EXPECT_EQ(BuildBlockLayout(8, 104, 16, 0, 61, 7, 20), layout);
172 :
173 : // Plan a layout that would use guard pages.
174 E : EXPECT_TRUE(BlockPlanLayout(4096, 8, 100, 4096, 4096, &layout));
175 E : EXPECT_EQ(BuildBlockLayout(4096, 3 * 4096, 16, 8072, 100, 4080, 20), layout);
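  // The redzones are what force the large layout here: the left redzone
  // (16 + 8072 == 8088 bytes) and the right redzone (4080 + 20 == 4100 bytes)
  // both satisfy the requested 4096-byte minimum, and the total rounds up to
  // 3 whole pages (12288 bytes).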
176 :
177 : // Plan a layout with an invalid size; this should fail.
178 E : EXPECT_FALSE(BlockPlanLayout(8, 8, 0xffffffff, 0, 0, &layout));
179 E : }
180 :
181 E : TEST_F(BlockTest, EndToEnd) {
182 E : BlockLayout layout = {};
183 E : BlockInfo block_info = {};
184 :
185 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 4, 0, 0, &layout));
186 E : std::unique_ptr<uint8_t[]> block_data(new uint8_t[layout.block_size]);
187 E : ::memset(block_data.get(), 0, layout.block_size);
188 E : BlockInitialize(layout, block_data.get(), false, &block_info);
189 E : EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
190 E : block_data.reset(nullptr);
191 :
192 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 61, 0, 0, &layout));
193 E : block_data.reset(new uint8_t[layout.block_size]);
194 E : ::memset(block_data.get(), 0, layout.block_size);
195 E : BlockInitialize(layout, block_data.get(), false, &block_info);
196 E : EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
197 E : block_data.reset(nullptr);
198 :
199 E : EXPECT_TRUE(BlockPlanLayout(8, 8, 60, 32, 32, &layout));
200 E : block_data.reset(new uint8_t[layout.block_size]);
201 E : ::memset(block_data.get(), 0, layout.block_size);
202 E : BlockInitialize(layout, block_data.get(), false, &block_info);
203 E : EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
204 E : block_data.reset(nullptr);
205 :
206 : // Do an allocation that uses entire pages.
207 E : EXPECT_TRUE(BlockPlanLayout(4096, 8, 100, 4096, 4096, &layout));
208 E : void* data = ::VirtualAlloc(NULL, layout.block_size, MEM_COMMIT,
209 : PAGE_READWRITE);
210 E : ::memset(data, 0, layout.block_size);
211 E : BlockInitialize(layout, data, false, &block_info);
212 E : EXPECT_NO_FATAL_FAILURE(IsValidInitializedBlock(block_info));
213 E : ASSERT_EQ(TRUE, ::VirtualFree(data, 0, MEM_RELEASE));
214 E : }
215 :
216 E : TEST_F(BlockTest, GetHeaderFromBody) {
217 : // Plan two layouts, one with header padding and another without.
218 E : BlockLayout layout1 = {};
219 E : BlockLayout layout2 = {};
220 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout1));
221 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 32, 0, &layout2));
222 :
223 E : std::unique_ptr<uint8_t[]> data(new uint8_t[layout2.block_size]);
224 E : ::memset(data.get(), 0, layout2.block_size);
225 :
226 : // First try navigating a block without header padding.
227 E : BlockInfo info = {};
228 E : BlockInitialize(layout1, data.get(), false, &info);
229 : // This should succeed as expected.
230 E : EXPECT_EQ(info.header, BlockGetHeaderFromBody(info.body));
231 : // This fails because of invalid alignment.
232 E : EXPECT_TRUE(BlockGetHeaderFromBody(
233 E : reinterpret_cast<BlockBody*>(info.RawBody() + 1)) == nullptr);
234 : // This fails because the pointer is not at the beginning of the
235 : // body.
236 E : EXPECT_TRUE(BlockGetHeaderFromBody(
237 E : reinterpret_cast<BlockBody*>(info.RawBody() + kShadowRatio)) == nullptr);
238 : // This fails because of invalid header magic.
239 E : ++info.header->magic;
240 E : EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
241 : // This fails because the header indicates there's padding.
242 E : --info.header->magic;
243 E : info.header->has_header_padding = 1;
244 E : EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
245 :
246 : // Now navigate a block with header padding.
247 E : BlockInitialize(layout2, data.get(), false, &info);
248 : // This should succeed as expected.
249 E : EXPECT_EQ(info.header, BlockGetHeaderFromBody(info.body));
250 : // This fails because of invalid alignment.
251 E : EXPECT_TRUE(BlockGetHeaderFromBody(
252 E : reinterpret_cast<BlockBody*>(info.RawBody() + 1)) == nullptr);
253 : // This fails because the pointer is not at the beginning of the
254 : // body.
255 E : EXPECT_TRUE(BlockGetHeaderFromBody(
256 E : reinterpret_cast<BlockBody*>(info.RawBody() + kShadowRatio)) == nullptr);
257 : // This fails because of invalid header magic.
258 E : ++info.header->magic;
259 E : EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
260 : // This fails because the header indicates there's no padding.
261 E : --info.header->magic;
262 E : info.header->has_header_padding = 0;
263 E : EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
264 : // This fails because the padding length is invalid.
265 E : info.header->has_header_padding = 1;
266 E : uint32_t* head = reinterpret_cast<uint32_t*>(info.header_padding);
267 E : uint32_t* tail = head + (info.header_padding_size / sizeof(uint32_t)) - 1;
268 E : ++(*tail);
269 E : EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
270 : // This fails because the padding lengths don't agree.
271 E : --(*tail);
272 E : ++(*head);
273 E : EXPECT_TRUE(BlockGetHeaderFromBody(info.body) == nullptr);
274 E : }
275 :
276 E : TEST_F(BlockTest, GetHeaderFromBodyProtectedMemory) {
277 E : BlockLayout layout = {};
278 E : EXPECT_TRUE(BlockPlanLayout(4096, 4096, 4096, 4096, 4096, &layout));
279 E : void* alloc = ::VirtualAlloc(NULL, layout.block_size, MEM_COMMIT,
280 : PAGE_READWRITE);
281 E : ASSERT_TRUE(alloc != NULL);
282 E : BlockInfo block_info = {};
283 E : BlockInitialize(layout, alloc, false, &block_info);
284 :
285 E : BlockProtectRedzones(block_info, &shadow_);
286 E : EXPECT_CALL(*this, OnExceptionCallback(_));
287 E : EXPECT_TRUE(BlockGetHeaderFromBody(block_info.body) == NULL);
288 E : testing::Mock::VerifyAndClearExpectations(this);
289 E : BlockProtectNone(block_info, &shadow_);
290 :
291 E : ASSERT_EQ(TRUE, ::VirtualFree(alloc, 0, MEM_RELEASE));
292 E : }
293 :
294 E : TEST_F(BlockTest, ConvertBlockInfo) {
295 E : BlockLayout layout = {};
296 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout));
297 :
298 E : std::unique_ptr<uint8_t[]> data(new uint8_t[layout.block_size]);
299 E : ::memset(data.get(), 0, layout.block_size);
300 :
301 E : BlockInfo expanded = {};
302 E : BlockInitialize(layout, data.get(), false, &expanded);
303 :
304 E : CompactBlockInfo compact = {};
305 E : ConvertBlockInfo(expanded, &compact);
306 E : EXPECT_EQ(layout.block_size, compact.block_size);
307 E : EXPECT_EQ(layout.header_size + layout.header_padding_size,
308 E : compact.header_size);
309 E : EXPECT_EQ(layout.trailer_size + layout.trailer_padding_size,
310 E : compact.trailer_size);
311 E : EXPECT_FALSE(compact.is_nested);
312 :
313 E : BlockInfo expanded2 = {};
314 E : ConvertBlockInfo(compact, &expanded2);
315 E : EXPECT_EQ(0, ::memcmp(&expanded, &expanded2, sizeof(expanded)));
316 E : }
317 :
318 E : TEST_F(BlockTest, BlockInfoFromMemory) {
319 : // Plan two layouts, one with header padding and another without.
320 E : BlockLayout layout1 = {};
321 E : BlockLayout layout2 = {};
322 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout1));
323 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 32, 0, &layout2));
324 :
325 E : std::unique_ptr<uint8_t[]> data(new uint8_t[layout2.block_size]);
326 E : ::memset(data.get(), 0, layout2.block_size);
327 :
328 : // First recover a block without header padding.
329 E : BlockInfo info = {};
330 E : BlockInitialize(layout1, data.get(), false, &info);
331 E : BlockInfo info_recovered = {};
332 E : EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
333 E : EXPECT_EQ(info, info_recovered);
334 : // This fails because it's not aligned.
335 E : EXPECT_FALSE(BlockInfoFromMemory(
336 : reinterpret_cast<BlockHeader*>(info.RawHeader() + 1),
337 E : &info_recovered));
338 : // Failed because the magic is invalid.
339 E : ++info.header->magic;
340 E : EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
341 E : --info.header->magic;
342 : // This fails because the header indicates there's padding yet there is
343 : // none.
344 E : info.header->has_header_padding = 1;
345 E : EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
346 :
347 : // Now recover a block with header padding.
348 E : BlockInitialize(layout2, data.get(), false, &info);
349 E : EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
350 E : EXPECT_EQ(info, info_recovered);
351 : // Failed because the magic is invalid.
352 E : ++info.header->magic;
353 E : EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
354 E : --info.header->magic;
355 : // Failed because the header padding lengths don't match.
356 E : uint32_t* head = reinterpret_cast<uint32_t*>(info.header_padding);
357 E : uint32_t* tail = head + (info.header_padding_size / sizeof(uint32_t)) - 1;
358 E : ++(*tail);
359 E : EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
360 E : --(*tail);
361 :
362 : // Finally ensure that we can recover information about blocks of various
363 : // sizes.
364 E : const size_t kAllocSize = 3 * GetPageSize();
365 E : void* alloc = ::VirtualAlloc(NULL, kAllocSize, MEM_COMMIT, PAGE_READWRITE);
366 E : for (size_t block_size = 0; block_size < kShadowRatio * 2; ++block_size) {
367 E : BlockLayout layout = {};
368 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, block_size, 0, 0,
369 E : &layout));
370 E : ASSERT_LE(layout.block_size, kAllocSize);
371 E : BlockInitialize(layout, alloc, false, &info);
372 E : EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
373 E : EXPECT_EQ(info.body_size, info_recovered.body_size);
374 E : EXPECT_EQ(info, info_recovered);
375 :
376 E : EXPECT_TRUE(BlockPlanLayout(4096, 4096, block_size, 4096, 4096,
377 E : &layout));
378 E : ASSERT_LE(layout.block_size, kAllocSize);
379 E : BlockInitialize(layout, alloc, false, &info);
380 E : EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
381 E : EXPECT_EQ(info.body_size, info_recovered.body_size);
382 E : EXPECT_EQ(info, info_recovered);
383 E : }
384 E : ::VirtualFree(alloc, 0, MEM_RELEASE);
385 E : }
386 :
387 E : TEST_F(BlockTest, BlockInfoFromMemoryInvalidPadding) {
388 E : BlockLayout layout = {};
389 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10,
390 E : 4 * sizeof(BlockHeader), 0, &layout));
391 :
392 E : std::unique_ptr<uint8_t[]> data(new uint8_t[layout.block_size]);
393 E : ::memset(data.get(), 0, layout.block_size);
394 :
395 E : BlockInfo info = {};
396 E : BlockInitialize(layout, data.get(), false, &info);
397 E : EXPECT_EQ(1, info.header->has_header_padding);
398 E : BlockInfo info_recovered = {};
399 E : EXPECT_TRUE(BlockInfoFromMemory(info.header, &info_recovered));
400 E : EXPECT_EQ(info, info_recovered);
401 :
402 : // Invalidate the padding size and make sure that we can't retrieve the block
403 : // information.
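  // (The padding length is stored as a uint32_t immediately after the header,
  // and mirrored at the end of the padding, as verified in IsValidBlockImpl.)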
404 E : size_t* padding_size = reinterpret_cast<size_t*>(info.header + 1);
405 E : EXPECT_GE(*padding_size, 2 * sizeof(uint32_t));
406 E : for (*padding_size = 0; *padding_size < 2 * sizeof(uint32_t);
407 E : ++(*padding_size)) {
408 E : EXPECT_FALSE(BlockInfoFromMemory(info.header, &info_recovered));
409 E : }
410 E : }
411 :
412 E : TEST_F(BlockTest, BlockInfoFromMemoryProtectedMemory) {
413 E : BlockLayout layout = {};
414 E : EXPECT_TRUE(BlockPlanLayout(4096, 4096, 4096, 4096, 4096, &layout));
415 E : void* alloc = ::VirtualAlloc(NULL, layout.block_size, MEM_COMMIT,
416 : PAGE_READWRITE);
417 E : ASSERT_TRUE(alloc != NULL);
418 E : BlockInfo block_info = {};
419 E : BlockInitialize(layout, alloc, false, &block_info);
420 :
421 E : BlockProtectRedzones(block_info, &shadow_);
422 E : BlockInfo recovered_info = {};
423 E : EXPECT_CALL(*this, OnExceptionCallback(_));
424 E : EXPECT_FALSE(BlockInfoFromMemory(block_info.header, &recovered_info));
425 E : testing::Mock::VerifyAndClearExpectations(this);
426 E : BlockProtectNone(block_info, &shadow_);
427 :
428 E : ASSERT_EQ(TRUE, ::VirtualFree(alloc, 0, MEM_RELEASE));
429 E : }
430 :
431 E : TEST_F(BlockTest, BlockInfoFromMemoryForNestedBlock) {
432 E : BlockLayout layout = {};
433 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout));
434 :
435 E : std::unique_ptr<uint8_t[]> data(new uint8_t[layout.block_size]);
436 E : BlockInfo block_info = {};
437 E : BlockInitialize(layout, data.get(), true, &block_info);
438 :
439 E : BlockInfo recovered_info = {};
440 E : EXPECT_TRUE(BlockInfoFromMemory(block_info.header, &recovered_info));
441 :
442 E : EXPECT_TRUE(recovered_info.is_nested);
443 E : EXPECT_TRUE(recovered_info.header->is_nested);
444 E : }
445 :
446 E : TEST_F(BlockTest, ChecksumWorksForAllStates) {
447 E : BlockLayout layout = {};
448 E : EXPECT_TRUE(BlockPlanLayout(kShadowRatio, kShadowRatio, 10, 0, 0, &layout));
449 E : std::unique_ptr<uint8_t[]> data(new uint8_t[layout.block_size]);
450 E : ::memset(data.get(), 0, layout.block_size);
451 E : BlockInfo info = {};
452 E : BlockInitialize(layout, data.get(), false, &info);
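  // Walk header->state through every value the field can hold; the loop below
  // ends once the field wraps back to zero, so the checksum calculation is
  // exercised for every possible state encoding.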
453 E : while (true) {
454 E : BlockCalculateChecksum(info);
455 E : ++info.header->state;
456 E : if (info.header->state == 0)
457 E : break;
458 E : }
459 E : }
460 :
461 : namespace {
462 :
463 : // Given two arrays of data, compares them byte-by-byte to find the first
464 : // byte with altered data. Within that byte determines the mask of bits that
465 : // have been altered. Returns the results via |offset| and |mask|.
466 : void FindModifiedBits(size_t length,
467 : const uint8_t* buffer1,
468 : const uint8_t* buffer2,
469 : size_t* offset,
470 E : uint8_t* mask) {
471 E : ASSERT_TRUE(buffer1 != NULL);
472 E : ASSERT_TRUE(buffer2 != NULL);
473 E : ASSERT_TRUE(offset != NULL);
474 E : ASSERT_TRUE(mask != NULL);
475 :
476 E : for (size_t i = 0; i < length; ++i) {
477 E : if (buffer1[i] != buffer2[i]) {
478 E : *offset = i;
479 E : *mask = buffer1[i] ^ buffer2[i];
480 E : return;
481 : }
482 E : }
483 :
484 i : *offset = 0;
485 i : *mask = 0;
486 E : }
487 :
488 : // This is initialized by TestChecksumDetectsTampering, but referred to by
489 : // ChecksumDetectsTamperingWithMask as well, hence not in a function.
490 : size_t state_offset = SIZE_MAX;
491 : uint8_t state_mask = 0;
492 :
493 : bool ChecksumDetectsTamperingWithMask(const BlockInfo& block_info,
494 : void* address_to_modify,
495 E : uint8_t mask_to_modify) {
496 E : uint8_t* byte_to_modify = reinterpret_cast<uint8_t*>(address_to_modify);
497 :
498 : // Remember the original contents.
499 E : uint8_t original_value = *byte_to_modify;
500 E : uint8_t original_bits = original_value & ~mask_to_modify;
501 :
502 : // Since the checksum can collide, we check a handful of times to build up
503 : // some confidence. Since we sometimes expect this to return false the number
504 : // of iterations needs to be kept reasonably low to keep the unittest fast.
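  // (A false negative would require the checksum to collide on all four
  // attempts below, which should be unlikely for a reasonable checksum.)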
505 E : bool detected = false;
506 E : BlockSetChecksum(block_info);
507 E : uint32_t checksum = block_info.header->checksum;
508 E : for (size_t i = 0; i < 4; ++i) {
509 : // Modify the value, altering only bits in |mask_to_modify|.
510 E : while (true) {
511 E : ++(*byte_to_modify);
512 E : if (((*byte_to_modify) & ~mask_to_modify) == original_bits)
513 E : break;
514 E : }
515 E : BlockSetChecksum(block_info);
516 E : if (block_info.header->checksum != checksum) {
517 : // Success, the checksum detected the change!
518 : // Restore the original checksum so the block analysis can continue.
519 E : block_info.header->checksum = checksum;
520 E : detected = true;
521 E : break;
522 : }
523 E : }
524 :
525 : // Run a detailed analysis on the block. We expect the results of this to
526 : // agree with where the block was modified.
527 E : BlockAnalysisResult result = {};
528 E : BlockAnalyze(static_cast<BlockState>(block_info.header->state), block_info,
529 : &result);
530 E : if (address_to_modify < block_info.body) {
531 E : EXPECT_EQ(kDataIsCorrupt, result.block_state);
532 : // If the thing being modified is the block state, then this is so
533 : // localized that the analysis will sometimes mess up. Seeing this in
534 : // the wild is quite unlikely.
535 : // TODO(chrisha): If we ever have individual checksums for the header,
536 : // the body and the trailer, then revisit this.
537 E : if (address_to_modify != block_info.RawHeader() + state_offset ||
538 : mask_to_modify != state_mask) {
539 E : EXPECT_EQ(kDataIsCorrupt, result.header_state);
540 E : EXPECT_EQ(kDataStateUnknown, result.body_state);
541 E : EXPECT_EQ(kDataIsClean, result.trailer_state);
542 E : }
543 E : } else if (address_to_modify >= block_info.trailer_padding) {
544 E : EXPECT_EQ(kDataIsCorrupt, result.block_state);
545 E : EXPECT_EQ(kDataIsClean, result.header_state);
546 E : EXPECT_EQ(kDataStateUnknown, result.body_state);
547 E : EXPECT_EQ(kDataIsCorrupt, result.trailer_state);
548 E : } else {
549 : // The byte being modified is in the body. Only expect to find
550 : // tampering if the block is quarantined or freed.
551 E : if (block_info.header->state != ALLOCATED_BLOCK) {
552 E : EXPECT_EQ(kDataIsCorrupt, result.block_state);
553 E : EXPECT_EQ(kDataIsClean, result.header_state);
554 E : EXPECT_EQ(kDataIsCorrupt, result.body_state);
555 E : EXPECT_EQ(kDataIsClean, result.trailer_state);
556 E : } else {
557 E : EXPECT_EQ(kDataIsClean, result.block_state);
558 E : EXPECT_EQ(kDataIsClean, result.header_state);
559 E : EXPECT_EQ(kDataIsClean, result.body_state);
560 E : EXPECT_EQ(kDataIsClean, result.trailer_state);
561 : }
562 : }
563 :
564 : // Restore the original value before returning.
565 E : *byte_to_modify = original_value;
566 E : return detected;
567 E : }
568 :
569 : bool ChecksumDetectsTampering(const BlockInfo& block_info,
570 E : void* address_to_modify) {
571 E : if (!ChecksumDetectsTamperingWithMask(block_info, address_to_modify, 0xFF))
572 E : return false;
573 E : return true;
574 E : }
575 :
576 E : void TestChecksumDetectsTampering(const BlockInfo& block_info) {
577 E : uint32_t checksum = BlockCalculateChecksum(block_info);
578 E : block_info.header->checksum = checksum;
579 E : EXPECT_TRUE(BlockChecksumIsValid(block_info));
580 E : ++block_info.header->checksum;
581 E : EXPECT_FALSE(BlockChecksumIsValid(block_info));
582 E : BlockSetChecksum(block_info);
583 E : EXPECT_EQ(checksum, block_info.header->checksum);
584 :
585 : // A detailed block analysis should find nothing awry.
586 E : BlockAnalysisResult result = {};
587 E : BlockAnalyze(static_cast<BlockState>(block_info.header->state), block_info,
588 : &result);
589 E : EXPECT_EQ(kDataIsClean, result.block_state);
590 E : EXPECT_EQ(kDataIsClean, result.header_state);
591 E : EXPECT_EQ(kDataIsClean, result.body_state);
592 E : EXPECT_EQ(kDataIsClean, result.trailer_state);
593 :
594 : // Get the offset of the byte and the mask of the bits containing the
595 : // block state. This is resilient to changes in the BlockHeader layout.
596 E : if (state_offset == SIZE_MAX) {
597 E : BlockHeader header1 = {};
598 E : BlockHeader header2 = {};
599 E : header2.state = ~header2.state;
600 E : FindModifiedBits(
601 : sizeof(BlockHeader), reinterpret_cast<const uint8_t*>(&header1),
602 : reinterpret_cast<const uint8_t*>(&header2), &state_offset, &state_mask);
603 : }
604 :
605 : // Header bytes should be tamper proof.
606 E : EXPECT_TRUE(ChecksumDetectsTampering(block_info, block_info.header));
607 E : EXPECT_TRUE(ChecksumDetectsTampering(block_info,
608 E : &block_info.header->alloc_stack));
609 E : EXPECT_TRUE(ChecksumDetectsTamperingWithMask(
610 : block_info,
611 : block_info.RawHeader() + state_offset,
612 E : state_mask));
613 :
614 : // Header padding should be tamper proof.
615 E : if (block_info.header_padding_size > 0) {
616 E : EXPECT_TRUE(ChecksumDetectsTampering(block_info,
617 E : block_info.RawHeaderPadding() + block_info.header_padding_size / 2));
618 : }
619 :
620 : // Trailer padding should be tamper proof.
621 E : if (block_info.trailer_padding_size > 0) {
622 E : EXPECT_TRUE(ChecksumDetectsTampering(block_info,
623 E : block_info.RawTrailerPadding() + block_info.trailer_padding_size / 2));
624 : }
625 :
626 : // Trailer bytes should be tamper proof.
627 E : EXPECT_TRUE(ChecksumDetectsTampering(block_info, block_info.trailer));
628 E : EXPECT_TRUE(ChecksumDetectsTampering(block_info,
629 E : &block_info.trailer->heap_id));
630 :
631 : // Expect the checksum to detect body tampering in quarantined and freed
632 : // states, but not in the allocated or flooded states.
633 E : bool expected = block_info.header->state == QUARANTINED_BLOCK ||
634 : block_info.header->state == FREED_BLOCK;
635 E : EXPECT_EQ(expected, ChecksumDetectsTampering(block_info, block_info.body));
636 E : EXPECT_EQ(expected, ChecksumDetectsTampering(block_info,
637 E : block_info.RawBody() + block_info.body_size / 2));
638 E : EXPECT_EQ(expected, ChecksumDetectsTampering(block_info,
639 E : block_info.RawBody() + block_info.body_size - 1));
640 E : }
641 :
642 : } // namespace
643 :
644 E : TEST_F(BlockTest, ChecksumDetectsTampering) {
645 : // This test requires a runtime because it makes use of BlockAnalyze.
646 : // Initialize it with valid values.
647 E : AsanRuntime runtime;
648 E : ASSERT_NO_FATAL_FAILURE(runtime.SetUp(L""));
649 E : HeapManagerInterface::HeapId valid_heap_id = runtime.GetProcessHeap();
650 E : runtime.AddThreadId(::GetCurrentThreadId());
651 E : common::StackCapture capture;
652 E : capture.InitFromStack();
653 : const common::StackCapture* valid_stack =
654 E : runtime.stack_cache()->SaveStackTrace(capture);
655 :
656 E : size_t kSizes[] = { 1, 4, 7, 16, 23, 32, 117, 1000, 4096 };
657 :
658 : // Doing a single allocation makes this test a bit faster.
659 E : size_t kAllocSize = 4 * 4096;
660 E : void* alloc = ::VirtualAlloc(NULL, kAllocSize, MEM_COMMIT, PAGE_READWRITE);
661 E : ASSERT_TRUE(alloc != NULL);
662 :
663 : // We test 9 different sizes, 9 different chunk sizes, 1 to 9 different
664 : // alignments, and 2 different redzone sizes. This is 810 different
665 : // combinations. We test each of these block allocations in all 4 possible
666 : // states. The probe itself tests the block at 7 to 9 different points, and
667 : // the tests require multiple iterations. Be careful playing with these
668 : // constants or the unittest time can easily spiral out of control! This
669 : // currently requires less than half a second, and is strictly CPU bound.
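  // (The 810 figure breaks down as 9 sizes x 2 redzone settings x the
  // 45 chunk-size/alignment pairs, i.e. 9 * 2 * (1 + 2 + ... + 9) == 810.)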
670 E : for (size_t chunk_size = kShadowRatio; chunk_size <= GetPageSize();
671 E : chunk_size *= 2) {
672 E : for (size_t align = kShadowRatio; align <= chunk_size; align *= 2) {
673 E : for (size_t redzone = 0; redzone <= chunk_size; redzone += chunk_size) {
674 E : for (size_t i = 0; i < arraysize(kSizes); ++i) {
675 E : BlockLayout layout = {};
676 E : EXPECT_TRUE(BlockPlanLayout(chunk_size, align, kSizes[i], redzone,
677 E : redzone, &layout));
678 E : ASSERT_GT(kAllocSize, layout.block_size);
679 :
680 E : BlockInfo block_info = {};
681 E : BlockInitialize(layout, alloc, false, &block_info);
682 E : block_info.header->alloc_stack = valid_stack;
683 E : block_info.trailer->heap_id = valid_heap_id;
684 :
685 : // Test that the checksum detects tampering as expected in each block
686 : // state.
687 E : block_info.header->state = ALLOCATED_BLOCK;
688 E : ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));
689 :
690 E : block_info.header->state = QUARANTINED_BLOCK;
691 E : block_info.header->free_stack = valid_stack;
692 E : block_info.trailer->free_tid = ::GetCurrentThreadId();
693 E : block_info.trailer->free_ticks = ::GetTickCount();
694 E : ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));
695 :
696 E : block_info.header->state = QUARANTINED_FLOODED_BLOCK;
697 E : ::memset(block_info.body, kBlockFloodFillByte, block_info.body_size);
698 E : ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));
699 :
700 E : block_info.header->state = FREED_BLOCK;
701 E : ASSERT_NO_FATAL_FAILURE(TestChecksumDetectsTampering(block_info));
702 E : } // kSizes[i]
703 E : } // redzone
704 E : } // align
705 E : } // chunk_size
706 :
707 E : ASSERT_EQ(TRUE, ::VirtualFree(alloc, 0, MEM_RELEASE));
708 E : ASSERT_NO_FATAL_FAILURE(runtime.TearDown());
709 E : }
710 :
711 E : TEST_F(BlockTest, BlockBodyIsFloodFilled) {
712 : static char dummy_body[3] = { 0x00, 0x00, 0x00 };
713 E : BlockInfo dummy_info = {};
714 E : dummy_info.body = reinterpret_cast<BlockBody*>(dummy_body);
715 E : dummy_info.body_size = sizeof(dummy_body);
716 E : for (size_t i = 0; i < arraysize(dummy_body); ++i) {
717 E : EXPECT_FALSE(BlockBodyIsFloodFilled(dummy_info));
718 E : dummy_body[i] = kBlockFloodFillByte;
719 E : }
720 E : EXPECT_TRUE(BlockBodyIsFloodFilled(dummy_info));
721 E : }
722 :
723 E : TEST_F(BlockTest, BlockDetermineMostLikelyState) {
724 E : AsanLogger logger;
725 E : Shadow shadow;
726 E : memory_notifiers::ShadowMemoryNotifier notifier(&shadow);
727 E : StackCaptureCache cache(&logger, ¬ifier);
728 :
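  // In each scope below the header's state field is deliberately corrupted
  // (bitwise negated) after the block is set up; BlockDetermineMostLikelyState
  // is expected to recover the correct state anyway, since it infers the state
  // from the shadow memory and block contents rather than trusting the header.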
729 : {
730 E : testing::FakeAsanBlock block1(&shadow, kShadowRatio, &cache);
731 E : block1.InitializeBlock(1024);
732 E : EXPECT_EQ(ALLOCATED_BLOCK,
733 E : BlockDetermineMostLikelyState(&shadow, block1.block_info));
734 E : block1.block_info.header->state = ~block1.block_info.header->state;
735 E : EXPECT_EQ(ALLOCATED_BLOCK,
736 E : BlockDetermineMostLikelyState(&shadow, block1.block_info));
737 E : block1.MarkBlockAsQuarantined();
738 E : EXPECT_EQ(QUARANTINED_BLOCK,
739 E : BlockDetermineMostLikelyState(&shadow, block1.block_info));
740 E : block1.block_info.header->state = ~block1.block_info.header->state;
741 E : EXPECT_EQ(QUARANTINED_BLOCK,
742 E : BlockDetermineMostLikelyState(&shadow, block1.block_info));
743 E : }
744 :
745 : {
746 E : testing::FakeAsanBlock block2(&shadow, kShadowRatio, &cache);
747 E : block2.InitializeBlock(1024);
748 E : EXPECT_EQ(ALLOCATED_BLOCK,
749 E : BlockDetermineMostLikelyState(&shadow, block2.block_info));
750 E : block2.block_info.header->state = ~block2.block_info.header->state;
751 E : EXPECT_EQ(ALLOCATED_BLOCK,
752 E : BlockDetermineMostLikelyState(&shadow, block2.block_info));
753 E : block2.MarkBlockAsQuarantinedFlooded();
754 E : EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
755 E : BlockDetermineMostLikelyState(&shadow, block2.block_info));
756 E : block2.block_info.header->state = ~block2.block_info.header->state;
757 E : EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
758 E : BlockDetermineMostLikelyState(&shadow, block2.block_info));
759 E : block2.block_info.RawBody(10) = 0;
760 E : EXPECT_EQ(QUARANTINED_FLOODED_BLOCK,
761 E : BlockDetermineMostLikelyState(&shadow, block2.block_info));
762 E : ::memset(block2.block_info.body, 0, block2.block_info.body_size);
763 E : EXPECT_EQ(QUARANTINED_BLOCK,
764 E : BlockDetermineMostLikelyState(&shadow, block2.block_info));
765 E : }
766 E : }
767 :
768 E : TEST_F(BlockTest, BitFlips) {
769 E : AsanLogger logger;
770 E : Shadow shadow;
771 E : memory_notifiers::ShadowMemoryNotifier notifier(&shadow);
772 E : StackCaptureCache cache(&logger, ¬ifier);
773 :
774 E : testing::FakeAsanBlock block1(&shadow, kShadowRatio, &cache);
775 E : block1.InitializeBlock(100);
776 E : block1.MarkBlockAsQuarantined();
777 E : size_t flips = 0;
778 :
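  // Each corruption below XORs a single bit in the header, body or trailer.
  // BlockBitFlipsFixChecksum should therefore succeed once its flip budget
  // reaches the number of corrupted bits, and BlockBitFlipsRequired (searched
  // up to a maximum of 3 flips here) should report no more than that count.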
779 E : EXPECT_TRUE(
780 E : BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
781 E : flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
782 E : EXPECT_EQ(0u, flips);
783 :
784 E : block1.block_info.RawHeader(2) ^= 4;
785 E : EXPECT_FALSE(
786 E : BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
787 E : EXPECT_TRUE(
788 E : BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 1));
789 E : flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
790 E : EXPECT_EQ(1u, flips);
791 :
792 E : block1.block_info.RawBody(5) ^= 2;
793 E : EXPECT_FALSE(
794 E : BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
795 E : EXPECT_TRUE(
796 E : BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 2));
797 E : flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
798 E : EXPECT_LT(0u, flips);
799 E : EXPECT_GE(2u, flips);
800 :
801 E : block1.block_info.RawTrailer(3) ^= 1;
802 E : EXPECT_FALSE(
803 E : BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 0));
804 E : EXPECT_TRUE(
805 E : BlockBitFlipsFixChecksum(QUARANTINED_BLOCK, block1.block_info, 3));
806 E : flips = BlockBitFlipsRequired(QUARANTINED_BLOCK, block1.block_info, 3);
807 E : EXPECT_LT(0u, flips);
808 E : EXPECT_GE(3u, flips);
809 E : }
810 :
811 : } // namespace asan
812 : } // namespace agent
|