// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/block.h"

#include <algorithm>

#include "base/hash.h"
#include "base/logging.h"
#include "syzygy/agent/asan/runtime.h"
#include "syzygy/agent/asan/shadow.h"
#include "syzygy/agent/asan/stack_capture_cache.h"
#include "syzygy/common/align.h"

namespace agent {
namespace asan {

namespace {

using ::common::IsAligned;
// Declares a function that returns the maximum value representable by
// the given bitfield.
#define DECLARE_GET_MAX_BITFIELD_VALUE_FUNCTION(Type, FieldName)  \
  size_t GetMaxValue ## Type ## _ ## FieldName() {  \
    Type t = {};  \
    t.FieldName = 0;  \
    --t.FieldName;  \
    size_t value = t.FieldName;  \
    return value;  \
  }
DECLARE_GET_MAX_BITFIELD_VALUE_FUNCTION(BlockHeader, body_size);
#undef DECLARE_GET_MAX_BITFIELD_VALUE_FUNCTION

const size_t kMaxBlockHeaderBodySize = GetMaxValueBlockHeader_body_size();
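
// The decrement-from-zero trick above relies on unsigned bitfield
// wrap-around: for an n-bit unsigned field, 0 - 1 wraps to 2^n - 1, the
// field's maximum value. As a sketch, a 30-bit field would yield
// 0x3FFFFFFF (the actual width of BlockHeader::body_size is defined in
// block.h).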

void InitializeBlockHeader(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  DCHECK_NE(static_cast<BlockHeader*>(NULL), block_info->header);
  ::memset(block_info->header, 0, sizeof(BlockHeader));
  block_info->header->magic = kBlockHeaderMagic;
  block_info->header->is_nested = block_info->is_nested;
  block_info->header->has_header_padding = block_info->header_padding_size > 0;
  block_info->header->has_excess_trailer_padding =
      block_info->trailer_padding_size > sizeof(uint32);
  block_info->header->state = ALLOCATED_BLOCK;
  block_info->header->body_size = block_info->body_size;
}

void InitializeBlockHeaderPadding(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  if (block_info->header_padding_size == 0)
    return;
  DCHECK(IsAligned(block_info->header_padding_size, kShadowRatio));
  DCHECK(IsAligned(block_info->header_padding_size,
                   2 * sizeof(uint32)));

  ::memset(block_info->RawHeaderPadding() + sizeof(uint32),
           kBlockHeaderPaddingByte,
           block_info->header_padding_size - 2 * sizeof(uint32));
  uint32* head = reinterpret_cast<uint32*>(block_info->header_padding);
  uint32* tail = reinterpret_cast<uint32*>(
      block_info->RawHeaderPadding() + block_info->header_padding_size -
      sizeof(uint32));
  *head = block_info->header_padding_size;
  *tail = block_info->header_padding_size;
}
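
// The header padding written above is laid out as follows (a sketch; the
// total padding size is stored at both ends):
//
//   +--------+-------------------------------+--------+
//   | uint32 |   kBlockHeaderPaddingByte...  | uint32 |
//   | size   |                               | size   |
//   +--------+-------------------------------+--------+
//
// Storing the size at both ends lets the padding be parsed forwards (from
// the header) and backwards (from the body).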

void InitializeBlockTrailerPadding(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  if (block_info->trailer_padding_size == 0)
    return;
  ::memset(block_info->trailer_padding, kBlockTrailerPaddingByte,
           block_info->trailer_padding_size);
  if (block_info->trailer_padding_size > (kShadowRatio / 2)) {
    // This is guaranteed by kShadowRatio being >= 8, but we double check
    // for sanity's sake.
    DCHECK_LE(sizeof(uint32), block_info->trailer_padding_size);
    uint32* head = reinterpret_cast<uint32*>(block_info->trailer_padding);
    *head = block_info->trailer_padding_size;
  }
}

void InitializeBlockTrailer(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  ::memset(block_info->trailer, 0, sizeof(BlockTrailer));
  block_info->trailer->alloc_ticks = ::GetTickCount();
  block_info->trailer->alloc_tid = ::GetCurrentThreadId();
}

// Folds the bits of a uint32 down into the number of bits used to store
// the block checksum.
uint32 CombineUInt32IntoBlockChecksum(uint32 val) {
  uint32 checksum = 0;
  while (val != 0) {
    checksum ^= val;
    val >>= kBlockHeaderChecksumBits;
  }
  checksum &= ((1 << kBlockHeaderChecksumBits) - 1);
  return checksum;
}
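
// As a sketch of the folding above: with k = kBlockHeaderChecksumBits, the
// loop computes val ^ (val >> k) ^ (val >> 2k) ^ ... and the final mask
// keeps only the low k bits, so every bit of the input influences the
// stored checksum.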

// Global callback invoked by exception handlers when exceptions occur. This is
// a testing seam.
OnExceptionCallback g_on_exception_callback;

// An exception filter that catches access violations and out-of-bounds
// accesses. This also invokes the OnExceptionCallback if one has been
// provided.
DWORD BadMemoryAccessFilter(EXCEPTION_POINTERS* e) {
  if (e->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
      e->ExceptionRecord->ExceptionCode == EXCEPTION_ARRAY_BOUNDS_EXCEEDED) {
    // Invoke the callback if there is one registered. This has to happen here
    // because the exception record lives on the stack in this frame. If we
    // accessed it in the exception handler itself it would be below the bottom
    // of the stack and potentially overwritten by the handler's call tree.
    if (!g_on_exception_callback.is_null())
      g_on_exception_callback.Run(e);
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

bool BlockInfoFromMemoryImpl(const BlockHeader* const_header,
                             CompactBlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), const_header);
  DCHECK_NE(static_cast<CompactBlockInfo*>(nullptr), block_info);

  // We only perform read operations, but the block_info needs to be populated
  // with non-const pointers. Thus, we cast once here to avoid a bunch of casts
  // all through this method.
  BlockHeader* header = const_cast<BlockHeader*>(const_header);

  // The block header must be minimally aligned and begin with the expected
  // magic.
  if (!IsAligned(reinterpret_cast<uint32>(header), kShadowRatio))
    return false;
  if (header->magic != kBlockHeaderMagic)
    return false;

  // Parse the header padding if present.
  uint32 header_padding_size = 0;
  if (header->has_header_padding) {
    uint8* padding = reinterpret_cast<uint8*>(header + 1);
    uint32* head = reinterpret_cast<uint32*>(padding);
    header_padding_size = *head;
    if (header_padding_size < 2 * sizeof(uint32))
      return false;
    if (!IsAligned(header_padding_size, kShadowRatio))
      return false;
    uint32* tail = reinterpret_cast<uint32*>(
        padding + header_padding_size - sizeof(uint32));
    if (header_padding_size != *tail)
      return false;
  }

  // Parse the body.
  uint8* body = reinterpret_cast<uint8*>(header + 1) + header_padding_size;

  // Parse the trailer padding.
  uint32 trailer_padding_size = 0;
  if (header->has_excess_trailer_padding) {
    uint32* head = reinterpret_cast<uint32*>(body + header->body_size);
    trailer_padding_size = *head;
  } else if ((header->body_size % kShadowRatio) != (kShadowRatio / 2)) {
    trailer_padding_size = (kShadowRatio / 2) -
        (header->body_size % (kShadowRatio / 2));
  }

  // Parse the trailer. The end of it must be 8-byte aligned.
  BlockTrailer* trailer = reinterpret_cast<BlockTrailer*>(
      body + header->body_size + trailer_padding_size);
  if (!IsAligned(reinterpret_cast<uint32>(trailer + 1), kShadowRatio))
    return false;

  block_info->header = header;
  block_info->block_size = reinterpret_cast<uint8*>(trailer + 1) -
      reinterpret_cast<uint8*>(header);
  block_info->header_size = sizeof(BlockHeader) + header_padding_size;
  block_info->trailer_size = trailer_padding_size + sizeof(BlockTrailer);
  block_info->is_nested = header->is_nested;

  return true;
}
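
// For reference, the in-memory layout parsed above looks like this (a
// sketch; exact field widths are defined in block.h):
//
//   +--------+----------------+------+-----------------+---------+
//   | header | header padding | body | trailer padding | trailer |
//   +--------+----------------+------+-----------------+---------+
//
// Header padding, when present, is bracketed by two uint32s encoding its
// total size; excess trailer padding leads with a uint32 encoding its size.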

BlockHeader* BlockGetHeaderFromBodyImpl(const BlockBody* const_body) {
  DCHECK_NE(static_cast<BlockBody*>(nullptr), const_body);

  void* body = const_cast<BlockBody*>(const_body);

  // The body must be appropriately aligned; otherwise no valid header can
  // precede it.
  if (!IsAligned(reinterpret_cast<uint32>(body), kShadowRatio))
    return NULL;

  // First assume that there is no padding, and check if a valid block header
  // is found there.
  BlockHeader* header = reinterpret_cast<BlockHeader*>(body) - 1;
  if (header->magic == kBlockHeaderMagic && header->has_header_padding == 0)
    return header;

  // Otherwise assume there is padding. The padding must be formatted
  // correctly and have a valid length.
  uint32* tail = reinterpret_cast<uint32*>(body) - 1;
  if (*tail == 0 || !IsAligned(*tail, kShadowRatio))
    return NULL;
  uint32* head = (tail + 1) - ((*tail) / sizeof(uint32));
  if (head > tail)
    return NULL;
  if (*head != *tail)
    return NULL;

  // Expect there to be a valid block header.
  header = reinterpret_cast<BlockHeader*>(head) - 1;
  if (header->magic == kBlockHeaderMagic && header->has_header_padding == 1)
    return header;

  // No valid block header was found before the provided body address.
  return NULL;
}
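
// A sketch of the walk-back above, with illustrative values: for a body at
// address B with header padding, the uint32 at B - 4 holds the padding size.
// If, say, *tail == 16, then head points at B - 16, which must also hold 16,
// and the header is expected immediately before the head pointer.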

}  // namespace

bool BlockPlanLayout(size_t chunk_size,
                     size_t alignment,
                     size_t size,
                     size_t min_left_redzone_size,
                     size_t min_right_redzone_size,
                     BlockLayout* layout) {
  DCHECK_LE(kShadowRatio, chunk_size);
  DCHECK(::common::IsPowerOfTwo(chunk_size));
  DCHECK_LE(kShadowRatio, alignment);
  DCHECK_GE(chunk_size, alignment);
  DCHECK(::common::IsPowerOfTwo(alignment));

  // Ensure the body size is representable by the bitfield in the block
  // header; otherwise it would be silently truncated.
  if (size > kMaxBlockHeaderBodySize)
    return false;

  // Calculate minimum redzone sizes that respect the parameters.
  size_t left_redzone_size = ::common::AlignUp(
      std::max(min_left_redzone_size, sizeof(BlockHeader)),
      alignment);
  size_t right_redzone_size = std::max(min_right_redzone_size,
                                       sizeof(BlockTrailer));

  // Calculate the total size of the allocation.
  size_t total_size = ::common::AlignUp(
      left_redzone_size + size + right_redzone_size, chunk_size);

  if (total_size < size)
    return false;

  // Now figure out the sizes of things such that the body of the allocation is
  // aligned as closely as possible to the beginning of the right redzone while
  // respecting the body alignment requirements. This favors catching overflows
  // over underflows when page protection mechanisms are active.
  size_t body_trailer_size = size + right_redzone_size;
  size_t body_trailer_size_aligned = ::common::AlignUp(body_trailer_size,
                                                       alignment);
  size_t body_padding_size = body_trailer_size_aligned - body_trailer_size;
  right_redzone_size += body_padding_size;

  // The left redzone takes up the rest of the space.
  left_redzone_size = total_size - right_redzone_size - size;

  // Make sure the basic layout invariants are satisfied.
  DCHECK_LE(min_left_redzone_size, left_redzone_size);
  DCHECK_LE(min_right_redzone_size, right_redzone_size);
  DCHECK_EQ(total_size, (left_redzone_size + size + right_redzone_size));
  DCHECK(IsAligned(total_size, chunk_size));
  DCHECK(IsAligned(left_redzone_size, alignment));

  // Fill out the layout structure.
  layout->block_alignment = chunk_size;
  layout->block_size = total_size;
  layout->header_size = sizeof(BlockHeader);
  layout->header_padding_size = left_redzone_size - sizeof(BlockHeader);
  layout->body_size = size;
  layout->trailer_padding_size = right_redzone_size - sizeof(BlockTrailer);
  layout->trailer_size = sizeof(BlockTrailer);
  return true;
}
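
// A worked example of the plan above (purely illustrative; it assumes
// kShadowRatio == 8, sizeof(BlockHeader) == 16 and sizeof(BlockTrailer) ==
// 20): BlockPlanLayout(8, 8, 10, 0, 0, &layout) gives
//   left_redzone_size  = AlignUp(max(0, 16), 8)   = 16
//   right_redzone_size = max(0, 20)               = 20
//   total_size         = AlignUp(16 + 10 + 20, 8) = 48
//   body padding: AlignUp(10 + 20, 8) - 30 = 2, so right_redzone_size = 22
//   left_redzone_size  = 48 - 22 - 10 = 16
// yielding header_padding_size == 0 and trailer_padding_size == 2.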

void BlockInitialize(const BlockLayout& layout,
                     void* allocation,
                     bool is_nested,
                     BlockInfo* block_info) {
  DCHECK_NE(static_cast<void*>(NULL), allocation);
  DCHECK(IsAligned(reinterpret_cast<uint32>(allocation),
                   layout.block_alignment));

  // If no output structure is provided then use a local one. We need the data
  // locally, but the caller might not be interested in it.
  BlockInfo local_block_info = {};
  if (block_info == NULL) {
    block_info = &local_block_info;
  } else {
    ::memset(block_info, 0, sizeof(BlockInfo));
  }

  // Get pointers to the various components of the block.
  uint8* cursor = reinterpret_cast<uint8*>(allocation);
  block_info->block_size = layout.block_size;
  block_info->is_nested = is_nested;
  block_info->header = reinterpret_cast<BlockHeader*>(cursor);
  cursor += sizeof(BlockHeader);
  block_info->header_padding = reinterpret_cast<BlockHeaderPadding*>(cursor);
  cursor += layout.header_padding_size;
  block_info->header_padding_size = layout.header_padding_size;
  block_info->body = reinterpret_cast<BlockBody*>(cursor);
  cursor += layout.body_size;
  block_info->body_size = layout.body_size;
  block_info->trailer_padding = reinterpret_cast<BlockTrailerPadding*>(cursor);
  cursor += layout.trailer_padding_size;
  block_info->trailer_padding_size = layout.trailer_padding_size;
  block_info->trailer = reinterpret_cast<BlockTrailer*>(cursor);

  // If the block information is being returned to the user then determine
  // the extents of whole pages within it.
  if (block_info != &local_block_info)
    BlockIdentifyWholePages(block_info);

  // Initialize the various portions of the memory. The body is not initialized
  // as this would be an unnecessary performance hit.
  InitializeBlockHeader(block_info);
  InitializeBlockHeaderPadding(block_info);
  InitializeBlockTrailerPadding(block_info);
  InitializeBlockTrailer(block_info);
}
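
// A minimal usage sketch tying the two functions together (hypothetical call
// site; AlignedAlloc stands in for any allocator honoring
// layout.block_alignment, and error handling is elided):
//   BlockLayout layout = {};
//   if (BlockPlanLayout(kShadowRatio, kShadowRatio, 100, 0, 0, &layout)) {
//     void* alloc = AlignedAlloc(layout.block_size, layout.block_alignment);
//     BlockInfo info = {};
//     BlockInitialize(layout, alloc, false, &info);
//     // info.body now points at the 100-byte user-visible region.
//   }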

bool BlockInfoFromMemory(const BlockHeader* header,
                         CompactBlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), header);
  DCHECK_NE(static_cast<CompactBlockInfo*>(nullptr), block_info);

  __try {
    // As little code as possible is inside the body of the __try so that
    // our code coverage can instrument it.
    bool result = BlockInfoFromMemoryImpl(header, block_info);
    return result;
  } __except (BadMemoryAccessFilter(GetExceptionInformation())) {  // NOLINT
    // The block is either corrupt, or the pages are protected.
    return false;
  }
}

void ConvertBlockInfo(const CompactBlockInfo& compact, BlockInfo* expanded) {
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), expanded);

  // Get a byte pointer to the header for use in calculating pointers to the
  // various other parts of the block.
  uint8* block = reinterpret_cast<uint8*>(compact.header);

  expanded->block_size = compact.block_size;
  expanded->header = compact.header;
  expanded->header_padding_size = compact.header_size - sizeof(BlockHeader);
  expanded->header_padding = reinterpret_cast<BlockHeaderPadding*>(
      block + sizeof(BlockHeader));
  expanded->body = reinterpret_cast<BlockBody*>(block + compact.header_size);
  expanded->body_size = compact.block_size - compact.header_size -
      compact.trailer_size;
  expanded->trailer_padding_size = compact.trailer_size - sizeof(BlockTrailer);
  expanded->trailer_padding = reinterpret_cast<BlockTrailerPadding*>(
      block + compact.header_size + expanded->body_size);
  expanded->trailer = reinterpret_cast<BlockTrailer*>(
      expanded->RawTrailerPadding() + expanded->trailer_padding_size);
  expanded->is_nested = compact.is_nested;
  BlockIdentifyWholePages(expanded);
}

void ConvertBlockInfo(const BlockInfo& expanded, CompactBlockInfo* compact) {
  DCHECK_NE(static_cast<CompactBlockInfo*>(nullptr), compact);
  compact->header = expanded.header;
  compact->block_size = expanded.block_size;
  compact->header_size = sizeof(BlockHeader) + expanded.header_padding_size;
  compact->trailer_size = sizeof(BlockTrailer) + expanded.trailer_padding_size;
  compact->is_nested = expanded.is_nested;
}

bool BlockInfoFromMemory(const BlockHeader* header, BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), header);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  CompactBlockInfo compact = {};
  if (!BlockInfoFromMemory(header, &compact))
    return false;
  ConvertBlockInfo(compact, block_info);
  return true;
}

BlockHeader* BlockGetHeaderFromBody(const BlockBody* body) {
  DCHECK_NE(static_cast<BlockBody*>(nullptr), body);

  __try {
    // As little code as possible is inside the body of the __try so that
    // our code coverage can instrument it.
    BlockHeader* header = BlockGetHeaderFromBodyImpl(body);
    return header;
  } __except (BadMemoryAccessFilter(GetExceptionInformation())) {  // NOLINT
    // The block is either corrupt, or the pages are protected.
    return nullptr;
  }
}

uint32 BlockCalculateChecksum(const BlockInfo& block_info) {
  // It is much easier to calculate the checksum in place, so this temporarily
  // modifies the block but restores the original checksum value afterwards.
  uint32 old_checksum = block_info.header->checksum;
  block_info.header->checksum = 0;
  BlockSetChecksum(block_info);
  uint32 new_checksum = block_info.header->checksum;
  block_info.header->checksum = old_checksum;
  return new_checksum;
}

bool BlockChecksumIsValid(const BlockInfo& block_info) {
  uint32 checksum = BlockCalculateChecksum(block_info);
  return checksum == block_info.header->checksum;
}

void BlockSetChecksum(const BlockInfo& block_info) {
  block_info.header->checksum = 0;

  uint32 checksum = 0;
  switch (static_cast<BlockState>(block_info.header->state)) {
    case ALLOCATED_BLOCK:
    case QUARANTINED_FLOODED_BLOCK: {
      // Only checksum the header and trailer regions.
      checksum = base::SuperFastHash(
          reinterpret_cast<const char*>(block_info.header),
          block_info.TotalHeaderSize());
      checksum ^= base::SuperFastHash(
          reinterpret_cast<const char*>(block_info.trailer_padding),
          block_info.TotalTrailerSize());
      break;
    }

    // The checksum is calculated in the same way in these two cases.
    case QUARANTINED_BLOCK:
    case FREED_BLOCK: {
      checksum = base::SuperFastHash(
          reinterpret_cast<const char*>(block_info.header),
          block_info.block_size);
      break;
    }
  }

  checksum = CombineUInt32IntoBlockChecksum(checksum);
  DCHECK_EQ(0u, checksum >> kBlockHeaderChecksumBits);
  block_info.header->checksum = checksum;
}

bool BlockBodyIsFloodFilled(const BlockInfo& block_info) {
  // TODO(chrisha): Move the memspn-like function from shadow.cc to a common
  //     place and reuse it here.
  for (size_t i = 0; i < block_info.body_size; ++i) {
    if (block_info.RawBody(i) != kBlockFloodFillByte)
      return false;
  }
  return true;
}

// Identifies whole pages in the given block_info.
void BlockIdentifyWholePages(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  static const size_t kPageInfoSize =
      FIELD_OFFSET(BlockInfo, is_nested) -
      FIELD_OFFSET(BlockInfo, block_pages);

  if (block_info->block_size < GetPageSize()) {
    ::memset(&block_info->block_pages, 0, kPageInfoSize);
    return;
  }

  uint32 alloc_start = reinterpret_cast<uint32>(block_info->header);
  uint32 alloc_end = alloc_start + block_info->block_size;
  alloc_start = ::common::AlignUp(alloc_start, GetPageSize());
  alloc_end = ::common::AlignDown(alloc_end, GetPageSize());
  if (alloc_start >= alloc_end) {
    ::memset(&block_info->block_pages, 0, kPageInfoSize);
    return;
  }

  block_info->block_pages = reinterpret_cast<uint8*>(alloc_start);
  block_info->block_pages_size = alloc_end - alloc_start;

  uint32 left_redzone_end = reinterpret_cast<uint32>(block_info->body);
  uint32 right_redzone_start = left_redzone_end + block_info->body_size;
  left_redzone_end = ::common::AlignDown(left_redzone_end, GetPageSize());
  right_redzone_start = ::common::AlignUp(right_redzone_start, GetPageSize());

  if (alloc_start < left_redzone_end) {
    block_info->left_redzone_pages = reinterpret_cast<uint8*>(alloc_start);
    block_info->left_redzone_pages_size = left_redzone_end - alloc_start;
  } else {
    block_info->left_redzone_pages = nullptr;
    block_info->left_redzone_pages_size = 0;
  }

  if (right_redzone_start < alloc_end) {
    block_info->right_redzone_pages =
        reinterpret_cast<uint8*>(right_redzone_start);
    block_info->right_redzone_pages_size = alloc_end - right_redzone_start;
  } else {
    block_info->right_redzone_pages = nullptr;
    block_info->right_redzone_pages_size = 0;
  }
}
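
// As an illustration of the page math above: with a 4KiB page size, a block
// spanning [0x00401100, 0x00403F00) yields block_pages covering
// [0x00402000, 0x00403000), the only page wholly contained in the block. The
// left and right redzone pages are then the portions of that range lying
// entirely outside the body. (Addresses are purely illustrative.)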

namespace {

// Tries to determine if a block is most likely flood-fill quarantined by
// analyzing the block contents.
bool BlockIsMostLikelyFloodFilled(const BlockInfo& block_info) {
  // Count the number of filled bytes, filled spans and unfilled spans.
  size_t filled = 0;
  size_t filled_spans = 0;
  size_t unfilled_spans = 0;
  bool in_filled_span = false;
  for (size_t i = 0; i < block_info.body_size; ++i) {
    bool byte_is_filled = (block_info.RawBody(i) == kBlockFloodFillByte);
    if (byte_is_filled) {
      ++filled;
      if (!in_filled_span) {
        ++filled_spans;
        in_filled_span = true;
      }
    } else {
      if (in_filled_span) {
        ++unfilled_spans;
        in_filled_span = false;
      }
    }
  }

  // A perfectly flood-filled block has |filled| = |body_size| and
  // |filled_spans| = 1. A likely flood-filled block has a low number of
  // |filled_spans| and mostly contains |filled| bytes. A block that is very
  // likely not flood-filled will have very few |filled| bytes, and somewhere
  // near the same number of |filled_spans|. This whole process is imprecise
  // and hard to quantify, so the following thresholds are quite arbitrary.

  // Arbitrarily place the threshold for flood-filled bytes at 50%. Consider it
  // unlikely that 10 disjoint overwrites have occurred. Also require there to
  // be significantly more filled bytes than filled spans (twice as many).
  if (filled >= block_info.body_size / 2 && unfilled_spans < 10 &&
      filled > filled_spans * 2) {
    return true;
  }

  return false;
}

}  // namespace

BlockState BlockDetermineMostLikelyState(const Shadow* shadow,
                                         const BlockInfo& block_info) {
  // If the block has no body then the header has to be trusted.
  if (block_info.body_size == 0)
    return static_cast<BlockState>(block_info.header->state);

  // Use the shadow memory to determine if the body is marked as freed.
  ShadowMarker marker = shadow->GetShadowMarkerForAddress(block_info.body);
  if (marker == kHeapFreedMarker) {
    // If the body is freed then the block is more than likely quarantined.
    // Take a quick look to see if the block looks mostly flood-filled.
    if (BlockIsMostLikelyFloodFilled(block_info))
      return QUARANTINED_FLOODED_BLOCK;

    // The block may be freed or quarantined. However, the current runtime
    // doesn't actually persist freed blocks, so it must be quarantined.
    return QUARANTINED_BLOCK;
  }

  // Consider the block to be a live allocation.
  return ALLOCATED_BLOCK;
}

namespace {

// Advances a vector of bitflip locations to the next possible set of bitflip
// locations. Returns true if advancing was possible, false otherwise.
// TODO(chrisha): This can be made drastically more efficient by skipping
//     configurations that flip bits in parts of the block that don't
//     contribute to the checksum (i.e., the body for live and flooded
//     allocations).
bool AdvanceBitFlips(size_t positions, std::vector<size_t>* flips) {
  DCHECK_NE(static_cast<std::vector<size_t>*>(nullptr), flips);

  // An empty set of bitflips is already exhausted.
  if (flips->empty())
    return false;

  // Advancing stops when all bitflip positions are as far right as they can
  // go.
  if (flips->at(0) == positions - flips->size())
    return false;

  // Figure out how many consecutive trailing positions are at their maximums.
  // This will terminate before going out of bounds due to the above condition.
  size_t i = 0;
  while (flips->at(flips->size() - i - 1) == positions - i - 1) {
    ++i;
    DCHECK_LT(i, flips->size());
  }

  // Increment the first position that wasn't as far right as it could go, and
  // then make the rest of them consecutive.
  size_t j = flips->size() - i - 1;
  ++flips->at(j);
  DCHECK_LT(flips->at(j), positions);
  for (size_t k = j + 1; k < flips->size(); ++k) {
    flips->at(k) = flips->at(k - 1) + 1;
    DCHECK_LT(flips->at(k), positions);
  }

  return true;
}
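
// For example, with positions == 4 and two flips, the generator enumerates
// {0,1} -> {0,2} -> {0,3} -> {1,2} -> {1,3} -> {2,3} and then reports
// exhaustion: every k-subset of bit positions, in lexicographic order.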

// Flips the bits at the given positions.
void FlipBits(const std::vector<size_t>& flips, const BlockInfo& block_info) {
  for (size_t i = 0; i < flips.size(); ++i) {
    DCHECK_LT(flips[i], block_info.block_size * 8);
    size_t byte = flips[i] / 8;
    size_t bit = flips[i] % 8;
    uint8 mask = static_cast<uint8>(1u << bit);
    block_info.RawBlock(byte) ^= mask;
  }
}

bool BlockBitFlipsFixChecksumImpl(const BlockInfo& block_info,
                                  size_t bitflips) {
  bitflips = std::min(bitflips, kBlockHeaderChecksumBits);

  size_t positions = block_info.block_size * 8;

  // Initialize the first possible sequence of bitflips (with respect to the
  // generator in AdvanceBitFlips).
  std::vector<size_t> flips;
  flips.resize(bitflips);
  for (size_t i = 0; i < flips.size(); ++i)
    flips[i] = i;

  while (true) {
    FlipBits(flips, block_info);
    bool valid_checksum = BlockChecksumIsValid(block_info);
    FlipBits(flips, block_info);
    if (valid_checksum)
      return true;

    // When no more sets of bitflips are possible the search has terminated
    // with a negative result.
    if (!AdvanceBitFlips(positions, &flips))
      return false;
  }

  NOTREACHED();
  return false;
}

}  // namespace

bool BlockBitFlipsFixChecksum(BlockState block_state,
                              const BlockInfo& block_info,
                              size_t bitflips) {
  BlockState old_block_state =
      static_cast<BlockState>(block_info.header->state);
  block_info.header->state = block_state;
  bool result = BlockBitFlipsFixChecksumImpl(block_info, bitflips);
  block_info.header->state = old_block_state;
  return result;
}

size_t BlockBitFlipsRequired(BlockState block_state,
                             const BlockInfo& block_info,
                             size_t max_bitflips) {
  max_bitflips = std::min(max_bitflips, kBlockHeaderChecksumBits);
  for (size_t i = 0; i <= max_bitflips; ++i) {
    if (BlockBitFlipsFixChecksum(block_state, block_info, i))
      return i;
  }

  NOTREACHED();
  return 0;
}
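
// A usage sketch (hypothetical call site): given a block whose checksum no
// longer matches its contents, BlockBitFlipsRequired(QUARANTINED_BLOCK, info,
// 3) returns the minimum number of single-bit flips (0 through 3, if one
// exists within that budget) that would make the checksum consistent again,
// giving a rough measure of how badly the block has been corrupted.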

// This namespace contains helpers for block analysis.
namespace {

// Determines if a stack-capture pointer is valid by referring to the
// stack-capture cache in the active runtime.
bool IsValidStackCapturePointer(const common::StackCapture* stack) {
  if (stack == nullptr)
    return false;
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  StackCaptureCache* cache = runtime->stack_cache();
  DCHECK_NE(static_cast<StackCaptureCache*>(nullptr), cache);
  if (!cache->StackCapturePointerIsValid(stack))
    return false;
  return true;
}

// Determines if a thread-id is valid by referring to the cache of thread-ids
// in the runtime.
bool IsValidThreadId(uint32 thread_id) {
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  if (!runtime->ThreadIdIsValid(thread_id))
    return false;
  return true;
}

// Determines if a timestamp is plausible by referring to the process start
// time as recorded by the runtime.
bool IsValidTicks(uint32 ticks) {
  uint32 end = ::GetTickCount();
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  uint32 begin = runtime->starting_ticks();
  if (ticks < begin || ticks > end)
    return false;
  return true;
}

// Determines if a heap id is valid by referring to the runtime.
bool IsValidHeapId(uint32 heap_id) {
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  if (!runtime->HeapIdIsValid(heap_id))
    return false;
  return true;
}

bool BlockHeaderIsConsistent(BlockState block_state,
                             const BlockInfo& block_info) {
  const BlockHeader* h = block_info.header;
  if (h->magic != kBlockHeaderMagic)
    return false;
  if (static_cast<bool>(h->is_nested) != block_info.is_nested)
    return false;

  bool expect_header_padding = block_info.header_padding_size > 0;
  if (static_cast<bool>(h->has_header_padding) != expect_header_padding)
    return false;

  bool expect_excess_trailer_padding =
      block_info.trailer_padding_size > (kShadowRatio / 2);
  if (static_cast<bool>(h->has_excess_trailer_padding) !=
      expect_excess_trailer_padding) {
    return false;
  }

  if (h->body_size != block_info.body_size)
    return false;

  // There should always be a valid allocation stack trace.
  if (!IsValidStackCapturePointer(h->alloc_stack))
    return false;

  // The free stack should be empty if we're in the allocated state.
  if (block_state == ALLOCATED_BLOCK) {
    if (h->free_stack != nullptr)
      return false;
  } else {
    // Otherwise there should be a valid free stack.
    if (!IsValidStackCapturePointer(h->free_stack))
      return false;
  }

  // If there's no header padding then the header is consistent.
  if (block_info.header_padding_size == 0)
    return true;

  // Analyze the block header padding.
  const uint32* head = reinterpret_cast<const uint32*>(
      block_info.header_padding);
  const uint32* tail = reinterpret_cast<const uint32*>(
      block_info.RawHeaderPadding() + block_info.header_padding_size) - 1;
  if (*head != block_info.header_padding_size)
    return false;
  if (*tail != block_info.header_padding_size)
    return false;
  static const uint32 kHeaderPaddingValue =
      (kBlockHeaderPaddingByte << 24) |
      (kBlockHeaderPaddingByte << 16) |
      (kBlockHeaderPaddingByte << 8) |
      kBlockHeaderPaddingByte;
  for (++head; head < tail; ++head) {
    if (*head != kHeaderPaddingValue)
      return false;
  }

  return true;
}

// Returns true if the trailer is self-consistent and consistent with the
// rest of the block, false otherwise.
bool BlockTrailerIsConsistent(BlockState block_state,
                              const BlockInfo& block_info) {
  const BlockTrailer* t = block_info.trailer;

  // The allocation data must always be set.
  if (!IsValidThreadId(t->alloc_tid))
    return false;
  if (!IsValidTicks(t->alloc_ticks))
    return false;

  // The free fields must not be set for allocated blocks, and must be set
  // otherwise.
  if (block_state == ALLOCATED_BLOCK) {
    if (t->free_tid != 0 || t->free_ticks != 0)
      return false;
  } else {
    if (t->free_tid == 0 || t->free_ticks == 0)
      return false;
  }

  // The heap ID must always be set and be valid.
  if (!IsValidHeapId(t->heap_id))
    return false;

  // If there's no padding to check then we're done.
  if (block_info.trailer_padding_size == 0)
    return true;

  const uint8* padding = block_info.RawTrailerPadding();
  size_t size = block_info.trailer_padding_size;

  // If we have excess trailer padding then check the encoded length.
  if (size > (kShadowRatio / 2)) {
    const uint32* length = reinterpret_cast<const uint32*>(padding);
    if (*length != size)
      return false;
    padding += sizeof(uint32);
    size -= sizeof(uint32);
  }

  // Check the remaining trailer padding to ensure it's appropriately
  // flood-filled.
  while (size > 0) {
    if (*padding != kBlockTrailerPaddingByte)
      return false;
    ++padding;
    --size;
  }

  return true;
}

}  // namespace

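// An informal summary of the analysis below (a restatement of the logic,
// not an independent specification):
//   - Checksum valid, any state but flooded: all parts reported clean.
//   - Checksum valid, flooded state: the body is additionally verified
//     byte-by-byte against the flood-fill pattern.
//   - Checksum invalid: the header and trailer consistency checks apportion
//     blame; the body can never be directly exonerated.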
void BlockAnalyze(BlockState block_state,
                  const BlockInfo& block_info,
                  BlockAnalysisResult* result) {
  DCHECK_NE(static_cast<BlockAnalysisResult*>(nullptr), result);

  result->block_state = kDataStateUnknown;
  result->header_state = kDataStateUnknown;
  result->body_state = kDataStateUnknown;
  result->trailer_state = kDataStateUnknown;

  bool checksum_is_valid = BlockChecksumIsValid(block_info);
  if (checksum_is_valid) {
    result->block_state = kDataIsClean;
    result->header_state = kDataIsClean;
    result->body_state = kDataIsClean;
    result->trailer_state = kDataIsClean;

    // Unless the block is flood-filled the checksum is the only thing that
    // needs to be checked.
    if (block_state != QUARANTINED_FLOODED_BLOCK)
      return;
  }

  // If the block is flood-filled then check the block contents.
  if (block_state == QUARANTINED_FLOODED_BLOCK) {
    if (!BlockBodyIsFloodFilled(block_info)) {
      result->block_state = kDataIsCorrupt;
      result->body_state = kDataIsCorrupt;
    }

    // The checksum is valid so the header and trailer can be inferred to be
    // clean.
    if (checksum_is_valid)
      return;

    // Fall through and let the following logic determine which of the header
    // and trailer is corrupt.
  }

  // At this point it's known that the checksum is invalid, so some part
  // of the block is corrupt.
  DCHECK(!checksum_is_valid);
  result->block_state = kDataIsCorrupt;

  // Either the header, the body or the trailer is invalid. We can't
  // ever exonerate the body contents, so at the very least its state
  // is unknown. Leave it set to unknown.

  // Check the header.
  bool consistent_header = BlockHeaderIsConsistent(block_state, block_info);
  if (!consistent_header) {
    result->header_state = kDataIsCorrupt;
  } else {
    result->header_state = kDataIsClean;
  }

  // Check the trailer.
  bool consistent_trailer = BlockTrailerIsConsistent(block_state, block_info);
  if (!consistent_trailer) {
    result->trailer_state = kDataIsCorrupt;
  } else {
    result->trailer_state = kDataIsClean;
  }

  if (consistent_header && consistent_trailer) {
    // If both the header and trailer are fine and the body is not *known* to
    // be clean, then it is most likely that the header and trailer are clean
    // and the body is corrupt. If the body is known to be clean (in the case
    // of a flood-filled body) then this is a hash collision and both the
    // header and trailer will be marked as suspect.
    if (result->body_state != kDataIsClean) {
      result->body_state = kDataIsCorrupt;
    } else {
      DCHECK_EQ(QUARANTINED_FLOODED_BLOCK, block_state);
      result->header_state = kDataStateUnknown;
      result->trailer_state = kDataStateUnknown;
    }
  }
}

void SetOnExceptionCallback(OnExceptionCallback callback) {
  g_on_exception_callback = callback;
}

void ClearOnExceptionCallback() {
  g_on_exception_callback.Reset();
}

}  // namespace asan
}  // namespace agent
|