// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/block.h"

#include <algorithm>

#include "base/hash.h"
#include "base/logging.h"
#include "syzygy/agent/asan/runtime.h"
#include "syzygy/agent/asan/shadow.h"
#include "syzygy/agent/asan/stack_capture_cache.h"
#include "syzygy/common/align.h"

namespace agent {
namespace asan {

namespace {

using ::common::IsAligned;
// Defines a function that returns the maximum value representable by
// the given bitfield.
#define DECLARE_GET_MAX_BITFIELD_VALUE_FUNCTION(Type, FieldName)  \
    size_t GetMaxValue ## Type ## _ ## FieldName() {  \
      Type t = {};  \
      t.FieldName = 0;  \
      --t.FieldName;  \
      size_t value = t.FieldName;  \
      return value;  \
    }
DECLARE_GET_MAX_BITFIELD_VALUE_FUNCTION(BlockHeader, body_size);
#undef DECLARE_GET_MAX_BITFIELD_VALUE_FUNCTION

const size_t kMaxBlockHeaderBodySize = GetMaxValueBlockHeader_body_size();
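
// The macro relies on unsigned wrap-around: setting the bitfield to zero and
// then decrementing it leaves the field holding its all-ones (maximum) value.
// As an illustrative example, a hypothetical 4-bit field would wrap from 0 to
// 0xF, so the generated function would return 15.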

void InitializeBlockHeader(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  DCHECK_NE(static_cast<BlockHeader*>(NULL), block_info->header);
  ::memset(block_info->header, 0, sizeof(BlockHeader));
  block_info->header->magic = kBlockHeaderMagic;
  block_info->header->is_nested = block_info->is_nested;
  block_info->header->has_header_padding = block_info->header_padding_size > 0;
  block_info->header->has_excess_trailer_padding =
      block_info->trailer_padding_size > sizeof(size_t);
  block_info->header->state = ALLOCATED_BLOCK;
  block_info->header->body_size = block_info->body_size;
}

void InitializeBlockHeaderPadding(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  if (block_info->header_padding_size == 0)
    return;
  DCHECK(IsAligned(block_info->header_padding_size, kShadowRatio));
  DCHECK(IsAligned(block_info->header_padding_size, 2 * sizeof(size_t)));

  ::memset(block_info->RawHeaderPadding() + sizeof(size_t),
           kBlockHeaderPaddingByte,
           block_info->header_padding_size - 2 * sizeof(size_t));
  size_t* head = reinterpret_cast<size_t*>(block_info->header_padding);
  size_t* tail = reinterpret_cast<size_t*>(block_info->RawHeaderPadding() +
                                           block_info->header_padding_size -
                                           sizeof(size_t));
  *head = block_info->header_padding_size;
  *tail = block_info->header_padding_size;
}
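
// The resulting header padding layout is (sketch derived from the code above;
// both length fields hold header_padding_size):
//
//   +----------------+------------------------------+----------------+
//   | size_t: length | kBlockHeaderPaddingByte fill | size_t: length |
//   +----------------+------------------------------+----------------+
//   |<-------------------- header_padding_size --------------------->|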

void InitializeBlockTrailerPadding(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  if (block_info->trailer_padding_size == 0)
    return;
  ::memset(block_info->trailer_padding, kBlockTrailerPaddingByte,
           block_info->trailer_padding_size);
  if (block_info->trailer_padding_size > (kShadowRatio / 2)) {
    // This is guaranteed by kShadowRatio being >= 8, but we double-check
    // for sanity's sake.
    DCHECK_LE(sizeof(uint32_t), block_info->trailer_padding_size);
    size_t* head = reinterpret_cast<size_t*>(block_info->trailer_padding);
    *head = block_info->trailer_padding_size;
  }
}

void InitializeBlockTrailer(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  ::memset(block_info->trailer, 0, sizeof(BlockTrailer));
  block_info->trailer->alloc_ticks = ::GetTickCount();
  block_info->trailer->alloc_tid = ::GetCurrentThreadId();
}

// Folds the bits of a uint32_t down to the number of bits used to store the
// block checksum.
uint32_t CombineUInt32IntoBlockChecksum(uint32_t val) {
  uint32_t checksum = 0;
  while (val != 0) {
    checksum ^= val;
    val >>= kBlockHeaderChecksumBits;
  }
  checksum &= ((1 << kBlockHeaderChecksumBits) - 1);
  return checksum;
}
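
// Worked example (illustrative, assuming a hypothetical checksum width of
// 8 bits; the real width is kBlockHeaderChecksumBits): folding 0xAABBCCDD
// XORs the running value with itself shifted right a byte at a time, so the
// low 8 bits of the result are 0xAA ^ 0xBB ^ 0xCC ^ 0xDD, and the final mask
// discards everything above those 8 bits.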

// Global callback invoked by exception handlers when exceptions occur. This
// is a testing seam.
OnExceptionCallback g_on_exception_callback;

// An exception filter that catches access violations and out-of-bounds
// accesses. This also invokes the OnExceptionCallback if one has been
// provided.
DWORD BadMemoryAccessFilter(EXCEPTION_POINTERS* e) {
  if (e->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
      e->ExceptionRecord->ExceptionCode == EXCEPTION_ARRAY_BOUNDS_EXCEEDED) {
    // Invoke the callback if there is one registered. This has to happen here
    // because the exception record lives on the stack in this frame. If we
    // accessed it in the exception handler itself it would be below the
    // bottom of the stack and potentially overwritten by the handler's call
    // tree.
    if (!g_on_exception_callback.is_null())
      g_on_exception_callback.Run(e);
    return EXCEPTION_EXECUTE_HANDLER;
  }
  return EXCEPTION_CONTINUE_SEARCH;
}
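
// The filter is meant to be used in the __try/__except pattern below; this is
// exactly how BlockInfoFromMemory and BlockGetHeaderFromBody use it later in
// this file:
//
//   __try {
//     // Touch potentially unmapped or protected memory.
//   } __except (BadMemoryAccessFilter(GetExceptionInformation())) {
//     // Recover from the bad access.
//   }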

bool BlockInfoFromMemoryImpl(const BlockHeader* const_header,
                             CompactBlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), const_header);
  DCHECK_NE(static_cast<CompactBlockInfo*>(nullptr), block_info);

  // We only perform read operations, but the block_info needs to be populated
  // with non-const pointers. Thus, we cast once here to avoid a bunch of
  // casts all through this method.
  BlockHeader* header = const_cast<BlockHeader*>(const_header);

  // The block header must be minimally aligned and begin with the expected
  // magic.
  if (!IsAligned(reinterpret_cast<uint32_t>(header), kShadowRatio))
    return false;
  if (header->magic != kBlockHeaderMagic)
    return false;

  // Parse the header padding if present.
  uint32_t header_padding_size = 0;
  if (header->has_header_padding) {
    uint8_t* padding = reinterpret_cast<uint8_t*>(header + 1);
    uint32_t* head = reinterpret_cast<uint32_t*>(padding);
    header_padding_size = *head;
    if (header_padding_size < 2 * sizeof(uint32_t))
      return false;
    if (!IsAligned(header_padding_size, kShadowRatio))
      return false;
    uint32_t* tail = reinterpret_cast<uint32_t*>(
        padding + header_padding_size - sizeof(uint32_t));
    if (header_padding_size != *tail)
      return false;
  }

  // Parse the body.
  uint8_t* body = reinterpret_cast<uint8_t*>(header + 1) + header_padding_size;

  // Parse the trailer padding.
  uint32_t trailer_padding_size = 0;
  if (header->has_excess_trailer_padding) {
    uint32_t* head = reinterpret_cast<uint32_t*>(body + header->body_size);
    trailer_padding_size = *head;
  } else if ((header->body_size % kShadowRatio) != (kShadowRatio / 2)) {
    trailer_padding_size = (kShadowRatio / 2) -
        (header->body_size % (kShadowRatio / 2));
  }

  // Parse the trailer. Its end must be kShadowRatio-aligned.
  BlockTrailer* trailer = reinterpret_cast<BlockTrailer*>(
      body + header->body_size + trailer_padding_size);
  if (!IsAligned(reinterpret_cast<uint32_t>(trailer + 1), kShadowRatio))
    return false;

  block_info->header = header;
  block_info->block_size = reinterpret_cast<uint8_t*>(trailer + 1) -
      reinterpret_cast<uint8_t*>(header);
  block_info->header_size = sizeof(BlockHeader) + header_padding_size;
  block_info->trailer_size = trailer_padding_size + sizeof(BlockTrailer);
  block_info->is_nested = header->is_nested;

  return true;
}
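
// The layout parsed above is, in summary (padding regions may be empty):
//
//   +--------+----------------+------+-----------------+---------+
//   | header | header padding | body | trailer padding | trailer |
//   +--------+----------------+------+-----------------+---------+
//   |<---- header_size ------>|      |<----- trailer_size ------>|
//   |<------------------------ block_size -------------------->|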

BlockHeader* BlockGetHeaderFromBodyImpl(const BlockBody* const_body) {
  DCHECK_NE(static_cast<BlockBody*>(nullptr), const_body);

  void* body = const_cast<BlockBody*>(const_body);

  // The header must be appropriately aligned.
  if (!IsAligned(reinterpret_cast<uint32_t>(body), kShadowRatio))
    return NULL;

  // First assume that there is no padding, and check if a valid block header
  // is found there.
  BlockHeader* header = reinterpret_cast<BlockHeader*>(body) - 1;
  if (header->magic == kBlockHeaderMagic && header->has_header_padding == 0)
    return header;

  // Otherwise assume there is padding. The padding must be formatted
  // correctly and have a valid length.
  uint32_t* tail = reinterpret_cast<uint32_t*>(body) - 1;
  if (*tail == 0 || !IsAligned(*tail, kShadowRatio))
    return NULL;
  uint32_t* head = (tail + 1) - ((*tail) / sizeof(uint32_t));
  if (head > tail)
    return NULL;
  if (*head != *tail)
    return NULL;

  // Expect there to be a valid block header.
  header = reinterpret_cast<BlockHeader*>(head) - 1;
  if (header->magic == kBlockHeaderMagic && header->has_header_padding == 1)
    return header;

  // No valid block header was found before the provided body address.
  return NULL;
}
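
// The backwards walk above relies on the padding layout written by
// InitializeBlockHeaderPadding (sketch):
//
//   | header | length | fill ... | length | body ... |
//                               tail^     ^body
//
// The length word immediately preceding the body gives the padding size,
// which locates the padding head; the header sits immediately before that.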

}  // namespace

bool BlockPlanLayout(uint32_t chunk_size,
                     uint32_t alignment,
                     uint32_t size,
                     uint32_t min_left_redzone_size,
                     uint32_t min_right_redzone_size,
                     BlockLayout* layout) {
  DCHECK_LE(kShadowRatio, chunk_size);
  DCHECK(::common::IsPowerOfTwo(chunk_size));
  DCHECK_LE(kShadowRatio, alignment);
  DCHECK_GE(chunk_size, alignment);
  DCHECK(::common::IsPowerOfTwo(alignment));

  // Calculate minimum redzone sizes that respect the parameters.
  uint32_t left_redzone_size = static_cast<uint32_t>(::common::AlignUp(
      std::max<uint32_t>(min_left_redzone_size, sizeof(BlockHeader)),
      alignment));
  uint32_t right_redzone_size = std::max<uint32_t>(min_right_redzone_size,
                                                   sizeof(BlockTrailer));

  // Calculate the total size of the allocation.
  uint32_t total_size = static_cast<uint32_t>(::common::AlignUp(
      left_redzone_size + size + right_redzone_size, chunk_size));

  if (total_size < size)
    return false;

  // Now figure out the sizes of things such that the body of the allocation
  // is aligned as closely as possible to the beginning of the right redzone
  // while respecting the body alignment requirements. This favors catching
  // overflows over underflows when page protection mechanisms are active.
  uint32_t body_trailer_size = size + right_redzone_size;
  uint32_t body_trailer_size_aligned = static_cast<uint32_t>(
      ::common::AlignUp(body_trailer_size, alignment));
  uint32_t body_padding_size = body_trailer_size_aligned - body_trailer_size;
  right_redzone_size += body_padding_size;

  // The left redzone takes up the rest of the space.
  left_redzone_size = total_size - right_redzone_size - size;

  // Make sure the basic layout invariants are satisfied.
  DCHECK_LE(min_left_redzone_size, left_redzone_size);
  DCHECK_LE(min_right_redzone_size, right_redzone_size);
  DCHECK_EQ(total_size, (left_redzone_size + size + right_redzone_size));
  DCHECK(IsAligned(total_size, chunk_size));
  DCHECK(IsAligned(left_redzone_size, alignment));

  // Fill out the layout structure.
  layout->block_alignment = chunk_size;
  layout->block_size = static_cast<uint32_t>(total_size);
  layout->header_size = sizeof(BlockHeader);
  layout->header_padding_size = left_redzone_size - sizeof(BlockHeader);
  layout->body_size = size;
  layout->trailer_padding_size = right_redzone_size - sizeof(BlockTrailer);
  layout->trailer_size = sizeof(BlockTrailer);
  return true;
}
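
// Usage sketch (illustrative only; the sizes are arbitrary):
//
//   BlockLayout layout = {};
//   if (BlockPlanLayout(4096, 8, 100, 0, 0, &layout)) {
//     // layout.block_size is a multiple of 4096 and the 100-byte body is
//     // pushed as close as possible to the trailer padding, maximizing the
//     // chance of catching overflows when redzone pages are protected.
//   }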

void BlockInitialize(const BlockLayout& layout,
                     void* allocation,
                     bool is_nested,
                     BlockInfo* block_info) {
  DCHECK_NE(static_cast<void*>(NULL), allocation);
  DCHECK(IsAligned(reinterpret_cast<uint32_t>(allocation),
                   layout.block_alignment));

  // If no output structure is provided then use a local one. We need the data
  // locally, but the caller might not be interested in it.
  BlockInfo local_block_info = {};
  if (block_info == NULL) {
    block_info = &local_block_info;
  } else {
    ::memset(block_info, 0, sizeof(BlockInfo));
  }

  // Get pointers to the various components of the block.
  uint8_t* cursor = reinterpret_cast<uint8_t*>(allocation);
  block_info->block_size = layout.block_size;
  block_info->is_nested = is_nested;
  block_info->header = reinterpret_cast<BlockHeader*>(cursor);
  cursor += sizeof(BlockHeader);
  block_info->header_padding = reinterpret_cast<BlockHeaderPadding*>(cursor);
  cursor += layout.header_padding_size;
  block_info->header_padding_size = layout.header_padding_size;
  block_info->body = reinterpret_cast<BlockBody*>(cursor);
  cursor += layout.body_size;
  block_info->body_size = layout.body_size;
  block_info->trailer_padding = reinterpret_cast<BlockTrailerPadding*>(cursor);
  cursor += layout.trailer_padding_size;
  block_info->trailer_padding_size = layout.trailer_padding_size;
  block_info->trailer = reinterpret_cast<BlockTrailer*>(cursor);

  // If the block information is being returned to the user then determine
  // the extents of whole pages within it.
  if (block_info != &local_block_info)
    BlockIdentifyWholePages(block_info);

  // Initialize the various portions of the memory. The body is not
  // initialized as that would be an unnecessary performance hit.
  InitializeBlockHeader(block_info);
  InitializeBlockHeaderPadding(block_info);
  InitializeBlockTrailerPadding(block_info);
  InitializeBlockTrailer(block_info);
}
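
// Usage sketch (illustrative; _aligned_malloc stands in for whatever
// allocator actually backs the block):
//
//   BlockLayout layout = {};
//   CHECK(BlockPlanLayout(4096, 8, 100, 0, 0, &layout));
//   void* alloc = ::_aligned_malloc(layout.block_size,
//                                   layout.block_alignment);
//   BlockInfo info = {};
//   BlockInitialize(layout, alloc, false, &info);
//   // info.body now points at the 100-byte usable region.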

bool BlockInfoFromMemory(const BlockHeader* header,
                         CompactBlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), header);
  DCHECK_NE(static_cast<CompactBlockInfo*>(nullptr), block_info);

  __try {
    // As little code as possible is inside the body of the __try so that
    // our code coverage can instrument it.
    bool result = BlockInfoFromMemoryImpl(header, block_info);
    return result;
  } __except (BadMemoryAccessFilter(GetExceptionInformation())) {  // NOLINT
    // The block is either corrupt, or the pages are protected.
    return false;
  }
}

void ConvertBlockInfo(const CompactBlockInfo& compact, BlockInfo* expanded) {
  // Get a byte-aligned pointer to the header for use in calculating pointers
  // to the various other parts of the block.
  uint8_t* block = reinterpret_cast<uint8_t*>(compact.header);

  expanded->block_size = compact.block_size;
  expanded->header = compact.header;
  expanded->header_padding_size = compact.header_size - sizeof(BlockHeader);
  expanded->header_padding = reinterpret_cast<BlockHeaderPadding*>(
      block + sizeof(BlockHeader));
  expanded->body = reinterpret_cast<BlockBody*>(block + compact.header_size);
  expanded->body_size = compact.block_size - compact.header_size -
      compact.trailer_size;
  expanded->trailer_padding_size = compact.trailer_size - sizeof(BlockTrailer);
  expanded->trailer_padding = reinterpret_cast<BlockTrailerPadding*>(
      block + compact.header_size + expanded->body_size);
  expanded->trailer = reinterpret_cast<BlockTrailer*>(
      expanded->RawTrailerPadding() + expanded->trailer_padding_size);
  expanded->is_nested = compact.is_nested;
  BlockIdentifyWholePages(expanded);
}

void ConvertBlockInfo(const BlockInfo& expanded, CompactBlockInfo* compact) {
  DCHECK_NE(static_cast<CompactBlockInfo*>(nullptr), compact);
  compact->header = expanded.header;
  compact->block_size = expanded.block_size;
  compact->header_size = static_cast<uint32_t>(sizeof(BlockHeader) +
                                               expanded.header_padding_size);
  compact->trailer_size = static_cast<uint32_t>(sizeof(BlockTrailer) +
                                                expanded.trailer_padding_size);
  compact->is_nested = expanded.is_nested;
}

bool BlockInfoFromMemory(const BlockHeader* header, BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), header);
  DCHECK_NE(static_cast<BlockInfo*>(nullptr), block_info);
  CompactBlockInfo compact = {};
  if (!BlockInfoFromMemory(header, &compact))
    return false;
  ConvertBlockInfo(compact, block_info);
  return true;
}

BlockHeader* BlockGetHeaderFromBody(const BlockBody* body) {
  DCHECK_NE(static_cast<BlockBody*>(nullptr), body);

  __try {
    // As little code as possible is inside the body of the __try so that
    // our code coverage can instrument it.
    BlockHeader* header = BlockGetHeaderFromBodyImpl(body);
    return header;
  } __except (BadMemoryAccessFilter(GetExceptionInformation())) {  // NOLINT
    // The block is either corrupt, or the pages are protected.
    return nullptr;
  }
}

uint32_t BlockCalculateChecksum(const BlockInfo& block_info) {
  // It is much easier to calculate the checksum in place, so this temporarily
  // modifies the block and then restores the original checksum value.
  uint32_t old_checksum = block_info.header->checksum;
  block_info.header->checksum = 0;
  BlockSetChecksum(block_info);
  uint32_t new_checksum = block_info.header->checksum;
  block_info.header->checksum = old_checksum;
  return new_checksum;
}

bool BlockChecksumIsValid(const BlockInfo& block_info) {
  uint32_t checksum = BlockCalculateChecksum(block_info);
  return checksum == block_info.header->checksum;
}

void BlockSetChecksum(const BlockInfo& block_info) {
  block_info.header->checksum = 0;

  uint32_t checksum = 0;
  switch (static_cast<BlockState>(block_info.header->state)) {
    case ALLOCATED_BLOCK:
    case QUARANTINED_FLOODED_BLOCK: {
      // Only checksum the header and trailer regions.
      checksum = base::SuperFastHash(
          reinterpret_cast<const char*>(block_info.header),
          block_info.TotalHeaderSize());
      checksum ^= base::SuperFastHash(
          reinterpret_cast<const char*>(block_info.trailer_padding),
          block_info.TotalTrailerSize());
      break;
    }

    // The checksum is calculated in the same way in these two cases.
    case QUARANTINED_BLOCK:
    case FREED_BLOCK: {
      checksum = base::SuperFastHash(
          reinterpret_cast<const char*>(block_info.header),
          block_info.block_size);
      break;
    }
  }

  checksum = CombineUInt32IntoBlockChecksum(checksum);
  DCHECK_EQ(0u, checksum >> kBlockHeaderChecksumBits);
  block_info.header->checksum = checksum;
}
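
// Checksum coverage by block state, as implemented above: allocated and
// flood-filled quarantined blocks hash only the header and trailer regions
// (a live body may legitimately change, and a flooded body is verified
// separately against kBlockFloodFillByte); fully quarantined and freed
// blocks hash the entire block, since nothing should change after free.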

bool BlockBodyIsFloodFilled(const BlockInfo& block_info) {
  // TODO(chrisha): Move the memspn-like function from shadow.cc to a common
  //     place and reuse it here.
  for (uint32_t i = 0; i < block_info.body_size; ++i) {
    if (block_info.RawBody(i) != kBlockFloodFillByte)
      return false;
  }
  return true;
}

// Identifies whole pages in the given block_info.
void BlockIdentifyWholePages(BlockInfo* block_info) {
  DCHECK_NE(static_cast<BlockInfo*>(NULL), block_info);
  static const size_t kPageInfoSize =
      FIELD_OFFSET(BlockInfo, is_nested) -
      FIELD_OFFSET(BlockInfo, block_pages);

  if (block_info->block_size < GetPageSize()) {
    ::memset(&block_info->block_pages, 0, kPageInfoSize);
    return;
  }

  uintptr_t alloc_start = reinterpret_cast<uintptr_t>(block_info->header);
  uintptr_t alloc_end = alloc_start + block_info->block_size;
  alloc_start = ::common::AlignUp(alloc_start, GetPageSize());
  alloc_end = ::common::AlignDown(alloc_end, GetPageSize());
  if (alloc_start >= alloc_end) {
    ::memset(&block_info->block_pages, 0, kPageInfoSize);
    return;
  }

  block_info->block_pages = reinterpret_cast<uint8_t*>(alloc_start);
  block_info->block_pages_size = alloc_end - alloc_start;

  uintptr_t left_redzone_end = reinterpret_cast<uintptr_t>(block_info->body);
  uintptr_t right_redzone_start = left_redzone_end + block_info->body_size;
  left_redzone_end = ::common::AlignDown(left_redzone_end, GetPageSize());
  right_redzone_start = ::common::AlignUp(right_redzone_start, GetPageSize());

  if (alloc_start < left_redzone_end) {
    block_info->left_redzone_pages = reinterpret_cast<uint8_t*>(alloc_start);
    block_info->left_redzone_pages_size = left_redzone_end - alloc_start;
  } else {
    block_info->left_redzone_pages = nullptr;
    block_info->left_redzone_pages_size = 0;
  }

  if (right_redzone_start < alloc_end) {
    block_info->right_redzone_pages =
        reinterpret_cast<uint8_t*>(right_redzone_start);
    block_info->right_redzone_pages_size = alloc_end - right_redzone_start;
  } else {
    block_info->right_redzone_pages = nullptr;
    block_info->right_redzone_pages_size = 0;
  }
}
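
// In summary: block_pages spans every whole page inside the block,
// left_redzone_pages spans whole pages lying entirely before the body, and
// right_redzone_pages spans whole pages lying entirely after it. These are
// the pages that can be protected without making the body inaccessible.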

namespace {

// Tries to determine if a block is most likely flood-fill quarantined by
// analyzing the block contents.
bool BlockIsMostLikelyFloodFilled(const BlockInfo& block_info) {
  // Count the number of filled bytes, filled spans and unfilled spans.
  size_t filled = 0;
  size_t filled_spans = 0;
  size_t unfilled_spans = 0;
  bool in_filled_span = false;
  for (uint32_t i = 0; i < block_info.body_size; ++i) {
    bool byte_is_filled = (block_info.RawBody(i) == kBlockFloodFillByte);
    if (byte_is_filled) {
      ++filled;
      if (!in_filled_span) {
        ++filled_spans;
        in_filled_span = true;
      }
    } else {
      if (in_filled_span) {
        ++unfilled_spans;
        in_filled_span = false;
      }
    }
  }

  // A perfectly flood-filled block has |filled| = |body_size| and
  // |filled_spans| = 1. A likely flood-filled block has a low number of
  // |filled_spans| and mostly contains |filled| bytes. A block that is very
  // likely not flood-filled will have very few |filled| bytes, and somewhere
  // near the same number of |filled_spans|. This whole process is imprecise
  // and hard to quantify, so the following thresholds are quite arbitrary.

  // Arbitrarily place the threshold for flood-filled bytes at 50%. Consider
  // it unlikely that 10 disjoint overwrites have occurred. Also require
  // there to be significantly more filled bytes than filled spans (twice as
  // many).
  if (filled >= block_info.body_size / 2 && unfilled_spans < 10 &&
      filled > filled_spans * 2) {
    return true;
  }

  return false;
}

}  // namespace

BlockState BlockDetermineMostLikelyState(const Shadow* shadow,
                                         const BlockInfo& block_info) {
  // If the block has no body then the header has to be trusted.
  if (block_info.body_size == 0)
    return static_cast<BlockState>(block_info.header->state);

  // Use the shadow memory to determine if the body is marked as freed.
  ShadowMarker marker = shadow->GetShadowMarkerForAddress(block_info.body);
  if (marker == kHeapFreedMarker) {
    // If the body is freed then the block is more than likely quarantined.
    // Do a quick look to see if the block looks mostly flood-filled.
    if (BlockIsMostLikelyFloodFilled(block_info))
      return QUARANTINED_FLOODED_BLOCK;

    // The block may be freed or quarantined. However, the current runtime
    // doesn't actually persist freed blocks, so it must be quarantined.
    return QUARANTINED_BLOCK;
  }

  // Consider the block to be a live allocation.
  return ALLOCATED_BLOCK;
}

namespace {

// Advances a vector of bitflip locations to the next possible set of bitflip
// locations. Returns true if advancing was possible, false otherwise.
// TODO(chrisha): This can be made drastically more efficient by skipping
//     configurations that flip bits in parts of the block that don't
//     contribute to the checksum (i.e., the body for live and flooded
//     allocations).
bool AdvanceBitFlips(size_t positions, std::vector<uint32_t>* flips) {
  DCHECK_NE(static_cast<std::vector<uint32_t>*>(nullptr), flips);

  // An empty set of bitflips is already exhausted.
  if (flips->empty())
    return false;

  // Advancing stops when all bitflip positions are as far right as they can
  // go.
  if (flips->at(0) == positions - flips->size())
    return false;

  // Figure out how many consecutive trailing positions are at their maximums.
  // This will terminate before going out of bounds due to the above
  // condition.
  size_t i = 0;
  while (flips->at(flips->size() - i - 1) == positions - i - 1) {
    ++i;
    DCHECK_LT(i, flips->size());
  }

  // Increment the first position that wasn't as far right as it could go, and
  // then make the rest of them consecutive.
  size_t j = flips->size() - i - 1;
  ++flips->at(j);
  DCHECK_LT(flips->at(j), positions);
  for (size_t k = j + 1; k < flips->size(); ++k) {
    flips->at(k) = flips->at(k - 1) + 1;
    DCHECK_LT(flips->at(k), positions);
  }

  return true;
}
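
// Worked example: with positions == 4 and two flips the generator above
// visits {0, 1}, {0, 2}, {0, 3}, {1, 2}, {1, 3}, {2, 3} and then reports
// exhaustion, i.e. it enumerates all 2-combinations of the 4 positions in
// lexicographic order.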

// Flips the bits at the given positions.
void FlipBits(const std::vector<uint32_t>& flips,
              const BlockInfo& block_info) {
  for (uint32_t i = 0; i < flips.size(); ++i) {
    DCHECK_LT(flips[i], block_info.block_size * 8);
    uint32_t byte = flips[i] / 8;
    uint32_t bit = flips[i] % 8;
    uint8_t mask = static_cast<uint8_t>(1u << bit);
    block_info.RawBlock(byte) ^= mask;
  }
}

bool BlockBitFlipsFixChecksumImpl(const BlockInfo& block_info,
                                  size_t bitflips) {
  bitflips = std::min(bitflips, kBlockHeaderChecksumBits);

  size_t positions = block_info.block_size * 8;

  // Initialize the first possible sequence of bitflips (wrt the generator
  // in AdvanceBitFlips).
  std::vector<uint32_t> flips;
  flips.resize(bitflips);
  for (uint32_t i = 0; i < flips.size(); ++i)
    flips[i] = i;

  while (true) {
    FlipBits(flips, block_info);
    bool valid_checksum = BlockChecksumIsValid(block_info);
    FlipBits(flips, block_info);
    if (valid_checksum)
      return true;

    // When no more sets of bitflips are possible the search has terminated
    // with a negative result.
    if (!AdvanceBitFlips(positions, &flips))
      return false;
  }

  NOTREACHED();
  return false;
}

}  // namespace

bool BlockBitFlipsFixChecksum(BlockState block_state,
                              const BlockInfo& block_info,
                              size_t bitflips) {
  BlockState old_block_state =
      static_cast<BlockState>(block_info.header->state);
  block_info.header->state = block_state;
  bool result = BlockBitFlipsFixChecksumImpl(block_info, bitflips);
  block_info.header->state = old_block_state;
  return result;
}

size_t BlockBitFlipsRequired(BlockState block_state,
                             const BlockInfo& block_info,
                             size_t max_bitflips) {
  max_bitflips = std::min(max_bitflips, kBlockHeaderChecksumBits);
  for (size_t i = 0; i <= max_bitflips; ++i) {
    if (BlockBitFlipsFixChecksum(block_state, block_info, i))
      return i;
  }

  NOTREACHED();
  return 0;
}

// This namespace contains helpers for block analysis.
namespace {

// Determines if a stack-capture pointer is valid by referring to the
// stack-capture cache in the active runtime.
bool IsValidStackCapturePointer(const common::StackCapture* stack) {
  if (stack == nullptr)
    return false;
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  StackCaptureCache* cache = runtime->stack_cache();
  DCHECK_NE(static_cast<StackCaptureCache*>(nullptr), cache);
  if (!cache->StackCapturePointerIsValid(stack))
    return false;
  return true;
}

// Determines if a thread-id is valid by referring to the cache of thread-ids
// in the runtime.
bool IsValidThreadId(uint32_t thread_id) {
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  if (!runtime->ThreadIdIsValid(thread_id))
    return false;
  return true;
}

// Determines if a timestamp is plausible by referring to the process start
// time as recorded by the runtime.
bool IsValidTicks(uint32_t ticks) {
  uint32_t end = ::GetTickCount();
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  uint32_t begin = runtime->starting_ticks();
  if (ticks < begin || ticks > end)
    return false;
  return true;
}

// Determines if a heap id is valid by referring to the runtime.
bool IsValidHeapId(uint32_t heap_id) {
  AsanRuntime* runtime = AsanRuntime::runtime();
  DCHECK_NE(static_cast<AsanRuntime*>(nullptr), runtime);
  if (!runtime->HeapIdIsValid(heap_id))
    return false;
  return true;
}

bool BlockHeaderIsConsistent(BlockState block_state,
                             const BlockInfo& block_info) {
  const BlockHeader* h = block_info.header;
  if (h->magic != kBlockHeaderMagic)
    return false;
  if (static_cast<bool>(h->is_nested) != block_info.is_nested)
    return false;

  bool expect_header_padding = block_info.header_padding_size > 0;
  if (static_cast<bool>(h->has_header_padding) != expect_header_padding)
    return false;

  bool expect_excess_trailer_padding =
      block_info.trailer_padding_size > (kShadowRatio / 2);
  if (static_cast<bool>(h->has_excess_trailer_padding) !=
      expect_excess_trailer_padding) {
    return false;
  }

  if (h->body_size != block_info.body_size)
    return false;

  // There should always be a valid allocation stack trace.
  if (!IsValidStackCapturePointer(h->alloc_stack))
    return false;

  // The free stack should be empty if we're in the allocated state.
  if (block_state == ALLOCATED_BLOCK) {
    if (h->free_stack != nullptr)
      return false;
  } else {
    // Otherwise there should be a valid free stack.
    if (!IsValidStackCapturePointer(h->free_stack))
      return false;
  }

  // If there's no header padding then the block is valid.
  if (block_info.header_padding_size == 0)
    return true;

  // Analyze the block header padding.
  const uint32_t* head =
      reinterpret_cast<const uint32_t*>(block_info.header_padding);
  const uint32_t* tail =
      reinterpret_cast<const uint32_t*>(block_info.RawHeaderPadding() +
                                        block_info.header_padding_size) - 1;
  if (*head != block_info.header_padding_size)
    return false;
  if (*tail != block_info.header_padding_size)
    return false;
  static const uint32_t kHeaderPaddingValue =
      (kBlockHeaderPaddingByte << 24) | (kBlockHeaderPaddingByte << 16) |
      (kBlockHeaderPaddingByte << 8) | kBlockHeaderPaddingByte;
  for (++head; head < tail; ++head) {
    if (*head != kHeaderPaddingValue)
      return false;
  }

  return true;
}

// Returns true if the trailer is self-consistent, false otherwise.
bool BlockTrailerIsConsistent(BlockState block_state,
                              const BlockInfo& block_info) {
  const BlockTrailer* t = block_info.trailer;

  // The allocation data must always be set.
  if (!IsValidThreadId(t->alloc_tid))
    return false;
  if (!IsValidTicks(t->alloc_ticks))
    return false;

  // The free fields must not be set for allocated blocks, and must be set
  // otherwise.
  if (block_state == ALLOCATED_BLOCK) {
    if (t->free_tid != 0 || t->free_ticks != 0)
      return false;
  } else {
    if (t->free_tid == 0 || t->free_ticks == 0)
      return false;
  }

  // The heap ID must always be set and be valid.
  if (!IsValidHeapId(t->heap_id))
    return false;

  // If there's no padding to check then we're done.
  if (block_info.trailer_padding_size == 0)
    return true;

  const uint8_t* padding = block_info.RawTrailerPadding();
  size_t size = block_info.trailer_padding_size;

  // If we have excess trailer padding then check the encoded length.
  if (size > (kShadowRatio / 2)) {
    const uint32_t* length = reinterpret_cast<const uint32_t*>(padding);
    if (*length != size)
      return false;
    padding += sizeof(uint32_t);
    size -= sizeof(uint32_t);
  }

  // Check the remaining trailer padding to ensure it's appropriately
  // flood-filled.
  while (size > 0) {
    if (*padding != kBlockTrailerPaddingByte)
      return false;
    ++padding;
    --size;
  }

  return true;
}

}  // namespace

void BlockAnalyze(BlockState block_state,
                  const BlockInfo& block_info,
                  BlockAnalysisResult* result) {
  DCHECK_NE(static_cast<BlockAnalysisResult*>(nullptr), result);

  result->block_state = kDataStateUnknown;
  result->header_state = kDataStateUnknown;
  result->body_state = kDataStateUnknown;
  result->trailer_state = kDataStateUnknown;

  bool checksum_is_valid = BlockChecksumIsValid(block_info);
  if (checksum_is_valid) {
    result->block_state = kDataIsClean;
    result->header_state = kDataIsClean;
    result->body_state = kDataIsClean;
    result->trailer_state = kDataIsClean;

    // Unless the block is flood-filled the checksum is the only thing that
    // needs to be checked.
    if (block_state != QUARANTINED_FLOODED_BLOCK)
      return;
  }

  // If the block is flood-filled then check the block contents.
  if (block_state == QUARANTINED_FLOODED_BLOCK) {
    if (!BlockBodyIsFloodFilled(block_info)) {
      result->block_state = kDataIsCorrupt;
      result->body_state = kDataIsCorrupt;
    }

    // The checksum is valid so the header and trailer can be inferred to be
    // clean.
    if (checksum_is_valid)
      return;

    // Fall through and let the following logic determine which of the header
    // and trailer is corrupt.
  }

  // At this point it's known that the checksum is invalid, so some part
  // of the block is corrupt.
  DCHECK(!checksum_is_valid);
  result->block_state = kDataIsCorrupt;

  // Either the header, the body or the trailer is invalid. We can't
  // ever exonerate the body contents, so at the very least its state
  // is unknown. Leave it set to unknown.

  // Check the header.
  bool consistent_header = BlockHeaderIsConsistent(block_state, block_info);
  if (!consistent_header) {
    result->header_state = kDataIsCorrupt;
  } else {
    result->header_state = kDataIsClean;
  }

  // Check the trailer.
  bool consistent_trailer = BlockTrailerIsConsistent(block_state, block_info);
  if (!consistent_trailer) {
    result->trailer_state = kDataIsCorrupt;
  } else {
    result->trailer_state = kDataIsClean;
  }

  if (consistent_header && consistent_trailer) {
    // If both the header and trailer are fine and the body is not *known* to
    // be clean, then it is most likely that the header and trailer are clean
    // and the body is corrupt. If the body is known to be clean (in the case
    // of a flood-filled body) then this is a hash collision and both the
    // header and trailer will be marked as suspect.
    if (result->body_state != kDataIsClean) {
      result->body_state = kDataIsCorrupt;
    } else {
      DCHECK_EQ(QUARANTINED_FLOODED_BLOCK, block_state);
      result->header_state = kDataStateUnknown;
      result->trailer_state = kDataStateUnknown;
    }
  }
}
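
// Usage sketch (illustrative): given a block_info recovered via
// BlockInfoFromMemory, classify its contents against the state it is
// believed to be in.
//
//   BlockAnalysisResult result = {};
//   BlockAnalyze(QUARANTINED_BLOCK, block_info, &result);
//   if (result.block_state == kDataIsCorrupt) {
//     // result.header_state, result.body_state and result.trailer_state
//     // indicate which regions are suspect.
//   }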

void SetOnExceptionCallback(OnExceptionCallback callback) {
  g_on_exception_callback = callback;
}

void ClearOnExceptionCallback() {
  g_on_exception_callback.Reset();
}

}  // namespace asan
}  // namespace agent
|