Coverage for /Syzygy/agent/asan/asan_heap.cc

Coverage    Lines (executed / instrumented / missing)    Language    Group
92.2%       402 / 436 / 0                                C++         source

Line-by-line coverage:

   1    :  // Copyright 2012 Google Inc. All Rights Reserved.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  
  15    :  #include "syzygy/agent/asan/asan_heap.h"
  16    :  
  17    :  #include "base/logging.h"
  18    :  #include "base/string_util.h"
  19    :  #include "base/stringprintf.h"
  20    :  #include "base/time.h"
  21    :  #include "base/debug/alias.h"
  22    :  #include "base/debug/stack_trace.h"
  23    :  #include "base/strings/sys_string_conversions.h"
  24    :  #include "syzygy/agent/asan/asan_logger.h"
  25    :  #include "syzygy/agent/asan/asan_runtime.h"
  26    :  #include "syzygy/agent/asan/asan_shadow.h"
  27    :  #include "syzygy/common/align.h"
  28    :  #include "syzygy/trace/common/clock.h"
  29    :  
  30    :  namespace agent {
  31    :  namespace asan {
  32    :  namespace {
  33    :  
// Convenience alias for the identifier type of a captured stack trace.
typedef StackCapture::StackId StackId;
  35    :  
  36    :  // Utility class which implements an auto lock for a HeapProxy.
  37    :  class HeapLocker {
  38    :   public:
  39  E :    explicit HeapLocker(HeapProxy* const heap) : heap_(heap) {
  40  E :      DCHECK(heap != NULL);
  41  E :      if (!heap->Lock()) {
  42  i :        LOG(ERROR) << "Unable to lock the heap.";
  43    :      }
  44  E :    }
  45    :  
  46  E :    ~HeapLocker() {
  47  E :      DCHECK(heap_ != NULL);
  48  E :      if (!heap_->Unlock()) {
  49  i :        LOG(ERROR) << "Unable to lock the heap.";
  50    :      }
  51  E :    }
  52    :  
  53    :   private:
  54    :    HeapProxy* const heap_;
  55    :  
  56    :    DISALLOW_COPY_AND_ASSIGN(HeapLocker);
  57    :  };
  58    :  
  59    :  // Returns the number of CPU cycles per microsecond.
  60  E :  double GetCpuCyclesPerUs() {
  61  E :    trace::common::TimerInfo tsc_info = {};
  62  E :    trace::common::GetTscTimerInfo(&tsc_info);
  63    :  
  64  E :    if (tsc_info.frequency != 0) {
  65    :      return (tsc_info.frequency /
  66  E :          static_cast<double>(base::Time::kMicrosecondsPerSecond));
  67  i :    } else {
  68  i :      uint64 cycle_start = trace::common::GetTsc();
  69  i :      ::Sleep(HeapProxy::kSleepTimeForApproximatingCPUFrequency);
  70    :      return (trace::common::GetTsc() - cycle_start) /
  71    :          (HeapProxy::kSleepTimeForApproximatingCPUFrequency *
  72  i :               static_cast<double>(base::Time::kMicrosecondsPerSecond));
  73    :    }
  74  E :  }
  75    :  
  76    :  // Verify that the memory range [mem, mem + len[ is accessible.
  77  E :  bool MemoryRangeIsAccessible(uint8* mem, size_t len) {
  78  E :    for (size_t i = 0; i < len; ++i) {
  79  E :      if (!Shadow::IsAccessible(mem + i))
  80  i :        return false;
  81  E :    }
  82  E :    return true;
  83  E :  }
  84    :  
  85    :  }  // namespace
  86    :  
// Number of CPU cycles per microsecond; 0.0 until computed.
double HeapProxy::cpu_cycles_per_us_ = 0.0;
// The default quarantine size for a new Heap.
size_t HeapProxy::default_quarantine_max_size_ = kDefaultQuarantineMaxSize_;
// Human-readable descriptions for each kind of bad heap access.
const char* HeapProxy::kHeapUseAfterFree = "heap-use-after-free";
const char* HeapProxy::kHeapBufferUnderFlow = "heap-buffer-underflow";
const char* HeapProxy::kHeapBufferOverFlow = "heap-buffer-overflow";
const char* HeapProxy::kAttemptingDoubleFree = "attempting double-free";
const char* HeapProxy::kWildAccess = "wild access";
const char* HeapProxy::kHeapUnknownError = "heap-unknown-error";
  96    :  
  97  E :  void ASANDbgCmd(const wchar_t* fmt, ...) {
  98    :    // The string should start with "ASAN" to be interpreted by the debugger as a
  99    :    // command.
 100  E :    std::wstring command_wstring = L"ASAN ";
 101    :    va_list args;
 102  E :    va_start(args, fmt);
 103    :  
 104    :    // Append the actual command to the wstring.
 105  E :    base::StringAppendV(&command_wstring, fmt, args);
 106    :  
 107    :    // Append "; g" to make sure that the debugger continue its execution after
 108    :    // executing this command. This is needed because when the .ocommand function
 109    :    // is used under Windbg the debugger will break on OutputDebugString.
 110  E :    command_wstring.append(L"; g");
 111    :  
 112  E :    OutputDebugString(command_wstring.c_str());
 113  E :  }
 114    :  
 115  E :  void ASANDbgMessage(const wchar_t* fmt, ...) {
 116    :    // Prepend the message with the .echo command so it'll be printed into the
 117    :    // debugger's console.
 118  E :    std::wstring message_wstring = L".echo ";
 119    :    va_list args;
 120  E :    va_start(args, fmt);
 121    :  
 122    :    // Append the actual message to the wstring.
 123  E :    base::StringAppendV(&message_wstring, fmt, args);
 124    :  
 125    :    // Treat the message as a command to print it.
 126  E :    ASANDbgCmd(message_wstring.c_str());
 127  E :  }
 128    :  
// Switch to the caller's context and print its stack trace in Windbg.
void ASANDbgPrintContext(const CONTEXT& context) {
  ASANDbgMessage(L"Caller's context (%p) and stack trace:", &context);
  // ".cxr" switches to the given context record; "kv" dumps the stack trace.
  ASANDbgCmd(L".cxr %p; kv", reinterpret_cast<uint32>(&context));
}
 134    :  
// Constructs an unbound proxy; Create() must be called before the heap can
// be used. The quarantine list starts empty.
HeapProxy::HeapProxy(StackCaptureCache* stack_cache, AsanLogger* logger)
    : heap_(NULL),
      stack_cache_(stack_cache),
      logger_(logger),
      head_(NULL),
      tail_(NULL),
      quarantine_size_(0),
      quarantine_max_size_(0) {
  DCHECK(stack_cache != NULL);
  DCHECK(logger != NULL);
}
 146    :  
HeapProxy::~HeapProxy() {
  // Tear down the underlying heap if the owner didn't call Destroy()
  // explicitly.
  if (heap_ != NULL)
    Destroy();

  DCHECK(heap_ == NULL);
}
 153    :  
// Resets the class-wide default quarantine cap to its compile-time default.
void HeapProxy::Init() {
  default_quarantine_max_size_ = kDefaultQuarantineMaxSize_;
}
 157    :  
// A HeapProxy pointer doubles as the opaque HANDLE handed to callers.
HANDLE HeapProxy::ToHandle(HeapProxy* proxy) {
  DCHECK(proxy != NULL);
  return proxy;
}
 162    :  
// Inverse of ToHandle: recovers the proxy from an opaque heap HANDLE.
HeapProxy* HeapProxy::FromHandle(HANDLE heap) {
  DCHECK(heap != NULL);
  return reinterpret_cast<HeapProxy*>(heap);
}
 167    :  
 168    :  bool HeapProxy::Create(DWORD options,
 169    :                         size_t initial_size,
 170  E :                         size_t maximum_size) {
 171  E :    DCHECK(heap_ == NULL);
 172    :  
 173  E :    SetQuarantineMaxSize(default_quarantine_max_size_);
 174    :  
 175  E :    HANDLE heap_new = ::HeapCreate(options, initial_size, maximum_size);
 176  E :    if (heap_new == NULL)
 177  E :      return false;
 178    :  
 179  E :    heap_ = heap_new;
 180    :  
 181  E :    return true;
 182  E :  }
 183    :  
 184  E :  bool HeapProxy::Destroy() {
 185  E :    DCHECK(heap_ != NULL);
 186    :  
 187    :    // Flush the quarantine.
 188  E :    SetQuarantineMaxSize(0);
 189    :  
 190  E :    if (::HeapDestroy(heap_)) {
 191  E :      heap_ = NULL;
 192  E :      return true;
 193    :    }
 194    :  
 195  i :    return false;
 196  E :  }
 197    :  
 198  E :  void* HeapProxy::Alloc(DWORD flags, size_t bytes) {
 199  E :    DCHECK(heap_ != NULL);
 200    :  
 201  E :    size_t alloc_size = GetAllocSize(bytes);
 202    :    BlockHeader* block_header =
 203  E :        reinterpret_cast<BlockHeader*>(::HeapAlloc(heap_, flags, alloc_size));
 204    :  
 205  E :    if (block_header == NULL)
 206  i :      return NULL;
 207    :  
 208    :    // Poison head and tail zones, and un-poison alloc.
 209  E :    size_t header_size = sizeof(BlockHeader);
 210  E :    size_t trailer_size = alloc_size - sizeof(BlockHeader) - bytes;
 211  E :    Shadow::Poison(block_header, sizeof(BlockHeader), Shadow::kHeapLeftRedzone);
 212    :  
 213    :    // Capture the current stack. InitFromStack is inlined to preserve the
 214    :    // greatest number of stack frames.
 215  E :    StackCapture stack;
 216  E :    stack.InitFromStack();
 217    :  
 218    :    // Initialize the block fields.
 219  E :    block_header->magic_number = kBlockHeaderSignature;
 220  E :    block_header->block_size = bytes;
 221  E :    block_header->state = ALLOCATED;
 222  E :    block_header->alloc_stack = stack_cache_->SaveStackTrace(stack);
 223  E :    block_header->alloc_tid = ::GetCurrentThreadId();
 224    :  
 225  E :    BlockTrailer* block_trailer = GetBlockTrailer(block_header);
 226  E :    block_trailer->free_stack = NULL;
 227  E :    block_trailer->free_tid = 0;
 228  E :    block_trailer->next_free_block = NULL;
 229    :  
 230  E :    uint8* block_alloc = ToAlloc(block_header);
 231  E :    DCHECK(MemoryRangeIsAccessible(block_alloc, bytes));
 232    :  
 233  E :    Shadow::Poison(block_alloc + bytes, trailer_size, Shadow::kHeapRightRedzone);
 234    :  
 235  E :    return block_alloc;
 236  E :  }
 237    :  
// Reallocates |mem| to |bytes| bytes by allocating a fresh block, copying the
// overlapping prefix, and freeing the old block. In-place requests always
// fail (we can't grow within the red-zoned layout).
void* HeapProxy::ReAlloc(DWORD flags, void* mem, size_t bytes) {
  DCHECK(heap_ != NULL);

  // Always fail in-place reallocation requests.
  if ((flags & HEAP_REALLOC_IN_PLACE_ONLY) != 0)
    return NULL;

  void *new_mem = Alloc(flags, bytes);
  // Bail early if the new allocation didn't succeed
  // and avoid freeing the existing allocation.
  if (new_mem == NULL)
    return NULL;

  if (mem != NULL) {
    // Copy the smaller of the old and new sizes, then release the old block.
    memcpy(new_mem, mem, std::min(bytes, Size(0, mem)));
    Free(flags, mem);
  }

  return new_mem;
}
 258    :  
// Releases |mem| into the quarantine (the memory is not returned to the OS
// yet, so use-after-free accesses can be detected). Reports a double-free if
// the block is already quarantined. Returns false on error.
bool HeapProxy::Free(DWORD flags, void* mem) {
  DCHECK(heap_ != NULL);
  BlockHeader* block = ToBlockHeader(mem);
  // The standard allows to call free on a null pointer. ToBlock returns null if
  // the given pointer is null so we return true here.
  if (block == NULL)
    return true;

  // Capture the current stack.
  StackCapture stack;
  stack.InitFromStack();

  if (block->state != ALLOCATED) {
    // We're not supposed to see another kind of block here, the FREED state
    // is only applied to block after invalidating their magic number and freed
    // them.
    DCHECK(block->state == QUARANTINED);

    BadAccessKind bad_access_kind =
        GetBadAccessKind(static_cast<const uint8*>(mem), block);
    DCHECK_NE(UNKNOWN_BAD_ACCESS, bad_access_kind);

    // Report the double-free with the context of this (second) free call.
    CONTEXT context = {};
    ::RtlCaptureContext(&context);
    AsanErrorInfo error_info = {};
    error_info.error_type = UNKNOWN_BAD_ACCESS;

    ReportAsanError(kAttemptingDoubleFree, static_cast<const uint8*>(mem),
                    context, stack, bad_access_kind, block,
                    ASAN_UNKNOWN_ACCESS, 0, &error_info);

    return false;
  }

  DCHECK(ToAlloc(block) == mem);
  // Record who freed the block and when, for later error reports.
  BlockTrailer* trailer = GetBlockTrailer(block);
  trailer->free_stack = stack_cache_->SaveStackTrace(stack);
  trailer->free_timestamp = trace::common::GetTsc();
  trailer->free_tid = ::GetCurrentThreadId();

  // If the size of the allocation is zero then we shouldn't check the shadow
  // memory as it'll only contain the red-zone for the head and tail of this
  // block.
  if (block->block_size != 0 && !Shadow::IsAccessible(ToAlloc(block)))
    return false;

  QuarantineBlock(block);
  return true;
}
 308    :  
// Returns the user-requested size of the allocation at |mem|, or -1 (which
// wraps to the maximum size_t value) when the block can't be resolved.
size_t HeapProxy::Size(DWORD flags, const void* mem) {
  DCHECK(heap_ != NULL);
  BlockHeader* block = ToBlockHeader(mem);
  if (block == NULL)
    return -1;

  return block->block_size;
}
 317    :  
 318  E :  bool HeapProxy::Validate(DWORD flags, const void* mem) {
 319  E :    DCHECK(heap_ != NULL);
 320  E :    return ::HeapValidate(heap_, flags, ToBlockHeader(mem)) == TRUE;
 321  E :  }
 322    :  
// Thin wrapper forwarding to ::HeapCompact on the underlying heap.
size_t HeapProxy::Compact(DWORD flags) {
  DCHECK(heap_ != NULL);
  return ::HeapCompact(heap_, flags);
}
 327    :  
// Acquires the underlying heap's lock via ::HeapLock.
bool HeapProxy::Lock() {
  DCHECK(heap_ != NULL);
  return ::HeapLock(heap_) == TRUE;
}
 332    :  
// Releases the underlying heap's lock via ::HeapUnlock.
bool HeapProxy::Unlock() {
  DCHECK(heap_ != NULL);
  return ::HeapUnlock(heap_) == TRUE;
}
 337    :  
// Advances |entry| to the next heap entry via ::HeapWalk; returns false when
// the walk is done or fails.
bool HeapProxy::Walk(PROCESS_HEAP_ENTRY* entry) {
  DCHECK(heap_ != NULL);
  return ::HeapWalk(heap_, entry) == TRUE;
}
 342    :  
// Thin wrapper forwarding to ::HeapSetInformation on the underlying heap.
bool HeapProxy::SetInformation(HEAP_INFORMATION_CLASS info_class,
                               void* info,
                               size_t info_length) {
  DCHECK(heap_ != NULL);
  return ::HeapSetInformation(heap_, info_class, info, info_length) == TRUE;
}
 349    :  
// Thin wrapper forwarding to ::HeapQueryInformation on the underlying heap.
bool HeapProxy::QueryInformation(HEAP_INFORMATION_CLASS info_class,
                                 void* info,
                                 size_t info_length,
                                 unsigned long* return_length) {
  DCHECK(heap_ != NULL);
  return ::HeapQueryInformation(heap_,
                                info_class,
                                info,
                                info_length,
                                return_length) == TRUE;
}
 361    :  
// Updates the quarantine's size cap, then evicts any excess blocks.
void HeapProxy::SetQuarantineMaxSize(size_t quarantine_max_size) {
  {
    // Only the field update needs the lock; TrimQuarantine locks on its own.
    base::AutoLock lock(lock_);
    quarantine_max_size_ = quarantine_max_size;
  }

  TrimQuarantine();
}
 370    :  
// Pops blocks off the head of the quarantine list (FIFO) and actually frees
// them until the quarantine size drops to quarantine_max_size_.
void HeapProxy::TrimQuarantine() {
  while (true) {
    BlockHeader* free_block = NULL;
    BlockTrailer* trailer = NULL;
    size_t alloc_size = 0;

    // This code runs under a critical lock. Try to keep as much work out of
    // this scope as possible!
    {
      base::AutoLock lock(lock_);
      if (quarantine_size_ <= quarantine_max_size_)
        return;

      DCHECK(head_ != NULL);
      DCHECK(tail_ != NULL);

      free_block = head_;
      trailer = GetBlockTrailer(free_block);
      DCHECK(trailer != NULL);

      // Unlink the head; keep tail_ consistent when the list empties.
      head_ = trailer->next_free_block;
      if (head_ == NULL)
        tail_ = NULL;

      alloc_size = GetAllocSize(free_block->block_size);

      DCHECK_GE(quarantine_size_, alloc_size);
      quarantine_size_ -= alloc_size;
    }

    // Return pointers to the stacks for reference counting purposes. We do this
    // outside of the heap lock to reduce contention.
    if (free_block->alloc_stack != NULL) {
      stack_cache_->ReleaseStackTrace(free_block->alloc_stack);
      free_block->alloc_stack = NULL;
    }
    if (trailer->free_stack != NULL) {
      stack_cache_->ReleaseStackTrace(trailer->free_stack);
      trailer->free_stack = NULL;
    }

    // Mark the block as truly freed, make its range addressable again and
    // return it to the underlying heap.
    free_block->state = FREED;
    Shadow::Unpoison(free_block, alloc_size);
    ::HeapFree(heap_, 0, free_block);
  }
}
 417    :  
// Appends |block| to the tail of the quarantine list instead of releasing it,
// so later accesses to it can be flagged as use-after-free.
void HeapProxy::QuarantineBlock(BlockHeader* block) {
  DCHECK(block != NULL);

  BlockTrailer* free_block_trailer = GetBlockTrailer(block);
  DCHECK(free_block_trailer->next_free_block == NULL);
  block->state = QUARANTINED;

  // Poison the released alloc (marked as freed) and quarantine the block.
  // Note that the original data is left intact. This may make it easier
  // to debug a crash report/dump on access to a quarantined block.
  size_t alloc_size = GetAllocSize(block->block_size);
  uint8* mem = ToAlloc(block);
  Shadow::MarkAsFreed(mem, block->block_size);

  {
    base::AutoLock lock(lock_);

    quarantine_size_ += alloc_size;
    // Link the block at the tail of the FIFO list.
    if (tail_ != NULL) {
      GetBlockTrailer(tail_)->next_free_block = block;
    } else {
      DCHECK(head_ == NULL);
      head_ = block;
    }
    tail_ = block;
  }

  // Evict old blocks if this push overflowed the quarantine cap.
  TrimQuarantine();
}
 447    :  
 448  E :  size_t HeapProxy::GetAllocSize(size_t bytes) {
 449    :    // The Windows heap is 8-byte granular, so there's no gain in a lower
 450    :    // allocation granularity.
 451  E :    const size_t kAllocGranularity = 8;
 452  E :    bytes += sizeof(BlockHeader);
 453  E :    bytes += sizeof(BlockTrailer);
 454  E :    return common::AlignUp(bytes, kAllocGranularity);
 455  E :  }
 456    :  
// Maps a user pointer back to its block header. Returns NULL for a NULL
// input, or — after reporting an error — when the header's magic number
// doesn't match (corruption, or a pointer not from this allocator).
HeapProxy::BlockHeader* HeapProxy::ToBlockHeader(const void* alloc) {
  if (alloc == NULL)
    return NULL;

  const uint8* mem = static_cast<const uint8*>(alloc);
  const BlockHeader* header = reinterpret_cast<const BlockHeader*>(mem) - 1;
  if (header->magic_number != kBlockHeaderSignature) {
    // Capture the context and stack of the faulty call for the report.
    CONTEXT context = {};
    ::RtlCaptureContext(&context);

    StackCapture stack;
    stack.InitFromStack();

    // Alias the error info so it survives into any crash dump.
    AsanErrorInfo bad_access_info = {};
    base::debug::Alias(&bad_access_info);

    // Try a full report first; fall back to a generic one if the address
    // can't be attributed to a heap block.
    if (!OnBadAccess(mem,
                     context,
                     stack,
                     ASAN_UNKNOWN_ACCESS,
                     0,
                     &bad_access_info)) {
      bad_access_info.error_type = UNKNOWN_BAD_ACCESS;
      ReportAsanErrorBase("unknown bad access",
                          mem,
                          context,
                          stack,
                          UNKNOWN_BAD_ACCESS,
                          ASAN_READ_ACCESS,
                          0);
    }
    return NULL;
  }

  return const_cast<BlockHeader*>(header);
}
 493    :  
// Returns the trailer of |header|'s block: it lives right after the user
// payload, rounded up to 4-byte alignment.
HeapProxy::BlockTrailer* HeapProxy::GetBlockTrailer(const BlockHeader* header) {
  DCHECK(header != NULL);
  DCHECK_EQ(kBlockHeaderSignature, header->magic_number);
  // We want the block trailers to be 4 byte aligned after the end of a block.
  const size_t kBlockTrailerAlignment = 4;

  uint8* mem = reinterpret_cast<uint8*>(const_cast<BlockHeader*>(header));
  size_t aligned_size =
      common::AlignUp(sizeof(BlockHeader) + header->block_size,
                      kBlockTrailerAlignment);

  return reinterpret_cast<BlockTrailer*>(mem + aligned_size);
}
 507    :  
 508  E :  uint8* HeapProxy::ToAlloc(BlockHeader* block) {
 509  E :    DCHECK(block != NULL);
 510  E :    DCHECK_EQ(kBlockHeaderSignature, block->magic_number);
 511  E :    DCHECK(block->state == ALLOCATED || block->state == QUARANTINED);
 512    :  
 513  E :    uint8* mem = reinterpret_cast<uint8*>(block);
 514    :  
 515  E :    return mem + sizeof(BlockHeader);
 516  E :  }
 517    :  
 518    :  void HeapProxy::ReportAddressInformation(const void* addr,
 519    :                                           BlockHeader* header,
 520    :                                           BadAccessKind bad_access_kind,
 521  E :                                           AsanErrorInfo* bad_access_info) {
 522  E :    DCHECK(addr != NULL);
 523  E :    DCHECK(header != NULL);
 524  E :    DCHECK(bad_access_info != NULL);
 525    :  
 526  E :    BlockTrailer* trailer = GetBlockTrailer(header);
 527  E :    DCHECK(trailer != NULL);
 528    :  
 529  E :    uint8* block_alloc = ToAlloc(header);
 530  E :    int offset = 0;
 531  E :    char* offset_relativity = "";
 532  E :    switch (bad_access_kind) {
 533    :      case HEAP_BUFFER_OVERFLOW:
 534    :        offset = static_cast<const uint8*>(addr) - block_alloc
 535  E :            - header->block_size;
 536  E :        offset_relativity = "beyond";
 537  E :        break;
 538    :      case HEAP_BUFFER_UNDERFLOW:
 539  E :        offset = block_alloc - static_cast<const uint8*>(addr);
 540  E :        offset_relativity = "before";
 541  E :        break;
 542    :      case USE_AFTER_FREE:
 543  E :        offset = static_cast<const uint8*>(addr) - block_alloc;
 544  E :        offset_relativity = "inside";
 545  E :        break;
 546    :      default:
 547  i :        NOTREACHED() << "Error trying to dump address information.";
 548    :    }
 549    :  
 550    :    size_t shadow_info_bytes = base::snprintf(
 551    :        bad_access_info->shadow_info,
 552    :        arraysize(bad_access_info->shadow_info) - 1,
 553    :        "%08X is %d bytes %s %d-byte block [%08X,%08X)\n",
 554    :        addr,
 555    :        offset,
 556    :        offset_relativity,
 557    :        header->block_size,
 558    :        block_alloc,
 559  E :        block_alloc + header->block_size);
 560    :  
 561    :    // Ensure that we had enough space to store the full shadow info message.
 562  E :    DCHECK_LE(shadow_info_bytes, arraysize(bad_access_info->shadow_info) - 1);
 563    :  
 564    :    // If we're not writing textual logs we can return here.
 565  E :    if (!logger_->log_as_text())
 566  i :      return;
 567    :  
 568  E :    logger_->Write(bad_access_info->shadow_info);
 569  E :    if (trailer->free_stack != NULL) {
 570    :      std::string message = base::StringPrintf(
 571  E :          "freed here (stack_id=0x%08X):\n", trailer->free_stack->stack_id());
 572    :      logger_->WriteWithStackTrace(message,
 573    :                                   trailer->free_stack->frames(),
 574  E :                                   trailer->free_stack->num_frames());
 575  E :    }
 576  E :    if (header->alloc_stack != NULL) {
 577    :      std::string message = base::StringPrintf(
 578    :          "previously allocated here (stack_id=0x%08X):\n",
 579  E :          header->alloc_stack->stack_id());
 580    :      logger_->WriteWithStackTrace(message,
 581    :                                   header->alloc_stack->frames(),
 582  E :                                   header->alloc_stack->num_frames());
 583  E :    }
 584    :  
 585  E :    std::string shadow_text;
 586  E :    Shadow::AppendShadowMemoryText(addr, &shadow_text);
 587  E :    logger_->Write(shadow_text);
 588  E :  }
 589    :  
 590    :  HeapProxy::BadAccessKind HeapProxy::GetBadAccessKind(const void* addr,
 591  E :                                                       BlockHeader* header) {
 592  E :    DCHECK(addr != NULL);
 593  E :    DCHECK(header != NULL);
 594    :  
 595  E :    BadAccessKind bad_access_kind = UNKNOWN_BAD_ACCESS;
 596    :  
 597  E :    if (header->state == QUARANTINED) {
 598    :      // At this point we can't know if this address belongs to this
 599    :      // quarantined block... If the block containing this address has been
 600    :      // moved from the quarantine list its memory space could have been re-used
 601    :      // and freed again (so having this block in the quarantine list don't
 602    :      // guarantee that this is the original block).
 603    :      // TODO(sebmarchand): Find a way to fix this bug.
 604  E :      bad_access_kind = USE_AFTER_FREE;
 605  E :    } else {
 606  E :      if (addr < (ToAlloc(header)))
 607  E :        bad_access_kind = HEAP_BUFFER_UNDERFLOW;
 608  E :      else if (addr >= (ToAlloc(header) + header->block_size))
 609  E :        bad_access_kind = HEAP_BUFFER_OVERFLOW;
 610    :    }
 611  E :    return bad_access_kind;
 612  E :  }
 613    :  
// Walks the underlying heap (under its lock) looking for the entry that
// contains |addr|. Returns NULL if no containing entry carries a valid
// block signature.
HeapProxy::BlockHeader* HeapProxy::FindAddressBlock(const void* addr) {
  DCHECK(addr != NULL);
  PROCESS_HEAP_ENTRY heap_entry = {};
  memset(&heap_entry, 0, sizeof(heap_entry));
  BlockHeader* header = NULL;

  // Walk through the heap to find the block containing @p addr.
  HeapLocker heap_locker(this);
  while (Walk(&heap_entry)) {
    uint8* entry_upper_bound =
        static_cast<uint8*>(heap_entry.lpData) + heap_entry.cbData;

    if (heap_entry.lpData <= addr && entry_upper_bound > addr) {
      header = reinterpret_cast<BlockHeader*>(heap_entry.lpData);
      // Ensures that the block have been allocated by this proxy.
      if (header->magic_number == kBlockHeaderSignature) {
        DCHECK(header->state != FREED);
        break;
      } else {
        // Not one of ours: keep walking.
        header = NULL;
      }
    }
  }

  return header;
}
 640    :  
// Attributes the bad access at |addr| to a block of this heap, fills
// |bad_access_info| (error type, time since free, alloc/free stacks and
// thread ids) and reports the error. Returns false when the address doesn't
// belong to this heap or the access kind can't be determined.
bool HeapProxy::OnBadAccess(const void* addr,
                            const CONTEXT& context,
                            const StackCapture& stack,
                            AccessMode access_mode,
                            size_t access_size,
                            AsanErrorInfo* bad_access_info) {
  DCHECK(addr != NULL);
  base::AutoLock lock(lock_);
  BadAccessKind bad_access_kind = UNKNOWN_BAD_ACCESS;
  BlockHeader* header = FindAddressBlock(addr);

  if (header == NULL)
    return false;

  BlockTrailer* trailer = GetBlockTrailer(header);
  DCHECK(trailer != NULL);

  bad_access_kind = GetBadAccessKind(addr, header);
  // Get the bad access description if we've been able to determine its kind.
  if (bad_access_kind != UNKNOWN_BAD_ACCESS) {
    bad_access_info->error_type = bad_access_kind;
    bad_access_info->microseconds_since_free = GetTimeSinceFree(header);

    const char* bug_descr = AccessTypeToStr(bad_access_kind);
    // Copy the allocation stack/thread info into the report, if present.
    if (header->alloc_stack != NULL) {
      memcpy(bad_access_info->alloc_stack,
             header->alloc_stack->frames(),
             header->alloc_stack->num_frames() * sizeof(void*));
      bad_access_info->alloc_stack_size = header->alloc_stack->num_frames();
      bad_access_info->alloc_tid = header->alloc_tid;
    }
    // Likewise for the free stack/thread info.
    if (trailer->free_stack != NULL) {
      memcpy(bad_access_info->free_stack,
             trailer->free_stack->frames(),
             trailer->free_stack->num_frames() * sizeof(void*));
      bad_access_info->free_stack_size = trailer->free_stack->num_frames();
      bad_access_info->free_tid = trailer->free_tid;
    }
    ReportAsanError(bug_descr,
                    addr,
                    context,
                    stack,
                    bad_access_kind,
                    header,
                    access_mode,
                    access_size,
                    bad_access_info);
    return true;
  }

  return false;
}
 693    :  
// Reports an access to an address that doesn't belong to any known heap
// block; there's no header to describe, so only the base report and the
// caller's context are emitted.
void HeapProxy::ReportWildAccess(const void* addr,
                                 const CONTEXT& context,
                                 const StackCapture& stack,
                                 AccessMode access_mode,
                                 size_t access_size) {
  DCHECK(addr != NULL);
  ReportAsanErrorBase(AccessTypeToStr(WILD_ACCESS),
                      addr,
                      context,
                      stack,
                      WILD_ACCESS,
                      access_mode,
                      access_size);

  ASANDbgPrintContext(context);
}
 710    :  
// Full error report for an access attributed to a block: emits the base
// report, the alloc/free stack traces as Windbg commands, the address
// details, and finally the caller's context.
void HeapProxy::ReportAsanError(const char* bug_descr,
                                const void* addr,
                                const CONTEXT& context,
                                const StackCapture& stack,
                                BadAccessKind bad_access_kind,
                                BlockHeader* header,
                                AccessMode access_mode,
                                size_t access_size,
                                AsanErrorInfo* bad_access_info) {
  DCHECK(bug_descr != NULL);
  DCHECK(addr != NULL);
  DCHECK(header != NULL);

  BlockTrailer* trailer = GetBlockTrailer(header);
  DCHECK(trailer != NULL);

  ReportAsanErrorBase(bug_descr,
                      addr,
                      context,
                      stack,
                      bad_access_kind,
                      access_mode,
                      access_size);

  // Print the Windbg information to display the allocation stack if present.
  if (header->alloc_stack != NULL) {
    ASANDbgMessage(L"Allocation stack trace:");
    ASANDbgCmd(L"dps %p l%d",
               header->alloc_stack->frames(),
               header->alloc_stack->num_frames());
  }

  // Print the Windbg information to display the free stack if present.
  if (trailer->free_stack != NULL) {
    ASANDbgMessage(L"Free stack trace:");
    ASANDbgCmd(L"dps %p l%d",
               trailer->free_stack->frames(),
               trailer->free_stack->num_frames());
  }

  ReportAddressInformation(addr, header, bad_access_kind, bad_access_info);

  ASANDbgPrintContext(context);
}
 755    :  
 756    :  void HeapProxy::ReportAsanErrorBase(const char* bug_descr,
 757    :                                      const void* addr,
 758    :                                      const CONTEXT& context,
 759    :                                      const StackCapture& stack,
 760    :                                      BadAccessKind bad_access_kind,
 761    :                                      AccessMode access_mode,
 762  E :                                      size_t access_size) {
 763  E :    DCHECK(bug_descr != NULL);
 764  E :    DCHECK(addr != NULL);
 765    :  
 766    :    // If we're not logging text
 767  E :    if (!logger_->log_as_text())
 768  i :      return;
 769    :  
 770    :    // Print the base of the Windbg help message.
 771    :    ASANDbgMessage(L"An Asan error has been found (%ls), here are the details:",
 772  E :                   base::SysUTF8ToWide(bug_descr).c_str());
 773    :  
 774    :    // TODO(sebmarchand): Print PC, BP and SP.
 775    :    std::string output(base::StringPrintf(
 776    :        "SyzyASAN error: %s on address 0x%08X (stack_id=0x%08X)\n",
 777  E :        bug_descr, addr, stack.stack_id()));
 778  E :    if (access_mode != ASAN_UNKNOWN_ACCESS) {
 779  E :      const char* access_mode_str = NULL;
 780  E :      if (access_mode == ASAN_READ_ACCESS)
 781  E :        access_mode_str = "READ";
 782  E :      else
 783  E :        access_mode_str = "WRITE";
 784    :      base::StringAppendF(&output,
 785    :                          "%s of size %d at 0x%08X\n",
 786    :                          access_mode_str,
 787  E :                          access_size);
 788    :    }
 789    :  
 790    :    // Log the failure and stack.
 791  E :    logger_->WriteWithContext(output, context);
 792  E :  }
 793    :  
 794  E :  const char* HeapProxy::AccessTypeToStr(BadAccessKind bad_access_kind) {
 795  E :    switch (bad_access_kind) {
 796    :      case USE_AFTER_FREE:
 797  E :        return kHeapUseAfterFree;
 798    :      case HEAP_BUFFER_UNDERFLOW:
 799  E :        return kHeapBufferUnderFlow;
 800    :      case HEAP_BUFFER_OVERFLOW:
 801  E :        return kHeapBufferOverFlow;
 802    :      case WILD_ACCESS:
 803  E :        return kWildAccess;
 804    :      case UNKNOWN_BAD_ACCESS:
 805  i :        return kHeapUnknownError;
 806    :      default:
 807  i :        NOTREACHED() << "Unexpected bad access kind.";
 808  i :        return NULL;
 809    :    }
 810  E :  }
 811    :  
 812  E :  LIST_ENTRY* HeapProxy::ToListEntry(HeapProxy* proxy) {
 813  E :    DCHECK(proxy != NULL);
 814  E :    return &proxy->list_entry_;
 815  E :  }
 816    :  
 817  E :  HeapProxy* HeapProxy::FromListEntry(LIST_ENTRY* list_entry) {
 818  E :    DCHECK(list_entry != NULL);
 819  E :    return CONTAINING_RECORD(list_entry, HeapProxy, list_entry_);
 820  E :  }
 821    :  
 822  E :  uint64 HeapProxy::GetTimeSinceFree(const BlockHeader* header) {
 823  E :    DCHECK(header != NULL);
 824    :  
 825  E :    if (header->state == ALLOCATED)
 826  E :      return 0;
 827    :  
 828  E :    BlockTrailer* trailer = GetBlockTrailer(header);
 829  E :    DCHECK(trailer != NULL);
 830    :  
 831  E :    uint64 cycles_since_free = trace::common::GetTsc() - trailer->free_timestamp;
 832    :  
 833    :    // On x86/64, as long as cpu_cycles_per_us_ is 64-bit aligned, the write is
 834    :    // atomic, which means we don't care about multiple writers since it's not an
 835    :    // update based on the previous value.
 836  E :    if (cpu_cycles_per_us_ == 0.0)
 837  E :      cpu_cycles_per_us_ = GetCpuCyclesPerUs();
 838  E :    DCHECK_NE(0.0, cpu_cycles_per_us_);
 839    :  
 840  E :    return cycles_since_free / cpu_cycles_per_us_;
 841  E :  }
 842    :  
 843    :  }  // namespace asan
 844    :  }  // namespace agent

Coverage information generated Thu Jul 04 09:34:53 2013.