Coverage for /Syzygy/agent/asan/heaps/large_block_heap.cc

Coverage: 97.2%
Lines executed / instrumented / missing: 70 / 72 / 2
Language: C++
Group: source

Line-by-line coverage (markers: E = instrumented and executed, i = instrumented but not executed, blank = not instrumented):

   1    :  // Copyright 2014 Google Inc. All Rights Reserved.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  
  15    :  #include "syzygy/agent/asan/heaps/large_block_heap.h"
  16    :  
  17    :  #include <windows.h>
  18    :  
  19    :  #include <algorithm>
  20    :  
  21    :  #include "base/logging.h"
  22    :  #include "syzygy/common/align.h"
  23    :  
  24    :  namespace agent {
  25    :  namespace asan {
  26    :  namespace heaps {
  27    :  
  28    :  LargeBlockHeap::LargeBlockHeap(HeapInterface* internal_heap)
  29  E :      : allocs_(HeapAllocator<void*>(internal_heap)) {
  30  E :  }
  31    :  
  32  E :  LargeBlockHeap::~LargeBlockHeap() {
  33    :    // No need to lock here, as concurrent access to an object under destruction
  34    :    // is a programming error.
  35    :    // If there are still allocations in the heap at destruction, then freeing
  36    :    // them here will not clear the associated shadow metadata.
  37  E :    CHECK_EQ(0U, allocs_.size());
  38  E :  }
  39    :  
  40  E :  HeapType LargeBlockHeap::GetHeapType() const {
  41  E :    return kLargeBlockHeap;
  42  E :  }
  43    :  
  44  E :  uint32 LargeBlockHeap::GetHeapFeatures() const {
  45  E :    return kHeapSupportsIsAllocated | kHeapSupportsGetAllocationSize;
  46  E :  }
  47    :  
  48  E :  void* LargeBlockHeap::Allocate(size_t bytes) {
  49    :    // Always allocate some memory so as to guarantee that zero-sized
  50    :    // allocations get an actual distinct address each time.
  51  E :    size_t size = std::max(bytes, 1u);
  52  E :    size = ::common::AlignUp(size, GetPageSize());
  53  E :    void* alloc = ::VirtualAlloc(NULL, size, MEM_COMMIT, PAGE_READWRITE);
  54  E :    Allocation allocation = { alloc, bytes };
  55    :  
  56  E :    if (alloc != NULL) {
  57  E :      ::common::AutoRecursiveLock lock(lock_);
  58    :  
  59  E :      bool inserted = allocs_.insert(allocation).second;
  60  E :      DCHECK(inserted);
  61  E :    }
  62    :  
  63  E :    return alloc;
  64  E :  }
  65    :  
  66  E :  bool LargeBlockHeap::Free(void* alloc) {
  67  E :    Allocation allocation = { alloc, 0 };
  68    :  
  69    :    {
  70    :      // First lookup the allocation to ensure it was made by us.
  71  E :      ::common::AutoRecursiveLock lock(lock_);
  72  E :      AllocationSet::iterator it = allocs_.find(allocation);
  73  E :      if (it == allocs_.end())
  74  i :        return false;
  75  E :      allocs_.erase(it);
  76  E :    }
  77    :  
  78  E :    ::VirtualFree(alloc, 0, MEM_RELEASE);
  79  E :    return true;
  80  E :  }
  81    :  
  82  E :  bool LargeBlockHeap::IsAllocated(const void* alloc) {
  83  E :    Allocation allocation = { alloc, 0 };
  84    :  
  85    :    {
  86  E :      ::common::AutoRecursiveLock lock(lock_);
  87  E :      AllocationSet::iterator it = allocs_.find(allocation);
  88  E :      if (it == allocs_.end())
  89  E :        return false;
  90  E :    }
  91    :  
  92  E :    return true;
  93  E :  }
  94    :  
  95  E :  size_t LargeBlockHeap::GetAllocationSize(const void* alloc) {
  96  E :    Allocation allocation = { alloc, 0 };
  97    :  
  98    :    {
  99  E :      ::common::AutoRecursiveLock lock(lock_);
 100  E :      AllocationSet::iterator it = allocs_.find(allocation);
 101  E :      if (it == allocs_.end())
 102  i :        return kUnknownSize;
 103  E :      return it->size;
 104    :    }
 105  E :  }
 106    :  
 107  E :  void LargeBlockHeap::Lock() {
 108  E :    lock_.Acquire();
 109  E :  }
 110    :  
 111  E :  void LargeBlockHeap::Unlock() {
 112  E :    lock_.Release();
 113  E :  }
 114    :  
 115  E :  bool LargeBlockHeap::TryLock() {
 116  E :    return lock_.Try();
 117  E :  }
 118    :  
 119    :  void* LargeBlockHeap::AllocateBlock(size_t size,
 120    :                                      size_t min_left_redzone_size,
 121    :                                      size_t min_right_redzone_size,
 122  E :                                      BlockLayout* layout) {
 123  E :    DCHECK_NE(static_cast<BlockLayout*>(nullptr), layout);
 124    :  
 125    :    // Plan the layout with full guard pages.
 126  E :    const size_t kPageSize = GetPageSize();
 127    :    if (!BlockPlanLayout(kPageSize, kPageSize, size, kPageSize, kPageSize,
 128  E :                         layout)) {
 129  E :      return nullptr;
 130    :    }
 131  E :    DCHECK_EQ(0u, layout->block_size % kPageSize);
 132    :  
 133  E :    return Allocate(layout->block_size);
 134  E :  }
 135    :  
 136  E :  bool LargeBlockHeap::FreeBlock(const BlockInfo& block_info) {
 137  E :    DCHECK_NE(static_cast<uint8*>(NULL), block_info.block);
 138    :  
 139  E :    return Free(block_info.block);
 140  E :  }
 141    :  
 142    :  }  // namespace heaps
 143    :  }  // namespace asan
 144    :  }  // namespace agent
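
The two lines flagged "i" above are source lines 74 (Free() rejecting a pointer the heap never issued) and 102 (GetAllocationSize() reporting an unknown pointer). A minimal gtest-style sketch of a test that would exercise both branches follows. It is illustrative only: it assumes a HeapInterface implementation such as WinHeap (syzygy/agent/asan/heaps/win_heap.h) is available to act as the internal heap, and that kUnknownSize is reachable through the HeapInterface base; neither assumption comes from the report above.

  // Sketch only -- not part of the covered file above. Exercises the two
  // branches the report shows as instrumented but not executed.
  #include "gtest/gtest.h"
  #include "syzygy/agent/asan/heaps/large_block_heap.h"
  #include "syzygy/agent/asan/heaps/win_heap.h"  // Assumed HeapInterface impl.

  namespace agent {
  namespace asan {
  namespace heaps {

  TEST(LargeBlockHeapTest, RejectsForeignPointers) {
    WinHeap internal_heap;
    LargeBlockHeap heap(&internal_heap);

    int not_an_allocation = 0;
    // Source line 74: Free() must refuse a pointer it never handed out.
    EXPECT_FALSE(heap.Free(&not_an_allocation));
    // Source line 102: GetAllocationSize() must report the size as unknown.
    // kUnknownSize is assumed to be declared on HeapInterface.
    EXPECT_EQ(HeapInterface::kUnknownSize,
              heap.GetAllocationSize(&not_an_allocation));
  }

  }  // namespace heaps
  }  // namespace asan
  }  // namespace agent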

Coverage information generated Thu Mar 26 16:15:41 2015.