Coverage for /Syzygy/agent/asan/asan_heap_unittest.cc

Coverage | Lines executed / instrumented / missing (exe / inst / miss) | Language | Group
99.1%    | 666 / 672 / 0                                               | C++      | test

Line-by-line coverage:

   1    :  // Copyright 2012 Google Inc. All Rights Reserved.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  
  15    :  #include "syzygy/agent/asan/asan_heap.h"
  16    :  
  17    :  #include <algorithm>
  18    :  
  19    :  #include "base/bits.h"
  20    :  #include "base/rand_util.h"
  21    :  #include "base/sha1.h"
  22    :  #include "gtest/gtest.h"
  23    :  #include "syzygy/agent/asan/asan_logger.h"
  24    :  #include "syzygy/agent/asan/asan_runtime.h"
  25    :  #include "syzygy/agent/asan/asan_shadow.h"
  26    :  #include "syzygy/agent/asan/unittest_util.h"
  27    :  #include "syzygy/common/align.h"
  28    :  #include "syzygy/trace/common/clock.h"
  29    :  
  30    :  namespace agent {
  31    :  namespace asan {
  32    :  
  33    :  namespace {
  34    :  
  35    :  // A derived class to expose protected members for unit-testing.
  36    :  class TestShadow : public Shadow {
  37    :   public:
  38    :    using Shadow::kShadowSize;
  39    :    using Shadow::shadow_;
  40    :  };
  41    :  
  42    :  // A derived class to expose protected members for unit-testing.
  43    :  class TestHeapProxy : public HeapProxy {
  44    :   public:
  45    :    using HeapProxy::BlockHeader;
  46    :    using HeapProxy::BlockTrailer;
  47    :    using HeapProxy::AsanPointerToUserPointer;
  48    :    using HeapProxy::AsanPointerToBlockHeader;
  49    :    using HeapProxy::BlockHeaderToAsanPointer;
  50    :    using HeapProxy::BlockHeaderToBlockTrailer;
  51    :    using HeapProxy::BlockHeaderToUserPointer;
  52    :    using HeapProxy::FindBlockContainingAddress;
  53    :    using HeapProxy::FindContainingBlock;
  54    :    using HeapProxy::FindContainingFreedBlock;
  55    :    using HeapProxy::GetAllocSize;
  56    :    using HeapProxy::GetBadAccessKind;
  57    :    using HeapProxy::GetTimeSinceFree;
  58    :    using HeapProxy::InitializeAsanBlock;
  59    :    using HeapProxy::UserPointerToBlockHeader;
  60    :    using HeapProxy::UserPointerToAsanPointer;
  61    :    using HeapProxy::kDefaultAllocGranularityLog;
  62    :  
  63  E :    TestHeapProxy() { }
  64    :  
  65    :    // Calculates the underlying allocation size for an allocation of @p bytes.
  66    :    // This assume a granularity of @p kDefaultAllocGranularity bytes.
  67  E :    static size_t GetAllocSize(size_t bytes) {
  68  E :      return GetAllocSize(bytes, kDefaultAllocGranularity);
  69  E :    }
  70    :  
  71    :    // Verify that the access to @p addr contained in @p header is an underflow.
  72  E :    bool IsUnderflowAccess(uint8* addr, BlockHeader* header) {
  73  E :      return GetBadAccessKind(addr, header) == HEAP_BUFFER_UNDERFLOW;
  74  E :    }
  75    :  
  76    :    // Verify that the access to @p addr contained in @p header is an overflow.
  77  E :    bool IsOverflowAccess(uint8* addr, BlockHeader* header) {
  78  E :      return GetBadAccessKind(addr, header) == HEAP_BUFFER_OVERFLOW;
  79  E :    }
  80    :  
  81    :    // Verify that the access to @p addr contained in @p header is an use after
  82    :    // free.
  83  E :    bool IsUseAfterAccess(uint8* addr, BlockHeader* header) {
  84  E :      return GetBadAccessKind(addr, header) == USE_AFTER_FREE;
  85  E :    }
  86    :  
  87  E :    bool IsAllocated(BlockHeader* header) {
  88  E :      EXPECT_TRUE(header != NULL);
  89  E :      return header->state == ALLOCATED;
  90  E :    }
  91    :  
  92  E :    bool IsQuarantined(BlockHeader* header) {
  93  E :      EXPECT_TRUE(header != NULL);
  94  E :      return header->state == QUARANTINED;
  95  E :    }
  96    :  
  97  E :    bool IsFreed(BlockHeader* header) {
  98  E :      EXPECT_TRUE(header != NULL);
  99  E :      return header->state == FREED;
 100  E :    }
 101    :  
 102  E :    static void MarkBlockHeaderAsQuarantined(BlockHeader* header) {
 103  E :      EXPECT_TRUE(header != NULL);
 104  E :      StackCapture stack;
 105  E :      stack.InitFromStack();
 106  E :      header->free_stack = stack_cache_->SaveStackTrace(stack);
 107  E :      header->state = QUARANTINED;
 108  E :    }
 109    :  
 110  E :    static void MarkBlockHeaderAsAllocated(BlockHeader* header) {
 111  E :      EXPECT_TRUE(header != NULL);
 112  E :      header->free_stack = NULL;
 113  E :      header->state = ALLOCATED;
 114  E :    }
 115    :  
 116    :    // Determines if the address @p mem corresponds to a block in quarantine.
 117  E :    bool InQuarantine(const void* mem) {
 118  E :      base::AutoLock lock(lock_);
 119  E :      BlockHeader* current_block = head_;
 120  E :      while (current_block != NULL) {
 121    :        void* block_alloc = static_cast<void*>(
 122  E :            BlockHeaderToUserPointer(current_block));
 123  E :        EXPECT_TRUE(block_alloc != NULL);
 124  E :        if (block_alloc == mem) {
 125  E :          EXPECT_TRUE(current_block->state == QUARANTINED);
 126  E :          return true;
 127    :        }
 128  E :        current_block = BlockHeaderToBlockTrailer(current_block)->next_free_block;
 129  E :      }
 130  E :      return false;
 131  E :    }
 132    :  };
 133    :  
 134    :  class HeapTest : public testing::TestWithAsanLogger {
 135    :   public:
 136  E :    HeapTest() : stack_cache_(&logger_) {
 137  E :    }
 138    :  
 139  E :    virtual void SetUp() OVERRIDE {
 140  E :      testing::TestWithAsanLogger::SetUp();
 141    :  
 142  E :      HeapProxy::Init(&stack_cache_);
 143  E :      Shadow::SetUp();
 144    :  
 145  E :      logger_.set_instance_id(instance_id());
 146  E :      logger_.Init();
 147  E :      ASSERT_TRUE(proxy_.Create(0, 0, 0));
 148  E :    }
 149    :  
 150  E :    virtual void TearDown() OVERRIDE {
 151  E :      ASSERT_TRUE(proxy_.Destroy());
 152  E :      Shadow::TearDown();
 153  E :      testing::TestWithAsanLogger::TearDown();
 154  E :    }
 155    :  
 156    :    // Verifies that [alloc, alloc + size) is accessible, and that
 157    :    // [alloc - 1] and [alloc+size] are poisoned.
 158  E :    void VerifyAllocAccess(void* alloc, size_t size) {
 159  E :      uint8* mem = reinterpret_cast<uint8*>(alloc);
 160  E :      ASSERT_FALSE(Shadow::IsAccessible(mem - 1));
 161  E :      ASSERT_EQ(Shadow::GetShadowMarkerForAddress(mem - 1),
 162    :                Shadow::kHeapLeftRedzone);
 163  E :      for (size_t i = 0; i < size; ++i)
 164  E :        ASSERT_TRUE(Shadow::IsAccessible(mem + i));
 165  E :      ASSERT_FALSE(Shadow::IsAccessible(mem + size));
 166  E :    }
 167    :  
 168    :    // Verifies that [alloc-1, alloc+size] is poisoned.
 169  E :    void VerifyFreedAccess(void* alloc, size_t size) {
 170  E :      uint8* mem = reinterpret_cast<uint8*>(alloc);
 171  E :      ASSERT_FALSE(Shadow::IsAccessible(mem - 1));
 172  E :      ASSERT_EQ(Shadow::GetShadowMarkerForAddress(mem - 1),
 173    :                Shadow::kHeapLeftRedzone);
 174  E :      for (size_t i = 0; i < size; ++i) {
 175  E :        ASSERT_FALSE(Shadow::IsAccessible(mem + i));
 176  E :        ASSERT_EQ(Shadow::GetShadowMarkerForAddress(mem + i),
 177    :                  Shadow::kHeapFreedByte);
 178  E :      }
 179  E :      ASSERT_FALSE(Shadow::IsAccessible(mem + size));
 180  E :    }
 181    :  
 182  E :    void RandomSetMemory(void* alloc, size_t size) {
 183  E :      base::RandBytes(alloc, size);
 184  E :    }
 185    :  
 186    :   protected:
 187    :    // Arbitrary constant for all size limit.
 188    :    static const size_t kMaxAllocSize = 134584;
 189    :  
 190    :    AsanLogger logger_;
 191    :    StackCaptureCache stack_cache_;
 192    :    TestHeapProxy proxy_;
 193    :  };
 194    :  
 195    :  }  // namespace
 196    :  
 197  E :  TEST_F(HeapTest, ToFromHandle) {
 198  E :    HANDLE handle = HeapProxy::ToHandle(&proxy_);
 199  E :    ASSERT_TRUE(handle != NULL);
 200  E :    ASSERT_EQ(&proxy_, HeapProxy::FromHandle(handle));
 201  E :  }
 202    :  
 203  E :  TEST_F(HeapTest, SetQuarantineMaxSize) {
 204  E :    size_t quarantine_size = proxy_.quarantine_max_size() * 2;
 205    :    // Increments the quarantine max size if it was set to 0.
 206  E :    if (quarantine_size == 0)
 207  i :      quarantine_size++;
 208  E :    proxy_.SetQuarantineMaxSize(quarantine_size);
 209  E :    ASSERT_EQ(quarantine_size, proxy_.quarantine_max_size());
 210  E :  }
 211    :  
 212  E :  TEST_F(HeapTest, PopOnSetQuarantineMaxSize) {
 213  E :    const size_t kAllocSize = 100;
 214  E :    const size_t real_alloc_size = TestHeapProxy::GetAllocSize(kAllocSize);
 215  E :    LPVOID mem = proxy_.Alloc(0, kAllocSize);
 216  E :    ASSERT_FALSE(proxy_.InQuarantine(mem));
 217  E :    proxy_.SetQuarantineMaxSize(real_alloc_size);
 218  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 219    :    // The quarantine is just large enough to keep this block.
 220  E :    ASSERT_TRUE(proxy_.InQuarantine(mem));
 221    :    // We resize the quarantine to a smaller size, the block should pop out.
 222  E :    proxy_.SetQuarantineMaxSize(real_alloc_size - 1);
 223  E :    ASSERT_FALSE(proxy_.InQuarantine(mem));
 224  E :  }
 225    :  
 226  E :  TEST_F(HeapTest, Quarantine) {
 227  E :    const size_t kAllocSize = 100;
 228  E :    const size_t real_alloc_size = TestHeapProxy::GetAllocSize(kAllocSize);
 229  E :    const size_t number_of_allocs = 16;
 230  E :    proxy_.SetQuarantineMaxSize(real_alloc_size * number_of_allocs);
 231    :  
 232  E :    LPVOID mem = proxy_.Alloc(0, kAllocSize);
 233  E :    ASSERT_TRUE(mem != NULL);
 234  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 235    :    // Allocate a bunch of blocks until the first one is pushed out of the
 236    :    // quarantine.
 237  E :    for (size_t i = 0; i < number_of_allocs; ++i) {
 238  E :      ASSERT_TRUE(proxy_.InQuarantine(mem));
 239  E :      LPVOID mem2 = proxy_.Alloc(0, kAllocSize);
 240  E :      ASSERT_TRUE(mem2 != NULL);
 241  E :      ASSERT_TRUE(proxy_.Free(0, mem2));
 242  E :      ASSERT_TRUE(proxy_.InQuarantine(mem2));
 243  E :    }
 244    :  
 245  E :    ASSERT_FALSE(proxy_.InQuarantine(mem));
 246  E :  }
 247    :  
 248  E :  TEST_F(HeapTest, UnpoisonsQuarantine) {
 249  E :    const size_t kAllocSize = 100;
 250  E :    const size_t real_alloc_size = TestHeapProxy::GetAllocSize(kAllocSize);
 251  E :    proxy_.SetQuarantineMaxSize(real_alloc_size);
 252    :  
 253    :    // Allocate a memory block and directly free it, this puts it in the
 254    :    // quarantine.
 255  E :    void* mem = proxy_.Alloc(0, kAllocSize);
 256  E :    ASSERT_TRUE(mem != NULL);
 257  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 258  E :    ASSERT_TRUE(proxy_.InQuarantine(mem));
 259    :  
 260    :    // Assert that the shadow memory has been correctly poisoned.
 261    :    intptr_t mem_start = reinterpret_cast<intptr_t>(
 262  E :        proxy_.UserPointerToBlockHeader(mem));
 263  E :    ASSERT_EQ(0, (mem_start & 7) );
 264  E :    size_t shadow_start = mem_start >> 3;
 265  E :    size_t shadow_alloc_size = real_alloc_size >> 3;
 266  E :    for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i) {
 267  E :      ASSERT_NE(TestShadow::kHeapAddressableByte, TestShadow::shadow_[i]);
 268  E :    }
 269    :  
 270    :    // Flush the quarantine.
 271  E :    proxy_.SetQuarantineMaxSize(0);
 272    :  
 273    :    // Assert that the quarantine has been correctly unpoisoned.
 274  E :    for (size_t i = shadow_start; i < shadow_start + shadow_alloc_size; ++i) {
 275  E :      ASSERT_EQ(TestShadow::kHeapAddressableByte, TestShadow::shadow_[i]);
 276  E :    }
 277  E :  }
 278    :  
 279  E :  TEST_F(HeapTest, Realloc) {
 280  E :    const size_t kAllocSize = 100;
 281    :    // As a special case, a realloc with a NULL input should succeed.
 282  E :    LPVOID mem = proxy_.ReAlloc(0, NULL, kAllocSize);
 283  E :    ASSERT_TRUE(mem != NULL);
 284  E :    mem = proxy_.ReAlloc(0, mem, kAllocSize + 5);
 285  E :    ASSERT_TRUE(mem != NULL);
 286    :  
 287    :    // We always fail reallocs with the in-place flag.
 288    :    ASSERT_EQ(NULL,
 289  E :              proxy_.ReAlloc(HEAP_REALLOC_IN_PLACE_ONLY, NULL, kAllocSize));
 290    :    ASSERT_EQ(NULL,
 291  E :              proxy_.ReAlloc(HEAP_REALLOC_IN_PLACE_ONLY, mem, kAllocSize - 10));
 292    :    ASSERT_EQ(NULL,
 293  E :              proxy_.ReAlloc(HEAP_REALLOC_IN_PLACE_ONLY, mem, kAllocSize + 10));
 294    :  
 295  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 296  E :  }
 297    :  
 298  E :  TEST_F(HeapTest, AllocFree) {
 299  E :    const size_t kAllocSize = 100;
 300  E :    LPVOID mem = proxy_.Alloc(0, kAllocSize);
 301  E :    ASSERT_TRUE(mem != NULL);
 302  E :    ASSERT_EQ(kAllocSize, proxy_.Size(0, mem));
 303  E :    const size_t kReAllocSize = 2 * kAllocSize;
 304  E :    mem = proxy_.ReAlloc(0, mem, kReAllocSize);
 305  E :    ASSERT_EQ(kReAllocSize, proxy_.Size(0, mem));
 306  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 307  E :  }
 308    :  
 309  E :  TEST_F(HeapTest, DoubleFree) {
 310  E :    const size_t kAllocSize = 100;
 311    :    // Ensure that the quarantine is large enough to keep this block, this is
 312    :    // needed for the use-after-free check.
 313  E :    proxy_.SetQuarantineMaxSize(TestHeapProxy::GetAllocSize(kAllocSize));
 314  E :    LPVOID mem = proxy_.Alloc(0, kAllocSize);
 315  E :    ASSERT_TRUE(mem != NULL);
 316  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 317  E :    ASSERT_TRUE(proxy_.IsQuarantined(proxy_.UserPointerToBlockHeader(mem)));
 318  E :    ASSERT_FALSE(proxy_.Free(0, mem));
 319  E :  }
 320    :  
 321  E :  TEST_F(HeapTest, AllocsAccessibility) {
 322    :    // Ensure that the quarantine is large enough to keep the allocated blocks in
 323    :    // this test.
 324  E :    proxy_.SetQuarantineMaxSize(kMaxAllocSize * 2);
 325  E :    for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
 326    :      // Do an alloc/realloc/free and test that access is correctly managed.
 327  E :      void* mem = proxy_.Alloc(0, size);
 328  E :      ASSERT_TRUE(mem != NULL);
 329  E :      ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(mem, size));
 330  E :      RandomSetMemory(mem, size);
 331    :  
 332  E :      size_t new_size = size;
 333  E :      while (new_size == size)
 334  E :        new_size = base::RandInt(size / 2, size * 2);
 335    :  
 336  E :      unsigned char sha1_before[base::kSHA1Length] = {};
 337    :      base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
 338    :                          std::min(size, new_size),
 339  E :                          sha1_before);
 340    :  
 341  E :      void* new_mem = proxy_.ReAlloc(0, mem, size * 2);
 342  E :      ASSERT_TRUE(new_mem != NULL);
 343  E :      ASSERT_NE(mem, new_mem);
 344    :  
 345  E :      unsigned char sha1_after[base::kSHA1Length] = {};
 346    :      base::SHA1HashBytes(reinterpret_cast<unsigned char*>(new_mem),
 347    :                          std::min(size, new_size),
 348  E :                          sha1_after);
 349  E :      ASSERT_EQ(0, memcmp(sha1_before, sha1_after, base::kSHA1Length));
 350    :  
 351  E :      ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(mem, size));
 352  E :      ASSERT_NO_FATAL_FAILURE(VerifyAllocAccess(new_mem, size * 2));
 353    :  
 354  E :      ASSERT_TRUE(proxy_.Free(0, new_mem));
 355  E :      ASSERT_NO_FATAL_FAILURE(VerifyFreedAccess(new_mem, size * 2));
 356  E :    }
 357  E :  }
 358    :  
 359  E :  TEST_F(HeapTest, AllocZeroBytes) {
 360  E :    void* mem1 = proxy_.Alloc(0, 0);
 361  E :    ASSERT_TRUE(mem1 != NULL);
 362  E :    void* mem2 = proxy_.Alloc(0, 0);
 363  E :    ASSERT_TRUE(mem2 != NULL);
 364  E :    ASSERT_NE(mem1, mem2);
 365  E :    ASSERT_TRUE(proxy_.Free(0, mem1));
 366  E :    ASSERT_TRUE(proxy_.Free(0, mem2));
 367  E :  }
 368    :  
 369  E :  TEST_F(HeapTest, Size) {
 370  E :    for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
 371  E :      void* mem = proxy_.Alloc(0, size);
 372  E :      ASSERT_FALSE(mem == NULL);
 373  E :      ASSERT_EQ(size, proxy_.Size(0, mem));
 374  E :      ASSERT_TRUE(proxy_.Free(0, mem));
 375  E :    }
 376  E :  }
 377    :  
 378  E :  TEST_F(HeapTest, Validate) {
 379  E :    for (size_t size = 10; size < kMaxAllocSize; size = size * 5 + 123) {
 380  E :      void* mem = proxy_.Alloc(0, size);
 381  E :      ASSERT_FALSE(mem == NULL);
 382  E :      ASSERT_TRUE(proxy_.Validate(0, mem));
 383  E :      ASSERT_TRUE(proxy_.Free(0, mem));
 384  E :    }
 385  E :  }
 386    :  
 387  E :  TEST_F(HeapTest, Compact) {
 388    :    // Compact should return a non-zero size.
 389  E :    ASSERT_LT(0U, proxy_.Compact(0));
 390    :  
 391    :    // TODO(siggi): It may not be possible to allocate the size returned due
 392    :    //     to padding - fix and test.
 393  E :  }
 394    :  
 395  E :  TEST_F(HeapTest, LockUnlock) {
 396    :    // We can't really test these, aside from not crashing.
 397  E :    ASSERT_TRUE(proxy_.Lock());
 398  E :    ASSERT_TRUE(proxy_.Unlock());
 399  E :  }
 400    :  
 401  E :  TEST_F(HeapTest, Walk) {
 402    :    // We assume at least two entries to walk through.
 403  E :    PROCESS_HEAP_ENTRY entry = {};
 404  E :    ASSERT_TRUE(proxy_.Walk(&entry));
 405  E :    ASSERT_TRUE(proxy_.Walk(&entry));
 406  E :  }
 407    :  
 408  E :  TEST_F(HeapTest, SetQueryInformation) {
 409  E :    ULONG compat_flag = -1;
 410  E :    unsigned long ret = 0;
 411    :    // Get the current value of the compat flag.
 412    :    ASSERT_TRUE(
 413    :        proxy_.QueryInformation(HeapCompatibilityInformation,
 414  E :                                &compat_flag, sizeof(compat_flag), &ret));
 415  E :    ASSERT_EQ(sizeof(compat_flag), ret);
 416  E :    ASSERT_NE(~0U, compat_flag);
 417    :  
 418    :    // Put the heap in LFH, which should always succeed, except when a debugger
 419    :    // is attached. When a debugger is attached, the heap is wedged in certain
 420    :    // debug settings.
 421  E :    if (base::debug::BeingDebugged()) {
 422  i :      LOG(WARNING) << "Can't test HeapProxy::SetInformation under debugger.";
 423  i :      return;
 424    :    }
 425    :  
 426  E :    compat_flag = 2;
 427    :    ASSERT_TRUE(
 428    :        proxy_.SetInformation(HeapCompatibilityInformation,
 429  E :                              &compat_flag, sizeof(compat_flag)));
 430  E :  }
 431    :  
 432    :  namespace {
 433    :  
 434    :  // Here's the block layout created in this fixture:
 435    :  // +-----+------+-----+-----+-----+-----+-----+-----+-----+------+-----+-----+
 436    :  // |     |      |     | BH3 | DB3 | BT3 | BH4 | DB4 | BT4 | GAP2 |     |     |
 437    :  // |     | GAP1 | BH2 +-----+-----+-----+-----+-----+-----+------+ BT2 |     |
 438    :  // | BH1 |      |     |                   DB2                    |     | BT1 |
 439    :  // |     |------+-----+------------------------------------------+-----+     |
 440    :  // |     |                             DB1                             |     |
 441    :  // +-----+-------------------------------------------------------------+-----+
 442    :  // Legend:
 443    :  //   - BHX: Block header of the block X.
 444    :  //   - DBX: Data block of the block X.
 445    :  //   - BTX: Block trailer of the block X.
 446    :  //   - GAP1: Memory gap between the header of block 1 and that of block 2. This
 447    :  //     is due to the fact that block 2 has a non standard alignment and the
 448    :  //     beginning of its header is aligned to this value.
 449    :  //   - GAP2: Memory gap between block 4 and the trailer of block 2.
 450    :  // Remarks:
 451    :  //   - Block 1, 3 and 4 are 8 bytes aligned.
 452    :  //   - Block 2 is 64 bytes aligned.
 453    :  //   - Block 3 and 4 are both contained in block 2, which is contained in
 454    :  //     block 1.
 455    :  class NestedBlocksTest : public HeapTest {
 456    :   public:
 457    :    typedef HeapTest Super;
 458    :  
 459  E :    virtual void SetUp() OVERRIDE {
 460  E :      Super::SetUp();
 461    :  
 462  E :      InitializeBlockLayout();
 463  E :    }
 464    :  
 465  E :    virtual void TearDown() OVERRIDE {
 466    :      Shadow::Unpoison(aligned_buffer_,
 467  E :                       kBufferSize - (aligned_buffer_ - buffer_));
 468  E :      Super::TearDown();
 469  E :    }
 470    :  
 471  E :    void InitializeBlockLayout() {
 472    :      inner_blocks_size_ =
 473  E :          TestHeapProxy::GetAllocSize(kInternalAllocSize, kInnerBlockAlignment);
 474    :      block_2_size_ = TestHeapProxy::GetAllocSize(
 475  E :          inner_blocks_size_ * 2 + kGapSize, kBlock2Alignment);
 476  E :      const size_t kAlignMaxGap = kBlock2Alignment;
 477    :      block_1_size_ = TestHeapProxy::GetAllocSize(block_2_size_ + kAlignMaxGap,
 478  E :                                                  kBlock1Alignment);
 479    :  
 480    :      aligned_buffer_ = reinterpret_cast<uint8*>(common::AlignUp(
 481  E :          reinterpret_cast<size_t>(buffer_), Shadow::kShadowGranularity));
 482    :  
 483  E :      ASSERT_GT(kBufferSize - (aligned_buffer_ - buffer_), block_1_size_);
 484    :  
 485  E :      StackCapture stack;
 486  E :      stack.InitFromStack();
 487    :  
 488    :      // Initialize block 1.
 489    :      data_block_1_ = reinterpret_cast<uint8*>(HeapProxy::InitializeAsanBlock(
 490    :          aligned_buffer_,
 491    :          block_2_size_ + kAlignMaxGap,
 492    :          block_1_size_,
 493    :          base::bits::Log2Floor(kBlock1Alignment),
 494  E :          stack));
 495  E :      ASSERT_NE(reinterpret_cast<uint8*>(NULL), data_block_1_);
 496  E :      block_1_ = TestHeapProxy::UserPointerToBlockHeader(data_block_1_);
 497  E :      ASSERT_NE(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL), block_1_);
 498    :  
 499    :      size_t data_block_1_aligned = common::AlignUp(reinterpret_cast<size_t>(
 500  E :          data_block_1_), kBlock2Alignment);
 501    :      // Initialize block 2.
 502    :      data_block_2_ = reinterpret_cast<uint8*>(HeapProxy::InitializeAsanBlock(
 503    :          reinterpret_cast<uint8*>(data_block_1_aligned),
 504    :          inner_blocks_size_ * 2 + kGapSize,
 505    :          block_2_size_,
 506    :          base::bits::Log2Floor(kBlock2Alignment),
 507  E :          stack));
 508  E :      ASSERT_NE(reinterpret_cast<uint8*>(NULL), data_block_2_);
 509  E :      block_2_ = TestHeapProxy::UserPointerToBlockHeader(data_block_2_);
 510  E :      ASSERT_NE(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL), block_2_);
 511    :  
 512    :      // Initialize block 3.
 513    :      data_block_3_ = reinterpret_cast<uint8*>(HeapProxy::InitializeAsanBlock(
 514    :          reinterpret_cast<uint8*>(data_block_2_),
 515    :          kInternalAllocSize,
 516    :          inner_blocks_size_,
 517    :          base::bits::Log2Floor(kInnerBlockAlignment),
 518  E :          stack));
 519  E :      ASSERT_NE(reinterpret_cast<uint8*>(NULL), data_block_3_);
 520  E :      block_3_ = TestHeapProxy::UserPointerToBlockHeader(data_block_3_);
 521  E :      ASSERT_NE(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL), block_3_);
 522    :  
 523    :      // Initialize block 4.
 524    :      data_block_4_ = reinterpret_cast<uint8*>(HeapProxy::InitializeAsanBlock(
 525    :          reinterpret_cast<uint8*>(data_block_2_) + inner_blocks_size_,
 526    :          kInternalAllocSize,
 527    :          inner_blocks_size_,
 528    :          base::bits::Log2Floor(kInnerBlockAlignment),
 529  E :          stack));
 530  E :      ASSERT_NE(reinterpret_cast<uint8*>(NULL), data_block_4_);
 531  E :      block_4_ = TestHeapProxy::UserPointerToBlockHeader(data_block_4_);
 532  E :      ASSERT_NE(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL), block_4_);
 533  E :    }
 534    :  
 535    :   protected:
 536    :    static const size_t kBufferSize = 512;
 537    :    static const size_t kBlock1Alignment = 8;
 538    :    static const size_t kBlock2Alignment = 64;
 539    :    static const size_t kInnerBlockAlignment = 8;
 540    :    static const size_t kInternalAllocSize = 13;
 541    :    static const size_t kGapSize = 5;
 542    :  
 543    :    uint8 buffer_[kBufferSize];
 544    :    uint8* aligned_buffer_;
 545    :  
 546    :    uint8* data_block_1_;
 547    :    uint8* data_block_2_;
 548    :    uint8* data_block_3_;
 549    :    uint8* data_block_4_;
 550    :  
 551    :    size_t block_1_size_;
 552    :    size_t block_2_size_;
 553    :    size_t inner_blocks_size_;
 554    :  
 555    :    TestHeapProxy::BlockHeader* block_1_;
 556    :    TestHeapProxy::BlockHeader* block_2_;
 557    :    TestHeapProxy::BlockHeader* block_3_;
 558    :    TestHeapProxy::BlockHeader* block_4_;
 559    :  };
 560    :  
 561    :  }  // namespace
 562    :  
 563  E :  TEST_F(NestedBlocksTest, FindBlockContainingAddress) {
 564    :    // Test with an address before block 1.
 565    :    EXPECT_EQ(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL),
 566    :        proxy_.FindBlockContainingAddress(
 567  E :            proxy_.BlockHeaderToAsanPointer(block_1_) - 1));
 568    :  
 569    :    // Test with an address in the block header of block 1.
 570  E :    EXPECT_EQ(block_1_, proxy_.FindBlockContainingAddress(data_block_1_ - 1));
 571    :  
 572    :    // Test with an address in the gap section before the header of block 2.
 573    :    EXPECT_EQ(block_1_, proxy_.FindBlockContainingAddress(
 574  E :        proxy_.BlockHeaderToAsanPointer(block_2_) - 1));
 575    :  
 576    :    // Test with an address in the block header of block 2.
 577  E :    EXPECT_EQ(block_2_, proxy_.FindBlockContainingAddress(data_block_2_ - 1));
 578    :  
 579    :    // Test with an address in the block header of block 3.
 580  E :    EXPECT_EQ(block_3_, proxy_.FindBlockContainingAddress(data_block_3_ - 1));
 581    :  
 582    :    // Test the first byte of the data of block 2, it corresponds to the block
 583    :    // header of block 3.
 584  E :    EXPECT_EQ(block_3_, proxy_.FindBlockContainingAddress(data_block_2_));
 585    :  
 586    :    // Test the first byte of the data of block 3.
 587  E :    EXPECT_EQ(block_3_, proxy_.FindBlockContainingAddress(data_block_3_));
 588    :  
 589    :    // Test with an address in the block trailer 3.
 590    :    EXPECT_EQ(block_3_, proxy_.FindBlockContainingAddress(
 591  E :        reinterpret_cast<uint8*>(proxy_.BlockHeaderToBlockTrailer(block_3_))));
 592    :  
 593    :    // Test with an address in the block header of block 4.
 594  E :    EXPECT_EQ(block_4_, proxy_.FindBlockContainingAddress(data_block_4_ - 1));
 595    :  
 596    :    // Test the first byte of the data of block 4.
 597  E :    EXPECT_EQ(block_4_, proxy_.FindBlockContainingAddress(data_block_4_));
 598    :  
 599    :    // Test with an address in the block trailer 4.
 600    :    EXPECT_EQ(block_4_, proxy_.FindBlockContainingAddress(
 601  E :        reinterpret_cast<uint8*>(proxy_.BlockHeaderToBlockTrailer(block_4_))));
 602    :  
 603    :    // Test with an address in the gap section after block 4.
 604    :    EXPECT_EQ(block_2_, proxy_.FindBlockContainingAddress(data_block_2_ +
 605  E :        inner_blocks_size_ * 2));
 606    :  
 607    :    // Test with an address in the block trailer 2.
 608    :    EXPECT_EQ(block_2_, proxy_.FindBlockContainingAddress(
 609  E :        reinterpret_cast<uint8*>(proxy_.BlockHeaderToBlockTrailer(block_2_))));
 610    :  
 611    :    // Test with an address in the block trailer 1.
 612    :    EXPECT_EQ(block_1_, proxy_.FindBlockContainingAddress(
 613  E :        reinterpret_cast<uint8*>(proxy_.BlockHeaderToBlockTrailer(block_1_))));
 614    :  
 615    :    // Test with an address after the block trailer 1.
 616    :    EXPECT_EQ(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL),
 617    :        proxy_.FindBlockContainingAddress(reinterpret_cast<uint8*>(block_1_)
 618  E :            + block_1_size_));
 619  E :  }
 620    :  
 621  E :  TEST_F(NestedBlocksTest, FindContainingBlock) {
 622    :    ASSERT_EQ(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL),
 623  E :              TestHeapProxy::FindContainingBlock(block_1_));
 624  E :    ASSERT_EQ(block_1_, TestHeapProxy::FindContainingBlock(block_2_));
 625  E :    ASSERT_EQ(block_2_, TestHeapProxy::FindContainingBlock(block_3_));
 626  E :    ASSERT_EQ(block_2_, TestHeapProxy::FindContainingBlock(block_4_));
 627  E :  }
 628    :  
 629  E :  TEST_F(NestedBlocksTest, FindContainingFreedBlock) {
 630    :    ASSERT_EQ(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL),
 631  E :              TestHeapProxy::FindContainingFreedBlock(block_1_));
 632    :    ASSERT_EQ(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL),
 633  E :              TestHeapProxy::FindContainingFreedBlock(block_2_));
 634    :    ASSERT_EQ(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL),
 635  E :              TestHeapProxy::FindContainingFreedBlock(block_3_));
 636    :    ASSERT_EQ(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL),
 637  E :              TestHeapProxy::FindContainingFreedBlock(block_4_));
 638    :  
 639    :    // Mark the block 2 as quarantined and makes sure that it is found as the
 640    :    // containing block of block 3 and 4.
 641    :  
 642  E :    proxy_.MarkBlockHeaderAsQuarantined(block_2_);
 643    :  
 644  E :    EXPECT_EQ(block_2_, TestHeapProxy::FindContainingFreedBlock(block_3_));
 645  E :    EXPECT_EQ(block_2_, TestHeapProxy::FindContainingFreedBlock(block_4_));
 646    :  
 647  E :    proxy_.MarkBlockHeaderAsQuarantined(block_3_);
 648  E :    EXPECT_EQ(block_2_, TestHeapProxy::FindContainingFreedBlock(block_4_));
 649    :  
 650  E :    proxy_.MarkBlockHeaderAsAllocated(block_2_);
 651  E :    proxy_.MarkBlockHeaderAsAllocated(block_3_);
 652    :  
 653    :    // Mark the block 1 as quarantined and makes sure that it is found as the
 654    :    // containing block of block 2, 3 and 4.
 655    :  
 656  E :    proxy_.MarkBlockHeaderAsQuarantined(block_1_);
 657    :  
 658  E :    EXPECT_EQ(block_1_, TestHeapProxy::FindContainingFreedBlock(block_2_));
 659  E :    EXPECT_EQ(block_1_, TestHeapProxy::FindContainingFreedBlock(block_3_));
 660  E :    EXPECT_EQ(block_1_, TestHeapProxy::FindContainingFreedBlock(block_4_));
 661    :  
 662  E :    proxy_.MarkBlockHeaderAsQuarantined(block_3_);
 663  E :    EXPECT_EQ(block_1_, TestHeapProxy::FindContainingFreedBlock(block_2_));
 664  E :    EXPECT_EQ(block_1_, TestHeapProxy::FindContainingFreedBlock(block_4_));
 665  E :  }
 666    :  
 667  E :  TEST_F(HeapTest, GetBadAccessKind) {
 668  E :    const size_t kAllocSize = 100;
 669    :    // Ensure that the quarantine is large enough to keep this block, this is
 670    :    // needed for the use-after-free check.
 671  E :    proxy_.SetQuarantineMaxSize(TestHeapProxy::GetAllocSize(kAllocSize));
 672  E :    uint8* mem = static_cast<uint8*>(proxy_.Alloc(0, kAllocSize));
 673  E :    ASSERT_FALSE(mem == NULL);
 674    :    TestHeapProxy::BlockHeader* header =
 675    :        const_cast<TestHeapProxy::BlockHeader*>(
 676  E :            proxy_.UserPointerToBlockHeader(mem));
 677  E :    uint8* heap_underflow_address = mem - 1;
 678  E :    uint8* heap_overflow_address = mem + kAllocSize * sizeof(uint8);
 679  E :    ASSERT_TRUE(proxy_.IsUnderflowAccess(heap_underflow_address, header));
 680  E :    ASSERT_TRUE(proxy_.IsOverflowAccess(heap_overflow_address, header));
 681  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 682  E :    ASSERT_TRUE(proxy_.IsQuarantined(header));
 683  E :    ASSERT_TRUE(proxy_.IsUseAfterAccess(mem, header));
 684  E :  }
 685    :  
 686  E :  TEST_F(HeapTest, GetTimeSinceFree) {
 687  E :    const size_t kAllocSize = 100;
 688  E :    const size_t kSleepTime = 25;
 689    :  
 690    :    // Ensure that the quarantine is large enough to keep this block.
 691  E :    proxy_.SetQuarantineMaxSize(TestHeapProxy::GetAllocSize(kAllocSize));
 692  E :    uint8* mem = static_cast<uint8*>(proxy_.Alloc(0, kAllocSize));
 693    :    TestHeapProxy::BlockHeader* header =
 694    :        const_cast<TestHeapProxy::BlockHeader*>(
 695  E :            proxy_.UserPointerToBlockHeader(mem));
 696    :  
 697  E :    base::TimeTicks time_before_free = base::TimeTicks::HighResNow();
 698  E :    ASSERT_EQ(0U, proxy_.GetTimeSinceFree(header));
 699  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 700  E :    ASSERT_TRUE(proxy_.IsQuarantined(header));
 701  E :    ::Sleep(kSleepTime);
 702  E :    uint64 time_since_free = proxy_.GetTimeSinceFree(header);
 703  E :    ASSERT_NE(0U, time_since_free);
 704    :  
 705  E :    base::TimeDelta time_delta = base::TimeTicks::HighResNow() - time_before_free;
 706  E :    ASSERT_GT(time_delta.ToInternalValue(), 0U);
 707  E :    uint64 time_delta_us = static_cast<uint64>(time_delta.ToInternalValue());
 708  E :    trace::common::ClockInfo clock_info = {};
 709  E :    trace::common::GetClockInfo(&clock_info);
 710  E :    if (clock_info.tsc_info.frequency == 0)
 711  i :      time_delta_us += HeapProxy::kSleepTimeForApproximatingCPUFrequency;
 712    :  
 713  E :    ASSERT_GE(time_delta_us, time_since_free);
 714  E :  }
 715    :  
 716  E :  TEST_F(HeapTest, CaptureTID) {
 717  E :    const size_t kAllocSize = 13;
 718    :    // Ensure that the quarantine is large enough to keep this block.
 719  E :    proxy_.SetQuarantineMaxSize(TestHeapProxy::GetAllocSize(kAllocSize));
 720  E :    uint8* mem = static_cast<uint8*>(proxy_.Alloc(0, kAllocSize));
 721  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 722  E :    ASSERT_TRUE(proxy_.IsQuarantined(proxy_.UserPointerToBlockHeader(mem)));
 723    :  
 724    :    TestHeapProxy::BlockHeader* header =
 725    :        const_cast<TestHeapProxy::BlockHeader*>(
 726  E :            proxy_.UserPointerToBlockHeader(mem));
 727  E :    ASSERT_TRUE(header != NULL);
 728    :    TestHeapProxy::BlockTrailer* trailer =
 729    :        const_cast<TestHeapProxy::BlockTrailer*>(
 730  E :            proxy_.BlockHeaderToBlockTrailer(header));
 731  E :    ASSERT_TRUE(trailer != NULL);
 732    :  
 733  E :    ASSERT_EQ(trailer->alloc_tid, ::GetCurrentThreadId());
 734  E :    ASSERT_EQ(trailer->free_tid, ::GetCurrentThreadId());
 735  E :  }
 736    :  
 737  E :  TEST_F(HeapTest, QuarantineDoesntAlterBlockContents) {
 738  E :    const size_t kAllocSize = 13;
 739    :    // Ensure that the quarantine is large enough to keep this block.
 740  E :    proxy_.SetQuarantineMaxSize(TestHeapProxy::GetAllocSize(kAllocSize));
 741  E :    void* mem = proxy_.Alloc(0, kAllocSize);
 742  E :    ASSERT_TRUE(mem != NULL);
 743  E :    RandomSetMemory(mem, kAllocSize);
 744    :  
 745  E :    unsigned char sha1_before[base::kSHA1Length] = {};
 746    :    base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
 747    :                        kAllocSize,
 748  E :                        sha1_before);
 749    :  
 750    :    TestHeapProxy::BlockHeader* header =
 751    :        const_cast<TestHeapProxy::BlockHeader*>(
 752  E :            proxy_.UserPointerToBlockHeader(mem));
 753    :  
 754  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 755  E :    ASSERT_TRUE(proxy_.IsQuarantined(header));
 756    :  
 757  E :    unsigned char sha1_after[base::kSHA1Length] = {};
 758    :    base::SHA1HashBytes(reinterpret_cast<unsigned char*>(mem),
 759    :                        kAllocSize,
 760  E :                        sha1_after);
 761    :  
 762  E :    ASSERT_EQ(0, memcmp(sha1_before, sha1_after, base::kSHA1Length));
 763  E :  }
 764    :  
 765  E :  TEST_F(HeapTest, InternalStructureArePoisoned) {
 766    :    EXPECT_EQ(Shadow::kAsanMemoryByte,
 767  E :              Shadow::GetShadowMarkerForAddress(TestShadow::shadow_));
 768    :  
 769  E :    const size_t kAllocSize = 13;
 770    :    // Ensure that the quarantine is large enough to keep this block.
 771  E :    proxy_.SetQuarantineMaxSize(TestHeapProxy::GetAllocSize(kAllocSize));
 772  E :    uint8* mem = static_cast<uint8*>(proxy_.Alloc(0, kAllocSize));
 773    :    TestHeapProxy::BlockHeader* header =
 774    :        const_cast<TestHeapProxy::BlockHeader*>(
 775  E :            proxy_.UserPointerToBlockHeader(mem));
 776    :  
 777  E :    ASSERT_TRUE(header != NULL);
 778    :    const void* alloc_stack_cache_addr =
 779  E :        reinterpret_cast<const void*>(header->alloc_stack);
 780    :    EXPECT_EQ(Shadow::kAsanMemoryByte,
 781  E :              Shadow::GetShadowMarkerForAddress(alloc_stack_cache_addr));
 782    :  
 783  E :    ASSERT_TRUE(proxy_.Free(0, mem));
 784  E :  }
 785    :  
 786  E :  TEST_F(HeapTest, GetNullTerminatedArraySize) {
 787    :    // Ensure that the quarantine is large enough to keep the allocated blocks in
 788    :    // this test.
 789  E :    proxy_.SetQuarantineMaxSize(kMaxAllocSize * 2);
 790  E :    const char* test_strings[] = { "", "abc", "abcdefg", "abcdefghijklmno" };
 791    :  
 792  E :    for (size_t i = 0; i < arraysize(test_strings); ++i) {
 793  E :      size_t string_size = strlen(test_strings[i]);
 794    :      char* mem = reinterpret_cast<char*>(
 795  E :          proxy_.Alloc(0, string_size + 1));
 796  E :      ASSERT_TRUE(mem != NULL);
 797  E :      strcpy(static_cast<char*>(mem), test_strings[i]);
 798  E :      size_t size = 0;
 799  E :      EXPECT_TRUE(Shadow::GetNullTerminatedArraySize(mem, &size, 0U));
 800  E :      EXPECT_EQ(string_size, size - 1);
 801  E :      mem[string_size] = 'a';
 802  E :      mem[string_size + 1] = 0;
 803  E :      EXPECT_FALSE(Shadow::GetNullTerminatedArraySize(mem, &size, 0U));
 804  E :      EXPECT_EQ(string_size, size - 1);
 805  E :      ASSERT_TRUE(proxy_.Free(0, mem));
 806  E :    }
 807  E :  }
 808    :  
 809  E :  TEST_F(HeapTest, SetTrailerPaddingSize) {
 810  E :    const size_t kAllocSize = 100;
 811    :    // As we're playing with the padding size in these tests, we need to make sure
 812    :    // that the blocks don't end up in the quarantine, otherwise we won't be able
 813    :    // to unpoison them correctly (we don't keep the padding size in the blocks).
 814  E :    proxy_.SetQuarantineMaxSize(kAllocSize - 1);
 815  E :    size_t original_alloc_size = TestHeapProxy::GetAllocSize(kAllocSize);
 816  E :    size_t original_trailer_padding_size = TestHeapProxy::trailer_padding_size();
 817    :  
 818  E :    for (size_t padding = 0; padding < 16; ++padding) {
 819    :      size_t augmented_trailer_padding_size = original_trailer_padding_size +
 820  E :          padding;
 821  E :      proxy_.set_trailer_padding_size(augmented_trailer_padding_size);
 822  E :      size_t augmented_alloc_size = TestHeapProxy::GetAllocSize(kAllocSize);
 823  E :      EXPECT_GE(augmented_alloc_size, original_alloc_size);
 824    :  
 825  E :      LPVOID mem = proxy_.Alloc(0, kAllocSize);
 826  E :      ASSERT_TRUE(mem != NULL);
 827    :  
 828  E :      size_t offset = kAllocSize;
 829  E :      for (; offset < augmented_alloc_size - sizeof(TestHeapProxy::BlockHeader);
 830  E :           ++offset) {
 831    :        EXPECT_FALSE(Shadow::IsAccessible(
 832  E :            reinterpret_cast<const uint8*>(mem) + offset));
 833  E :      }
 834  E :      ASSERT_TRUE(proxy_.Free(0, mem));
 835  E :    }
 836  E :    proxy_.set_trailer_padding_size(original_trailer_padding_size);
 837  E :  }
 838    :  
 839    :  namespace {
 840    :  
 841    :  // A unittest fixture to test the bookkeeping functions.
 842    :  struct FakeAsanBlock {
 843    :    static const size_t kMaxAlignmentLog = 12;
 844    :    static const size_t kMaxAlignment = 1 << kMaxAlignmentLog;
 845    :    // If we want to test the alignments up to 2048 we need a buffer of at least
 846    :    // 3 * 2048 bytes:
 847    :    // +--- 0 <= size < 2048 bytes---+---2048 bytes---+--2048 bytes--+
 848    :    // ^buffer                       ^aligned_buffer  ^user_pointer
 849    :    static const size_t kBufferSize = 3 * kMaxAlignment;
 850    :    static const uint8 kBufferHeaderValue = 0xAE;
 851    :    static const uint8 kBufferTrailerValue = 0xEA;
 852    :  
 853    :    FakeAsanBlock(TestHeapProxy* proxy, size_t alloc_alignment_log)
 854    :        : proxy(proxy),
 855    :          is_initialized(false),
 856    :          alloc_alignment_log(alloc_alignment_log),
 857    :          alloc_alignment(1 << alloc_alignment_log),
 858  E :          user_ptr(NULL) {
 859    :      // Align the beginning of the buffer to the current granularity. Ensure that
 860    :      // there's room to store magic bytes in front of this block.
 861    :      buffer_align_begin = reinterpret_cast<uint8*>(common::AlignUp(
 862  E :          reinterpret_cast<size_t>(buffer) + 1, alloc_alignment));
 863  E :    }
 864  E :    ~FakeAsanBlock() {
 865  E :      Shadow::Unpoison(buffer_align_begin, asan_alloc_size);
 866  E :      memset(buffer, 0, sizeof(buffer));
 867  E :    }
 868    :  
 869    :    // Initialize an ASan block in the buffer.
 870    :    // @param alloc_size The user size of the ASan block.
 871    :    // @returns true on success, false otherwise.
 872  E :    bool InitializeBlock(size_t alloc_size) {
 873  E :      user_alloc_size = alloc_size;
 874    :      asan_alloc_size = proxy->GetAllocSize(alloc_size,
 875  E :                                            alloc_alignment);
 876    :  
 877    :      // Calculate the size of the zone of the buffer that we use to ensure that
 878    :      // we don't corrupt the heap.
 879  E :      buffer_header_size = buffer_align_begin - buffer;
 880    :      buffer_trailer_size = kBufferSize - buffer_header_size -
 881  E :          asan_alloc_size;
 882  E :      EXPECT_GT(kBufferSize, asan_alloc_size + buffer_header_size);
 883    :  
 884    :      // Initialize the buffer header and trailer.
 885  E :      memset(buffer, kBufferHeaderValue, buffer_header_size);
 886    :      memset(buffer_align_begin + asan_alloc_size,
 887    :             kBufferTrailerValue,
 888  E :             buffer_trailer_size);
 889    :  
 890  E :      StackCapture stack;
 891  E :      stack.InitFromStack();
 892    :      // Initialize the ASan block.
 893    :      user_ptr = proxy->InitializeAsanBlock(buffer_align_begin,
 894    :                                            alloc_size,
 895    :                                            asan_alloc_size,
 896    :                                            alloc_alignment_log,
 897  E :                                            stack);
 898  E :      EXPECT_TRUE(user_ptr != NULL);
 899  E :      EXPECT_TRUE(common::IsAligned(reinterpret_cast<size_t>(user_ptr),
 900    :                                    alloc_alignment));
 901  E :      EXPECT_TRUE(common::IsAligned(
 902    :          reinterpret_cast<size_t>(buffer_align_begin) + asan_alloc_size,
 903    :          Shadow::kShadowGranularity));
 904  E :      EXPECT_TRUE(proxy->UserPointerToAsanPointer(user_ptr) ==
 905    :          buffer_align_begin);
 906  E :      EXPECT_TRUE(proxy->AsanPointerToUserPointer(buffer_align_begin) ==
 907    :          user_ptr);
 908    :  
 909    :      void* expected_user_ptr = reinterpret_cast<void*>(
 910    :          buffer_align_begin + std::max(sizeof(TestHeapProxy::BlockHeader),
 911  E :                                        alloc_alignment));
 912  E :      EXPECT_TRUE(user_ptr == expected_user_ptr);
 913    :  
 914  E :      size_t i = 0;
 915    :      // Ensure that the buffer header is accessible and correctly tagged.
 916  E :      for (; i < buffer_header_size; ++i) {
 917  E :        EXPECT_EQ(kBufferHeaderValue, buffer[i]);
 918  E :        EXPECT_TRUE(Shadow::IsAccessible(buffer + i));
 919  E :      }
 920  E :      size_t user_block_offset = reinterpret_cast<uint8*>(user_ptr) - buffer;
 921    :      // Ensure that the block header isn't accessible.
 922  E :      for (; i < user_block_offset; ++i) {
 923  E :        EXPECT_FALSE(Shadow::IsAccessible(buffer + i));
 924  E :      }
 925    :      // Ensure that the user block is accessible.
 926  E :      size_t block_trailer_offset = i + alloc_size;
 927  E :      for (; i < block_trailer_offset; ++i) {
 928  E :        EXPECT_TRUE(Shadow::IsAccessible(buffer + i));
 929  E :      }
 930    :      // Ensure that the block trailer isn't accessible.
 931  E :      for (; i < buffer_header_size + asan_alloc_size; ++i) {
 932  E :        EXPECT_FALSE(Shadow::IsAccessible(buffer + i));
 933  E :      }
 934    :      // Ensure that the buffer trailer is accessible and correctly tagged.
 935  E :      for (; i < kBufferSize; ++i) {
 936  E :        EXPECT_EQ(kBufferTrailerValue, buffer[i]);
 937  E :        EXPECT_TRUE(Shadow::IsAccessible(buffer + i));
 938  E :      }
 939    :  
 940  E :      is_initialized = true;
 941  E :      return true;
 942  E :    }
 943    :  
 944    :    // Ensures that this block has a valid block header.
 945  E :    bool TestBlockHeader() {
 946  E :      if (!is_initialized)
 947  i :        return false;
 948    :  
 949    :      // Ensure that the block header is valid. UserPointerToBlockHeader takes
 950    :      // care of checking the magic number in the signature of the block.
 951    :      TestHeapProxy::BlockHeader* block_header = proxy->UserPointerToBlockHeader(
 952  E :          user_ptr);
 953  E :      EXPECT_TRUE(block_header != NULL);
 954    :      TestHeapProxy::BlockTrailer* block_trailer =
 955  E :          TestHeapProxy::BlockHeaderToBlockTrailer(block_header);
 956  E :      EXPECT_EQ(::GetCurrentThreadId(), block_trailer->alloc_tid);
 957  E :      EXPECT_EQ(user_alloc_size, block_header->block_size);
 958  E :      EXPECT_EQ(alloc_alignment_log, block_header->alignment_log);
 959  E :      EXPECT_TRUE(block_header->alloc_stack != NULL);
 960  E :      EXPECT_TRUE(proxy->IsAllocated(block_header));
 961    :  
 962  E :      void* tmp_user_pointer = NULL;
 963  E :      size_t tmp_user_size = 0;
 964    :      HeapProxy::GetUserExtent(buffer_align_begin,
 965    :                               &tmp_user_pointer,
 966  E :                               &tmp_user_size);
 967  E :      EXPECT_TRUE(tmp_user_pointer == user_ptr);
 968  E :      EXPECT_EQ(user_alloc_size, tmp_user_size);
 969    :  
 970  E :      void* tmp_asan_pointer = NULL;
 971    :      HeapProxy::GetAsanExtent(user_ptr,
 972    :                               &tmp_asan_pointer,
 973  E :                               &tmp_user_size);
 974  E :      EXPECT_TRUE(tmp_asan_pointer == buffer_align_begin);
 975  E :      EXPECT_EQ(asan_alloc_size, tmp_user_size);
 976    :  
 977    :      // Test the various accessors.
 978  E :      EXPECT_TRUE(proxy->BlockHeaderToUserPointer(block_header) == user_ptr);
 979  E :      EXPECT_TRUE(proxy->BlockHeaderToAsanPointer(block_header) ==
 980    :          buffer_align_begin);
 981  E :      EXPECT_TRUE(proxy->AsanPointerToBlockHeader(buffer_align_begin) ==
 982    :          block_header);
 983    :  
 984  E :      return true;
 985  E :    }
 986    :  
 987    :    // Mark the current ASan block as quarantined.
 988  E :    bool MarkBlockAsQuarantined() {
 989  E :      if (!is_initialized)
 990  i :        return false;
 991    :  
 992    :      TestHeapProxy::BlockHeader* block_header = proxy->UserPointerToBlockHeader(
 993  E :          user_ptr);
 994    :      TestHeapProxy::BlockTrailer* block_trailer =
 995  E :          proxy->BlockHeaderToBlockTrailer(block_header);
 996  E :      EXPECT_TRUE(block_header->free_stack == NULL);
 997  E :      EXPECT_TRUE(block_trailer != NULL);
 998  E :      EXPECT_EQ(0U, block_trailer->free_tid);
 999    :  
1000  E :      StackCapture stack;
1001  E :      stack.InitFromStack();
1002    :      // Mark the block as quarantined.
1003  E :      proxy->MarkBlockAsQuarantined(buffer_align_begin, stack);
1004  E :      EXPECT_TRUE(block_header->free_stack != NULL);
1005  E :      EXPECT_TRUE(proxy->IsQuarantined(block_header));
1006  E :      EXPECT_EQ(::GetCurrentThreadId(), block_trailer->free_tid);
1007    :  
1008  E :      size_t i = 0;
1009    :      // Ensure that the buffer header is accessible and correctly tagged.
1010  E :      for (; i < buffer_header_size; ++i) {
1011  E :        EXPECT_EQ(kBufferHeaderValue, buffer[i]);
1012  E :        EXPECT_TRUE(Shadow::IsAccessible(buffer + i));
1013  E :      }
1014    :      // Ensure that the whole block isn't accessible.
1015  E :      for (; i < buffer_header_size + asan_alloc_size; ++i) {
1016  E :        EXPECT_FALSE(Shadow::IsAccessible(buffer + i));
1017  E :      }
1018    :      // Ensure that the buffer trailer is accessible and correctly tagged.
1019  E :      for (; i < kBufferSize; ++i) {
1020  E :        EXPECT_EQ(kBufferTrailerValue, buffer[i]);
1021  E :        EXPECT_TRUE(Shadow::IsAccessible(buffer + i));
1022  E :      }
1023  E :      return true;
1024  E :    }
1025    :  
1026    :    // The buffer we use internally.
1027    :    uint8 buffer[kBufferSize];
1028    :  
1029    :    // The heap proxy we delegate to.
1030    :    TestHeapProxy* proxy;
1031    :  
1032    :    // The alignment of the current allocation.
1033    :    size_t alloc_alignment;
1034    :    size_t alloc_alignment_log;
1035    :  
1036    :    // The sizes of the different sub-structures in the buffer.
1037    :    size_t asan_alloc_size;
1038    :    size_t user_alloc_size;
1039    :    size_t buffer_header_size;
1040    :    size_t buffer_trailer_size;
1041    :  
1042    :    // The pointers to the different sub-structures in the buffer.
1043    :    uint8* buffer_align_begin;
1044    :    void* user_ptr;
1045    :  
1046    :    // Indicate if the buffer has been initialized.
1047    :    bool is_initialized;
1048    :  };
1049    :  
1050    :  }  // namespace
1051    :  
1052  E :  TEST_F(HeapTest, InitializeAsanBlock) {
1053  E :    for (size_t alloc_alignment_log = Shadow::kShadowGranularityLog;
1054  E :         alloc_alignment_log <= FakeAsanBlock::kMaxAlignmentLog;
1055  E :         ++alloc_alignment_log) {
1056  E :      FakeAsanBlock fake_block(&proxy_, alloc_alignment_log);
1057  E :      const size_t kAllocSize = 100;
1058  E :      EXPECT_TRUE(fake_block.InitializeBlock(kAllocSize));
1059  E :      EXPECT_TRUE(fake_block.TestBlockHeader());
1060  E :    }
1061  E :  }
1062    :  
1063  E :  TEST_F(HeapTest, MarkBlockAsQuarantined) {
1064  E :    for (size_t alloc_alignment_log = Shadow::kShadowGranularityLog;
1065  E :         alloc_alignment_log <= FakeAsanBlock::kMaxAlignmentLog;
1066  E :         ++alloc_alignment_log) {
1067  E :      FakeAsanBlock fake_block(&proxy_, alloc_alignment_log);
1068  E :      const size_t kAllocSize = 100;
1069  E :      EXPECT_TRUE(fake_block.InitializeBlock(kAllocSize));
1070  E :      EXPECT_TRUE(fake_block.TestBlockHeader());
1071  E :      EXPECT_TRUE(fake_block.MarkBlockAsQuarantined());
1072  E :    }
1073  E :  }
1074    :  
1075  E :  TEST_F(HeapTest, DestroyAsanBlock) {
1076  E :    for (size_t alloc_alignment_log = Shadow::kShadowGranularityLog;
1077  E :         alloc_alignment_log <= FakeAsanBlock::kMaxAlignmentLog;
1078  E :         ++alloc_alignment_log) {
1079  E :      FakeAsanBlock fake_block(&proxy_, alloc_alignment_log);
1080  E :      const size_t kAllocSize = 100;
1081  E :      EXPECT_TRUE(fake_block.InitializeBlock(kAllocSize));
1082  E :      EXPECT_TRUE(fake_block.TestBlockHeader());
1083  E :      EXPECT_TRUE(fake_block.MarkBlockAsQuarantined());
1084    :  
1085    :      TestHeapProxy::BlockHeader* block_header = proxy_.UserPointerToBlockHeader(
1086  E :          fake_block.user_ptr);
1087    :      TestHeapProxy::BlockTrailer* block_trailer =
1088  E :          proxy_.BlockHeaderToBlockTrailer(block_header);
1089    :      StackCapture* alloc_stack = const_cast<StackCapture*>(
1090  E :          block_header->alloc_stack);
1091    :      StackCapture* free_stack = const_cast<StackCapture*>(
1092  E :          block_header->free_stack);
1093    :  
1094  E :      ASSERT_TRUE(alloc_stack != NULL);
1095  E :      ASSERT_TRUE(free_stack != NULL);
1096  E :      EXPECT_EQ(1U, alloc_stack->ref_count());
1097  E :      EXPECT_EQ(1U, free_stack->ref_count());
1098  E :      alloc_stack->AddRef();
1099  E :      free_stack->AddRef();
1100  E :      EXPECT_EQ(2U, alloc_stack->ref_count());
1101  E :      EXPECT_EQ(2U, free_stack->ref_count());
1102    :  
1103  E :      proxy_.DestroyAsanBlock(fake_block.buffer_align_begin);
1104    :  
1105  E :      EXPECT_TRUE(proxy_.IsFreed(block_header));
1106  E :      EXPECT_EQ(1U, alloc_stack->ref_count());
1107  E :      EXPECT_EQ(1U, free_stack->ref_count());
1108  E :      alloc_stack->RemoveRef();
1109  E :      free_stack->RemoveRef();
1110  E :    }
1111  E :  }
1112    :  
1113  E :  TEST_F(HeapTest, CloneBlock) {
1114  E :    for (size_t alloc_alignment_log = Shadow::kShadowGranularityLog;
1115  E :         alloc_alignment_log <= FakeAsanBlock::kMaxAlignmentLog;
1116  E :         ++alloc_alignment_log) {
1117    :      // Create a fake block and mark it as quarantined.
1118  E :      FakeAsanBlock fake_block(&proxy_, alloc_alignment_log);
1119  E :      const size_t kAllocSize = 100;
1120  E :      EXPECT_TRUE(fake_block.InitializeBlock(kAllocSize));
1121  E :      EXPECT_TRUE(fake_block.TestBlockHeader());
1122    :      // Fill the block with a non zero value.
1123  E :      memset(fake_block.user_ptr, 0xEE, kAllocSize);
1124  E :      EXPECT_TRUE(fake_block.MarkBlockAsQuarantined());
1125    :  
1126  E :      size_t asan_alloc_size = fake_block.asan_alloc_size;
1127    :  
1128    :      // Get the current count of the alloc and free stack traces.
1129    :      TestHeapProxy::BlockHeader* block_header = proxy_.UserPointerToBlockHeader(
1130  E :          fake_block.user_ptr);
1131    :      StackCapture* alloc_stack = const_cast<StackCapture*>(
1132  E :          block_header->alloc_stack);
1133    :      StackCapture* free_stack = const_cast<StackCapture*>(
1134  E :          block_header->free_stack);
1135    :  
1136  E :      ASSERT_TRUE(alloc_stack != NULL);
1137  E :      ASSERT_TRUE(free_stack != NULL);
1138    :  
1139  E :      size_t alloc_stack_count = alloc_stack->ref_count();
1140  E :      size_t free_stack_count = alloc_stack->ref_count();
1141    :  
1142    :      // Clone the fake block into a second one.
1143  E :      FakeAsanBlock fake_block_2(&proxy_, alloc_alignment_log);
1144    :      proxy_.CloneObject(fake_block.buffer_align_begin,
1145  E :                         fake_block_2.buffer_align_begin);
1146  E :      fake_block_2.asan_alloc_size = asan_alloc_size;
1147    :  
1148    :      // Ensure that the stack trace counts have been incremented.
1149  E :      EXPECT_EQ(alloc_stack_count + 1, alloc_stack->ref_count());
1150  E :      EXPECT_EQ(free_stack_count + 1, free_stack->ref_count());
1151    :  
1152  E :      for (size_t i = 0; i < asan_alloc_size; ++i) {
1153    :        // Ensure that the blocks have the same content.
1154    :        EXPECT_EQ(fake_block.buffer_align_begin[i],
1155  E :                  fake_block_2.buffer_align_begin[i]);
1156    :        EXPECT_EQ(
1157    :            Shadow::GetShadowMarkerForAddress(fake_block.buffer_align_begin + i),
1158    :            Shadow::GetShadowMarkerForAddress(
1159  E :                fake_block_2.buffer_align_begin + i));
1160  E :      }
1161  E :    }
1162  E :  }
1163    :  
1164  E :  TEST_F(HeapTest, GetBadAccessInformation) {
1165  E :    FakeAsanBlock fake_block(&proxy_, Shadow::kShadowGranularityLog);
1166  E :    const size_t kAllocSize = 100;
1167  E :    EXPECT_TRUE(fake_block.InitializeBlock(kAllocSize));
1168    :  
1169  E :    AsanErrorInfo error_info = {};
1170    :    error_info.location = reinterpret_cast<uint8*>(fake_block.user_ptr) +
1171  E :        kAllocSize + 1;
1172  E :    EXPECT_TRUE(HeapProxy::GetBadAccessInformation(&error_info));
1173  E :    EXPECT_EQ(HeapProxy::HEAP_BUFFER_OVERFLOW, error_info.error_type);
1174    :  
1175  E :    EXPECT_TRUE(fake_block.MarkBlockAsQuarantined());
1176  E :    error_info.location = fake_block.user_ptr;
1177  E :    EXPECT_TRUE(HeapProxy::GetBadAccessInformation(&error_info));
1178  E :    EXPECT_EQ(HeapProxy::USE_AFTER_FREE, error_info.error_type);
1179    :  
1180  E :    error_info.location = fake_block.buffer_align_begin - 1;
1181  E :    EXPECT_FALSE(HeapProxy::GetBadAccessInformation(&error_info));
1182  E :  }
1183    :  
1184  E :  TEST_F(HeapTest, GetBadAccessInformationNestedBlock) {
1185    :    // Test a nested use after free. We allocate an outer block and an inner block
1186    :    // inside it, then we mark the outer block as quarantined and we test a bad
1187    :    // access inside the inner block.
1188    :  
1189  E :    FakeAsanBlock fake_block(&proxy_, Shadow::kShadowGranularityLog);
1190  E :    const size_t kInnerBlockAllocSize = 100;
1191    :  
1192    :    // Allocates the outer block.
1193  E :    size_t outer_block_size = TestHeapProxy::GetAllocSize(kInnerBlockAllocSize);
1194  E :    EXPECT_TRUE(fake_block.InitializeBlock(outer_block_size));
1195    :  
1196    :    // Allocates the inner block.
1197  E :    StackCapture stack;
1198  E :    stack.InitFromStack();
1199    :    void* inner_block_data = proxy_.InitializeAsanBlock(
1200    :        reinterpret_cast<uint8*>(fake_block.user_ptr),
1201    :                                 kInnerBlockAllocSize,
1202    :                                 outer_block_size,
1203    :                                 Shadow::kShadowGranularityLog,
1204  E :                                 stack);
1205    :  
1206  E :    ASSERT_NE(reinterpret_cast<void*>(NULL), inner_block_data);
1207    :  
1208    :    TestHeapProxy::BlockHeader* inner_block =
1209  E :        TestHeapProxy::UserPointerToBlockHeader(inner_block_data);
1210  E :    ASSERT_NE(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL), inner_block);
1211    :    TestHeapProxy::BlockHeader* outer_block =
1212  E :        TestHeapProxy::UserPointerToBlockHeader(fake_block.user_ptr);
1213  E :    ASSERT_NE(reinterpret_cast<TestHeapProxy::BlockHeader*>(NULL), outer_block);
1214    :  
1215  E :    AsanErrorInfo error_info = {};
1216    :  
1217    :    // Mark the inner block as quarantined and check that we detect a use after
1218    :    // free when trying to access its data.
1219  E :    proxy_.MarkBlockHeaderAsQuarantined(inner_block);
1220  E :    EXPECT_FALSE(proxy_.IsAllocated(inner_block));
1221  E :    EXPECT_TRUE(proxy_.IsAllocated(outer_block));
1222  E :    EXPECT_NE(reinterpret_cast<void*>(NULL), inner_block->free_stack);
1223    :  
1224  E :    error_info.location = fake_block.user_ptr;
1225  E :    EXPECT_TRUE(HeapProxy::GetBadAccessInformation(&error_info));
1226  E :    EXPECT_EQ(HeapProxy::USE_AFTER_FREE, error_info.error_type);
1227  E :    EXPECT_NE(reinterpret_cast<void*>(NULL), error_info.free_stack);
1228    :  
1229  E :    EXPECT_EQ(inner_block->free_stack->num_frames(), error_info.free_stack_size);
1230  E :    for (size_t i = 0; i < inner_block->free_stack->num_frames(); ++i)
1231  E :      EXPECT_EQ(inner_block->free_stack->frames()[i], error_info.free_stack[i]);
1232    :  
1233    :    // Mark the outer block as quarantined, we should detect a use after free
1234    :    // when trying to access the data of the inner block, and the free stack
1235    :    // should be the one of the outer block.
1236  E :    EXPECT_TRUE(fake_block.MarkBlockAsQuarantined());
1237  E :    EXPECT_FALSE(proxy_.IsAllocated(outer_block));
1238  E :    EXPECT_NE(reinterpret_cast<void*>(NULL), outer_block->free_stack);
1239    :  
1240    :    // Tests an access in the inner block.
1241  E :    error_info.location = inner_block_data;
1242  E :    EXPECT_TRUE(HeapProxy::GetBadAccessInformation(&error_info));
1243  E :    EXPECT_EQ(HeapProxy::USE_AFTER_FREE, error_info.error_type);
1244  E :    EXPECT_NE(reinterpret_cast<void*>(NULL), error_info.free_stack);
1245    :  
1246  E :    EXPECT_EQ(outer_block->free_stack->num_frames(), error_info.free_stack_size);
1247  E :    for (size_t i = 0; i < outer_block->free_stack->num_frames(); ++i)
1248  E :      EXPECT_EQ(outer_block->free_stack->frames()[i], error_info.free_stack[i]);
1249  E :  }
1250    :  
1251    :  }  // namespace asan
1252    :  }  // namespace agent

Coverage information generated Wed Dec 11 11:34:16 2013.