Coverage for /Syzygy/agent/asan/stack_capture_cache.cc

CoverageLines executed / instrumented / missingexe / inst / missLanguageGroup
88.0%1912170.C++source

Line-by-line coverage:

   1    :  // Copyright 2012 Google Inc. All Rights Reserved.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  
  15    :  #include "syzygy/agent/asan/stack_capture_cache.h"
  16    :  
  17    :  #include <algorithm>
  18    :  
  19    :  #include "base/logging.h"
  20    :  #include "base/stringprintf.h"
  21    :  #include "syzygy/agent/asan/asan_logger.h"
  22    :  #include "syzygy/agent/asan/stack_capture.h"
  23    :  
  24    :  namespace agent {
  25    :  namespace asan {
  26    :  
  27    :  namespace {
  28    :  
  29    :  // Gives us access to the first frame of a stack capture as link-list pointer.
  30  E :  StackCapture** GetFirstFrameAsLink(StackCapture* stack_capture) {
  31  E :    DCHECK(stack_capture != NULL);
  32    :    StackCapture** link = reinterpret_cast<StackCapture**>(
  33  E :        const_cast<void**>(stack_capture->frames()));
  34  E :    DCHECK(link != NULL);
  35  E :    return link;
  36  E :  }
  37    :  
  38    :  }  // namespace
  39    :  
  40    :  size_t StackCaptureCache::compression_reporting_period_ =
  41    :      StackCaptureCache::kDefaultCompressionReportingPeriod;
  42    :  
  43  E :  StackCaptureCache::CachePage::~CachePage() {
  44    :    // It's our parent StackCaptureCache's responsibility to clean up the linked
  45    :    // list of cache pages. We balk if we're being deleted and haven't been
  46    :    // properly unlinked from the linked list.
  47  E :    DCHECK(next_page_ == NULL);
  48  E :    Shadow::Unpoison(this, sizeof(CachePage));
  49  E :  }
  50    :  
  51    :  StackCapture* StackCaptureCache::CachePage::GetNextStackCapture(
  52  E :      size_t max_num_frames) {
  53  E :    size_t size = StackCapture::GetSize(max_num_frames);
  54  E :    if (bytes_used_ + size > kDataSize)
  55  E :      return NULL;
  56    :  
  57    :    // Use placement new.
  58  E :    StackCapture* stack = new(data_ + bytes_used_) StackCapture(max_num_frames);
  59  E :    bytes_used_ += size;
  60    :  
  61  E :    return stack;
  62  E :  }
  63    :  
  64    :  bool StackCaptureCache::CachePage::ReturnStackCapture(
  65  E :      StackCapture* stack_capture) {
  66  E :    DCHECK(stack_capture != NULL);
  67    :  
  68  E :    uint8* stack = reinterpret_cast<uint8*>(stack_capture);
  69  E :    size_t size = stack_capture->Size();
  70    :  
  71    :    // If this was the last stack capture provided by this page then the end of
  72    :    // it must align with our current data pointer.
  73  E :    if (data_ + bytes_used_ != stack + size)
  74  i :      return false;
  75    :  
  76  E :    bytes_used_ -= size;
  77  E :    return true;
  78  E :  }
  79    :  
  80    :  StackCaptureCache::StackCaptureCache(AsanLogger* logger)
  81    :      : logger_(logger),
  82    :        max_num_frames_(StackCapture::kMaxNumFrames),
  83  E :        current_page_(new CachePage(NULL)) {
  84  E :    CHECK(current_page_ != NULL);
  85  E :    DCHECK(logger_ != NULL);
  86  E :    ::memset(&statistics_, 0, sizeof(statistics_));
  87  E :    ::memset(reclaimed_, 0, sizeof(reclaimed_));
  88  E :    statistics_.size = sizeof(CachePage);
  89  E :  }
  90    :  
  91    :  StackCaptureCache::StackCaptureCache(AsanLogger* logger, size_t max_num_frames)
  92    :      : logger_(logger),
  93    :        max_num_frames_(0),
  94  E :        current_page_(new CachePage(NULL)) {
  95  E :    CHECK(current_page_ != NULL);
  96  E :    DCHECK(logger_ != NULL);
  97  E :    DCHECK_LT(0u, max_num_frames);
  98    :    max_num_frames_ = static_cast<uint8>(
  99  E :        std::min(max_num_frames, StackCapture::kMaxNumFrames));
 100  E :    ::memset(&statistics_, 0, sizeof(statistics_));
 101  E :    ::memset(reclaimed_, 0, sizeof(reclaimed_));
 102  E :    statistics_.size = sizeof(CachePage);
 103  E :  }
 104    :  
 105  E :  StackCaptureCache::~StackCaptureCache() {
 106    :    // Clean up the linked list of cache pages.
 107  E :    while (current_page_ != NULL) {
 108  E :      CachePage* page = current_page_;
 109  E :      current_page_ = page->next_page_;
 110  E :      page->next_page_ = NULL;
 111  E :      delete page;
 112  E :    }
 113  E :  }
 114    :  
 115  E :  void StackCaptureCache::Init() {
 116  E :    compression_reporting_period_ = kDefaultCompressionReportingPeriod;
 117  E :  }
 118    :  
 119    :  const StackCapture* StackCaptureCache::SaveStackTrace(
 120  E :      StackId stack_id, const void* const* frames, size_t num_frames) {
 121  E :    DCHECK(frames != NULL);
 122  E :    DCHECK_NE(num_frames, 0U);
 123  E :    DCHECK(current_page_ != NULL);
 124    :  
 125  E :    bool already_cached = false;
 126  E :    StackCapture* stack_trace = NULL;
 127  E :    bool saturated = false;
 128    :  
 129    :    {
 130  E :      size_t known_stack_shard = stack_id % kKnownStacksSharding;
 131    :      // Get or insert the current stack trace while under the lock for this
 132    :      // bucket.
 133  E :      base::AutoLock auto_lock(known_stacks_locks_[known_stack_shard]);
 134    :  
 135    :      // Check if the stack capture is already in the cache map.
 136  E :      StackCapture capture;
 137  E :      capture.set_stack_id(stack_id);
 138  E :      StackSet::iterator result = known_stacks_[known_stack_shard].find(&capture);
 139    :  
 140    :      // If this capture has not already been cached then we have to initialize
 141    :      // the data.
 142  E :      if (result == known_stacks_[known_stack_shard].end()) {
 143  E :        stack_trace = GetStackCapture(num_frames);
 144  E :        DCHECK(stack_trace != NULL);
 145  E :        stack_trace->InitFromBuffer(stack_id, frames, num_frames);
 146    :        std::pair<StackSet::iterator, bool> it =
 147  E :            known_stacks_[known_stack_shard].insert(stack_trace);
 148  E :        DCHECK(it.second);
 149  E :        DCHECK(stack_trace->HasNoRefs());
 150  E :      } else {
 151  E :        already_cached = true;
 152  E :        stack_trace = *result;
 153    :      }
 154    :      // Increment the reference count for this stack trace.
 155  E :      if (!stack_trace->RefCountIsSaturated()) {
 156  E :        stack_trace->AddRef();
 157  E :      } else {
 158  E :        saturated = true;
 159    :      }
 160  E :    }
 161  E :    DCHECK(stack_trace != NULL);
 162    :  
 163  E :    bool must_log = false;
 164  E :    Statistics statistics = {};
 165    :    // Update the statistics.
 166  E :    if (compression_reporting_period_ != 0) {
 167  E :      base::AutoLock stats_lock(stats_lock_);
 168  E :      if (already_cached) {
 169    :        // If the existing stack capture is previously unreferenced and becoming
 170    :        // referenced again, then decrement the unreferenced counter.
 171  E :        if (stack_trace->HasNoRefs()) {
 172  i :          DCHECK_LT(0u, statistics_.unreferenced);
 173  i :          --statistics_.unreferenced;
 174    :        }
 175  E :      } else {
 176  E :        ++statistics_.cached;
 177  E :        statistics_.frames_alive += num_frames;
 178  E :        ++statistics_.allocated;
 179    :      }
 180  E :      if (!saturated && stack_trace->RefCountIsSaturated()) {
 181  E :        saturated = true;
 182  E :        ++statistics_.saturated;
 183    :      }
 184  E :      ++statistics_.requested;
 185  E :      ++statistics_.references;
 186  E :      statistics_.frames_stored += num_frames;
 187  E :      if (statistics_.requested % compression_reporting_period_ == 0) {
 188  E :        must_log = true;
 189  E :        GetStatisticsUnlocked(&statistics);
 190    :      }
 191  E :    }
 192    :  
 193  E :    if (must_log)
 194  E :      LogStatisticsImpl(statistics);
 195    :  
 196    :    // Return the stack trace pointer that is now in the cache.
 197  E :    return stack_trace;
 198  E :  }
 199    :  
 200    :  const StackCapture* StackCaptureCache::SaveStackTrace(
 201  E :      const StackCapture& stack_capture) {
 202    :    return SaveStackTrace(stack_capture.stack_id(),
 203    :                          stack_capture.frames(),
 204  E :                          stack_capture.num_frames());
 205  E :  }
 206    :  
 207  E :  void StackCaptureCache::ReleaseStackTrace(const StackCapture* stack_capture) {
 208  E :    DCHECK(stack_capture != NULL);
 209    :  
 210  E :    size_t known_stack_shard = stack_capture->stack_id() % kKnownStacksSharding;
 211  E :    bool add_to_reclaimed_list = false;
 212  E :    StackCapture* stack = NULL;
 213    :    {
 214  E :      base::AutoLock auto_lock(known_stacks_locks_[known_stack_shard]);
 215    :  
 216    :      // We own the stack so its fine to remove the const. We double check this
 217    :      // is the case in debug builds with the DCHECK.
 218  E :      stack = const_cast<StackCapture*>(stack_capture);
 219    :      DCHECK(known_stacks_[known_stack_shard].find(stack) !=
 220  E :          known_stacks_[known_stack_shard].end());
 221    :  
 222  E :      stack->RemoveRef();
 223    :  
 224  E :      if (stack->HasNoRefs()) {
 225  E :        add_to_reclaimed_list = true;
 226    :        // Remove this from the known stacks as we're going to reclaim it and
 227    :        // overwrite part of its data as we insert into the reclaimed_ list.
 228  E :        size_t num_erased = known_stacks_[known_stack_shard].erase(stack);
 229  E :        DCHECK_EQ(num_erased, 1u);
 230    :      }
 231  E :    }
 232    :  
 233    :    // Link this stack capture into the list of reclaimed stacks.
 234  E :    if (add_to_reclaimed_list)
 235  E :      AddStackCaptureToReclaimedList(stack);
 236    :  
 237    :    // Update the statistics.
 238  E :    if (compression_reporting_period_ != 0) {
 239  E :      base::AutoLock stats_lock(stats_lock_);
 240  E :      DCHECK_LT(0u, statistics_.references);
 241  E :      --statistics_.references;
 242  E :      statistics_.frames_stored -= stack->num_frames();
 243  E :      if (add_to_reclaimed_list) {
 244  E :        --statistics_.cached;
 245  E :        ++statistics_.unreferenced;
 246    :        // The frames in this stack capture are no longer alive.
 247  E :        statistics_.frames_alive -= stack->num_frames();
 248    :      }
 249  E :    }
 250  E :  }
 251    :  
 252  E :  void StackCaptureCache::LogStatistics()  {
 253  E :    Statistics statistics = {};
 254    :  
 255    :    {
 256  E :      base::AutoLock auto_lock(stats_lock_);
 257  E :      GetStatisticsUnlocked(&statistics);
 258  E :    }
 259    :  
 260  E :    LogStatisticsImpl(statistics);
 261  E :  }
 262    :  
 263  E :  void StackCaptureCache::GetStatisticsUnlocked(Statistics* statistics) const {
 264    :  #ifndef NDEBUG
 265  E :    stats_lock_.AssertAcquired();
 266    :  #endif
 267    :  
 268  E :    DCHECK(statistics != NULL);
 269  E :    *statistics = statistics_;
 270  E :  }
 271    :  
 272  E :  void StackCaptureCache::LogStatisticsImpl(const Statistics& statistics) const {
 273    :    // The cache has 3 categories of storage.
 274    :    // alive frames: these are actively participating in storing a stack trace.
 275    :    // dead frames: these are unreferenced stack traces that are eligible for
 276    :    //     reuse, but are currently dormant.
 277    :    // overhead: frames in a stack-capture that aren't used, padding at the end
 278    :    //     cache pages, cache page metadata, stack capture metadata, etc.
 279    :  
 280    :    // These are all in bytes.
 281  E :    double cache_size = statistics.size;
 282  E :    double alive_size = statistics.frames_alive * 4;
 283  E :    double dead_size = statistics.frames_dead * 4;
 284  E :    double stored_size = statistics.frames_stored * 4;
 285    :  
 286    :    // The |cache_size| is the actual size of storage taken, while |stored_size|
 287    :    // is the conceptual amount of frame data that is stored in the cache.
 288  E :    double compression = 100.0 * (1.0 - (cache_size / stored_size));
 289  E :    double alive = 100.0 * alive_size / cache_size;
 290  E :    double dead = 100.0 * dead_size / cache_size;
 291  E :    double overhead = 100.0 - alive - dead;
 292    :  
 293    :    logger_->Write(base::StringPrintf(
 294    :        "PID=%d; Stack cache size=%.2f MB; Compression=%.2f%%; "
 295    :        "Alive=%.2f%%; Dead=%.2f%%; Overhead=%.2f%%; Saturated=%d; Entries=%d",
 296    :        ::GetCurrentProcessId(),
 297    :        cache_size / 1024.0 / 1024.0,
 298    :        compression,
 299    :        alive,
 300    :        dead,
 301    :        overhead,
 302    :        statistics.saturated,
 303  E :        statistics.cached));
 304  E :  }
 305    :  
 306  E :  StackCapture* StackCaptureCache::GetStackCapture(size_t num_frames) {
 307  E :    StackCapture* stack_capture = NULL;
 308    :  
 309    :    // First look to the reclaimed stacks and try to use one of those. We'll use
 310    :    // the first one that's big enough.
 311  E :    for (size_t n = num_frames; n <= max_num_frames_; ++n) {
 312  E :      base::AutoLock lock(reclaimed_locks_[n]);
 313  E :      if (reclaimed_[n] != NULL) {
 314  E :        StackCapture* reclaimed_stack_capture = reclaimed_[n];
 315  E :        StackCapture** link = GetFirstFrameAsLink(reclaimed_stack_capture);
 316  E :        reclaimed_[n] = *link;
 317  E :        stack_capture = reclaimed_stack_capture;
 318  E :        break;
 319    :      }
 320  E :    }
 321    :  
 322  E :    if (stack_capture != NULL) {
 323  E :      if (compression_reporting_period_ != 0) {
 324  i :        base::AutoLock stats_lock(stats_lock_);
 325    :        // These frames are no longer dead, but in limbo. If the stack capture
 326    :        // is used they'll be added to frames_alive and frames_stored.
 327  i :        statistics_.frames_dead -= stack_capture->max_num_frames();
 328  i :      }
 329  E :      return stack_capture;
 330    :    }
 331    :  
 332  E :    StackCapture* unused_stack_capture = NULL;
 333    :    {
 334  E :      base::AutoLock current_page_lock(current_page_lock_);
 335    :  
 336    :      // We didn't find a reusable stack capture. Go to the cache page.
 337  E :      stack_capture = current_page_->GetNextStackCapture(num_frames);
 338    :  
 339  E :      if (stack_capture != NULL)
 340  E :        return stack_capture;
 341    :  
 342    :      // If the allocation failed we don't have enough room on the current page.
 343    :  
 344    :      // Use the remaining bytes to create one more maximally sized stack
 345    :      // capture. We will stuff this into the reclaimed_ structure for later
 346    :      // use.
 347  i :      size_t bytes_left = current_page_->bytes_left();
 348  i :      size_t max_num_frames = StackCapture::GetMaxNumFrames(bytes_left);
 349  i :      if (max_num_frames > 0) {
 350  i :        DCHECK_LT(max_num_frames, num_frames);
 351  i :        DCHECK_LE(StackCapture::GetSize(max_num_frames), bytes_left);
 352    :        unused_stack_capture =
 353  i :            current_page_->GetNextStackCapture(max_num_frames);
 354  i :        DCHECK(unused_stack_capture != NULL);
 355    :      }
 356    :  
 357    :      // Allocate a new page (that links to the current page) and use it to
 358    :      // allocate a new stack capture.
 359  i :      current_page_ = new CachePage(current_page_);
 360  i :      CHECK(current_page_ != NULL);
 361  i :      statistics_.size += sizeof(CachePage);
 362  i :      stack_capture = current_page_->GetNextStackCapture(num_frames);
 363  i :    }
 364    :  
 365  i :    if (unused_stack_capture != NULL) {
 366    :      // We're creating an unreferenced stack capture.
 367  i :      AddStackCaptureToReclaimedList(unused_stack_capture);
 368    :    }
 369    :  
 370    :    // Update the statistics.
 371  i :    if (compression_reporting_period_ != 0) {
 372  i :      base::AutoLock stats_lock(stats_lock_);
 373  i :      ++statistics_.unreferenced;
 374  i :    }
 375    :  
 376  i :    DCHECK(stack_capture != NULL);
 377  i :    return stack_capture;
 378  E :  }
 379    :  
 380    :  void StackCaptureCache::AddStackCaptureToReclaimedList(
 381  E :      StackCapture* stack_capture) {
 382  E :    DCHECK(stack_capture != NULL);
 383    :    {
 384  E :      base::AutoLock lock(reclaimed_locks_[stack_capture->max_num_frames()]);
 385    :  
 386  E :      StackCapture** link = GetFirstFrameAsLink(stack_capture);
 387  E :      size_t num_frames = stack_capture->max_num_frames();
 388  E :      *link = reclaimed_[num_frames];
 389  E :      reclaimed_[num_frames] = stack_capture;
 390  E :    }
 391    :  
 392    :    // Update the statistics.
 393  E :    if (compression_reporting_period_ != 0) {
 394  E :      base::AutoLock stats_lock(stats_lock_);
 395  E :      statistics_.frames_dead += stack_capture->max_num_frames();
 396  E :    }
 397  E :  }
 398    :  
 399    :  }  // namespace asan
 400    :  }  // namespace agent

Coverage information generated Wed Dec 11 11:34:16 2013.