Coverage for /Syzygy/agent/profiler/profiler.cc

Coverage | Lines executed / instrumented / missing (exe / inst / miss) | Language | Group
82.1% | 252 / 307 / 0 | C++ | source

Line-by-line coverage:

   1    :  // Copyright 2012 Google Inc. All Rights Reserved.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  //
  15    :  // Implementation of the profiler DLL.
  16    :  #include "syzygy/agent/profiler/profiler.h"
  17    :  
  18    :  #include <windows.h>
  19    :  #include <algorithm>
  20    :  
  21    :  #include "base/at_exit.h"
  22    :  #include "base/command_line.h"
  23    :  #include "base/environment.h"
  24    :  #include "base/file_path.h"
  25    :  #include "base/lazy_instance.h"
  26    :  #include "base/logging.h"
  27    :  #include "base/string_util.h"
  28    :  #include "base/utf_string_conversions.h"
  29    :  #include "base/memory/scoped_ptr.h"
  30    :  #include "base/win/pe_image.h"
  31    :  #include "base/win/scoped_handle.h"
  32    :  #include "syzygy/agent/common/dlist.h"
  33    :  #include "syzygy/agent/common/process_utils.h"
  34    :  #include "syzygy/agent/common/scoped_last_error_keeper.h"
  35    :  #include "syzygy/agent/profiler/return_thunk_factory.h"
  36    :  #include "syzygy/common/logging.h"
  37    :  #include "syzygy/trace/client/client_utils.h"
  38    :  #include "syzygy/trace/protocol/call_trace_defs.h"
  39    :  
   40    :  namespace {
   41    :  
   42    :  using agent::common::ScopedLastErrorKeeper;
   43    :  
   44    :  // All tracing runs through this object.
   45    :  base::LazyInstance<agent::profiler::Profiler> static_profiler_instance =
   46    :      LAZY_INSTANCE_INITIALIZER;
   47    :  
         :  // Identifies a unique (caller return address, invoked function) pair.
   48    :  typedef std::pair<RetAddr, FuncAddr> InvocationKey;
   49    :  
         :  // Hash/compare functor for InvocationKey, in the shape MSVC's hash_map
         :  // traits expect: a hash on the key plus a less-than ordering on keys.
   50    :  class HashInvocationKey {
   51    :   public:
   52    :    static const size_t bucket_size = 4;
   53    :    static const size_t min_buckets = 8;
   54    :  
         :    // Hashes a key by XORing the caller and function addresses.
   55  E :    size_t operator()(const InvocationKey& key) const {
   56    :      return reinterpret_cast<size_t>(key.first) ^
   57  E :          reinterpret_cast<size_t>(key.second);
   58  E :    }
   59    :  
         :    // Less-than comparison on (caller, function) pairs.
   60  E :    bool operator()(const InvocationKey& a, const InvocationKey& b) const {
   61  E :      return a < b;
   62  E :    }
   63    :  };
         :  // Maps each (caller, function) pair to its tally record in the
         :  // current trace buffer.
   64    :  typedef base::hash_map<
   65    :      InvocationKey, InvocationInfo*, HashInvocationKey> InvocationMap;
   66    :  
   67    :  // Accessing a module acquired from process iteration calls is inherently racy,
   68    :  // as we don't hold any kind of reference to the module, and so the module
   69    :  // could be unloaded while we're accessing it. In practice this shouldn't
   70    :  // happen to us, as we'll be running under the loader's lock in all cases.
         :  // Returns true on success, false if reading the image headers faulted.
   71    :  bool CaptureModuleInformation(const base::win::PEImage& image,
   72    :                                TraceModuleData* module_event) {
   73    :    __try {
   74    :      // Populate the log record.
   75    :      module_event->module_base_size =
   76    :          image.GetNTHeaders()->OptionalHeader.SizeOfImage;
   77    :      module_event->module_checksum =
   78    :          image.GetNTHeaders()->OptionalHeader.CheckSum;
   79    :      module_event->module_time_date_stamp =
   80    :          image.GetNTHeaders()->FileHeader.TimeDateStamp;
   81    :    } __except(EXCEPTION_EXECUTE_HANDLER) {
   82    :      return false;
   83    :    }
   84    :  
   85    :    return true;
   86    :  }
   87    :  
   88    :  // The information on how to set the thread name comes from
   89    :  // a MSDN article: http://msdn2.microsoft.com/en-us/library/xcb2z8hs.aspx
   90    :  const DWORD kVCThreadNameException = 0x406D1388;
   91    :  
   92    :  typedef struct tagTHREADNAME_INFO {
   93    :    DWORD dwType;  // Must be 0x1000.
   94    :    LPCSTR szName;  // Pointer to name (in user addr space).
   95    :    DWORD dwThreadID;  // Thread ID (-1=caller thread).
   96    :    DWORD dwFlags;  // Reserved for future use, must be zero.
   97    :  } THREADNAME_INFO;
   98    :  
   99    :  }  // namespace
 100    :  
  101    :  // See client.cc for a description of the unconventional
  102    :  // calling conventions for this function.
         :  // The naked function must preserve all registers and the flags around
         :  // the call into the C++ hook; rdtsc (which clobbers eax/edx) is issued
         :  // first so the timestamp excludes as much hook overhead as possible.
  103  i :  extern "C" void __declspec(naked) _indirect_penter() {
  104    :    __asm {
  105    :      // Stash volatile registers.
  106  i :      push eax
  107  i :      push edx
  108    :  
  109    :      // Get the current cycle time ASAP.
  110  i :      rdtsc
  111    :  
  112    :      // Save the value of eax for later, we need the register to stash the flags.
  113  i :      push ecx
  114  i :      mov ecx, eax
  115    :  
  116    :      // Save the low byte of the flags into AH.
  117  i :      lahf
  118    :      // Save the overflow flag into AL.
  119  i :      seto al
  120    :  
  121    :      // Stash the flags to stack.
  122  i :      push eax
  123    :  
  124    :      // Push the cycle time arg.
  125  i :      push edx
  126  i :      push ecx
  127    :  
  128    :      // Retrieve the original function address, pushed by our caller.
         :      // esp + 0x18 skips the six DWORDs pushed above (eax, edx, ecx,
         :      // flags, and the two cycle-time halves).
  129  i :      mov eax, DWORD PTR[esp + 0x18]
  130  i :      push eax
  131    :  
  132    :      // Calculate the position of the return address on stack, and
  133    :      // push it. This becomes the EntryFrame argument.
  134  i :      lea eax, DWORD PTR[esp + 0x20]
  135  i :      push eax
  136  i :      call agent::profiler::Profiler::FunctionEntryHook
  137    :  
  138    :      // Restore volatile registers.
  139  i :      pop eax
  140    :      // AL is set to 1 if the overflow flag was set before the call to
  141    :      // our hook, 0 otherwise. We add 0x7f to it so it'll restore the
  142    :      // flag.
  143  i :      add al, 0x7f
  144    :      // Restore the low byte of the flags.
  145  i :      sahf
  146    :  
  147  i :      pop ecx
  148  i :      pop edx
  149  i :      pop eax
  150    :  
  151    :      // Return to the address pushed by our caller.
  152  i :      ret
  153    :    }
  154    :  }
 155    :  
         :  // Identical to _indirect_penter above except that it dispatches to
         :  // DllMainEntryHook, which additionally logs module load events for
         :  // DllMain-like entry points. Keep the two bodies in sync.
  156  i :  extern "C" void __declspec(naked) _indirect_penter_dllmain() {
  157    :    __asm {
  158    :      // Stash volatile registers.
  159  i :      push eax
  160  i :      push edx
  161    :  
  162    :      // Get the current cycle time ASAP.
  163  i :      rdtsc
  164    :  
  165    :      // Save the value of eax for later, we need the register to stash the flags.
  166  i :      push ecx
  167  i :      mov ecx, eax
  168    :  
  169    :      // Save the low byte of the flags into AH.
  170  i :      lahf
  171    :      // Save the overflow flag into AL.
  172  i :      seto al
  173    :  
  174    :      // Stash the flags to stack.
  175  i :      push eax
  176    :  
  177    :      // Push the cycle time arg.
  178  i :      push edx
  179  i :      push ecx
  180    :  
  181    :      // Retrieve the original function address, pushed by our caller.
         :      // esp + 0x18 skips the six DWORDs pushed above (eax, edx, ecx,
         :      // flags, and the two cycle-time halves).
  182  i :      mov eax, DWORD PTR[esp + 0x18]
  183  i :      push eax
  184    :  
  185    :      // Calculate the position of the return address on stack, and
  186    :      // push it. This becomes the EntryFrame argument.
  187  i :      lea eax, DWORD PTR[esp + 0x20]
  188  i :      push eax
  189  i :      call agent::profiler::Profiler::DllMainEntryHook
  190    :  
  191    :      // Restore volatile registers.
  192  i :      pop eax
  193    :      // AL is set to 1 if the overflow flag was set before the call to
  194    :      // our hook, 0 otherwise. We add 0x7f to it so it'll restore the
  195    :      // flag.
  196  i :      add al, 0x7f
  197    :      // Restore the low byte of the flags.
  198  i :      sahf
  199    :  
  200  i :      pop ecx
  201  i :      pop edx
  202  i :      pop eax
  203    :  
  204    :      // Return to the address pushed by our caller.
  205  i :      ret
  206    :    }
  207    :  }
 208    :  
  209    :  // On entry, pc_location should point to a location on our own stack.
         :  // C-linkage export that forwards to the profiler singleton's
         :  // ResolveReturnAddressLocation; see that method for the thunk-chain
         :  // resolution semantics.
  210    :  extern "C" uintptr_t __cdecl ResolveReturnAddressLocation(
  211  E :      uintptr_t pc_location) {
  212    :    using agent::profiler::Profiler;
  213  E :    Profiler* profiler = Profiler::Instance();
  214    :    return reinterpret_cast<uintptr_t>(
  215    :        profiler->ResolveReturnAddressLocation(
  216  E :            reinterpret_cast<RetAddr*>(pc_location)));
  217  E :  }
 218    :  
         :  // DLL entry point. Process attach sets up base's AtExitManager,
         :  // command line and logging; thread detach lets the profiler mark the
         :  // departing thread's state for cleanup. There is deliberately no
         :  // DLL_THREAD_ATTACH case - per-thread state is allocated lazily.
  219  E :  BOOL WINAPI DllMain(HMODULE instance, DWORD reason, LPVOID reserved) {
  220    :    using agent::profiler::Profiler;
  221    :  
  222    :    // Our AtExit manager required by base.
  223    :    static base::AtExitManager* at_exit = NULL;
  224    :  
  225  E :    switch (reason) {
  226    :      case DLL_PROCESS_ATTACH:
  227  E :        DCHECK(at_exit == NULL);
  228  E :        at_exit = new base::AtExitManager();
  229    :  
  230  E :        CommandLine::Init(0, NULL);
  231  E :        common::InitLoggingForDll(L"profiler");
  232  E :        break;
  233    :  
  234    :      case DLL_THREAD_DETACH:
  235  E :        Profiler::Instance()->OnThreadDetach();
  236  E :        break;
  237    :  
  238    :      case DLL_PROCESS_DETACH:
  239  E :        DCHECK(at_exit != NULL);
  240  E :        delete at_exit;
  241  E :        at_exit = NULL;
  242    :        break;
  243    :  
  244    :      default:
  245    :        break;
  246    :    }
  247    :  
  248  E :    return TRUE;
  249  E :  }
 250    :  
 251    :  namespace agent {
 252    :  namespace profiler {
 253    :  
         :  // Per-thread profiler state. Combines the return-thunk factory (which
         :  // captures function exits) with the thread's trace file segment and
         :  // the invocation tallies accumulated into that segment.
  254    :  class Profiler::ThreadState
  255    :      : public ReturnThunkFactoryImpl<Profiler::ThreadState>,
  256    :        public agent::common::ThreadStateBase {
  257    :   public:
  258    :    explicit ThreadState(Profiler* profiler);
  259    :    ~ThreadState();
  260    :  
  261    :    // Logs @p module and all other modules in the process, then flushes
  262    :    // the current trace buffer.
  263    :    void LogAllModules(HMODULE module);
  264    :  
  265    :    // Logs @p module.
  266    :    void LogModule(HMODULE module);
  267    :  
  268    :    // Logs @p thread_name as the current thread's name.
  269    :    void LogThreadName(const base::StringPiece& thread_name);
  270    :  
  271    :    // Processes a single function entry.
  272    :    void OnFunctionEntry(EntryFrame* entry_frame,
  273    :                         FuncAddr function,
  274    :                         uint64 cycles);
  275    :  
  276    :    // @name Callback notification implementation.
  277    :    // @{
  278    :    virtual void OnPageAdded(const void* page) OVERRIDE;
  279    :    virtual void OnPageRemoved(const void* page) OVERRIDE;
  280    :    // @}
  281    :  
  282    :    // Function exit hook.
  283    :    void OnFunctionExit(const ThunkData* data, uint64 cycles_exit);
  284    :  
  285  E :    trace::client::TraceFileSegment* segment() { return &segment_; }
  286    :  
  287    :   private:
  288    :    friend class Profiler;
  289    :  
         :    // Tallies one completed invocation under its (caller, function) key.
  290    :    void RecordInvocation(RetAddr caller,
  291    :                          FuncAddr function,
  292    :                          uint64 cycles);
  293    :  
  294    :    void UpdateOverhead(uint64 entry_cycles);
  295    :    InvocationInfo* AllocateInvocationInfo();
  296    :    bool FlushSegment();
  297    :  
  298    :    // The profiler we're attached to.
  299    :    Profiler* profiler_;
  300    :  
  301    :    // We keep a running tally of the rough amount of wall clock cycles spent
  302    :    // inside the profiler. We then subtract the profiler's overhead from the
  303    :    // wall clock cycle timer on each measurement. This results in a timer that
  304    :    // measures time exclusive of profiling overhead.
  305    :    uint64 cycles_overhead_;
  306    :  
  307    :    // The invocations we've recorded in our buffer.
         :    // Note: values point into segment_'s buffer, so this map is cleared
         :    // whenever the buffer is exchanged (see FlushSegment).
  308    :    InvocationMap invocations_;
  309    :  
  310    :    // The trace file segment we're recording to.
  311    :    trace::client::TraceFileSegment segment_;
  312    :  
  313    :    // The current batch record we're writing to, if any.
  314    :    TraceBatchInvocationInfo* batch_;
  315    :  
  316    :    // The set of modules we've logged.
  317    :    ModuleSet logged_modules_;
  318    :  };
 319    :  
         :  // Initializes the thunk factory. The trace segment's buffer is not
         :  // allocated here; the profiler attaches one on demand (see
         :  // Profiler::GetOrAllocateThreadState).
  320    :  Profiler::ThreadState::ThreadState(Profiler* profiler)
  321    :      : profiler_(profiler),
  322    :        cycles_overhead_(0LL),
  323  E :        batch_(NULL) {
  324  E :    Initialize();
  325  E :  }
 326    :  
         :  // Tears down this thread's state, returning any outstanding trace
         :  // buffer to the session before the thunk factory is uninitialized.
  327  E :  Profiler::ThreadState::~ThreadState() {
  328  E :    batch_ = NULL;
  329  E :    invocations_.clear();
  330    :  
  331    :    // If we have an outstanding buffer, let's deallocate it now.
  332  E :    if (segment_.write_ptr != NULL)
  333  E :      profiler_->session_.ReturnBuffer(&segment_);
  334    :  
  335  E :    Uninitialize();
  336  E :  }
 337    :  
         :  // Logs every module currently loaded in the process (including
         :  // @p module), then flushes so the module records reach the trace file
         :  // before any events that reference them.
  338  E :  void Profiler::ThreadState::LogAllModules(HMODULE module) {
  339    :    // Bail early if we're disabled.
  340  E :    if (profiler_->session_.IsDisabled())
  341  E :      return;
  342    :  
  343  E :    agent::common::ModuleVector modules;
  344  E :    agent::common::GetProcessModules(&modules);
  345    :  
  346    :    // Our module should be in the process modules.
  347  E :    DCHECK(std::find(modules.begin(), modules.end(), module) != modules.end());
  348    :  
  349  E :    for (size_t i = 0; i < modules.size(); ++i) {
  350  E :      DCHECK(modules[i] != NULL);
  351  E :      LogModule(modules[i]);
  352  E :    }
  353    :  
  354    :    // We need to flush module events right away, so that the module is
  355    :    // defined in the trace file before events using that module start to
  356    :    // occur (in another thread).
  357  E :    FlushSegment();
  358  E :  }
 359    :  
         :  // Writes a module record for @p module to our segment. The module
         :  // record interrupts any open invocation batch, so the cached batch
         :  // pointer is reset first.
  360  E :  void Profiler::ThreadState::LogModule(HMODULE module) {
  361  E :    batch_ = NULL;
  362  E :    agent::common::LogModule(module, &profiler_->session_, &segment_);
  363  E :  }
 364    :  
         :  // Writes a TRACE_THREAD_NAME record carrying @p thread_name
         :  // (NUL-terminated) to the trace; silently drops the name if a buffer
         :  // cannot be obtained.
  365    :  void Profiler::ThreadState::LogThreadName(
  366  E :      const base::StringPiece& thread_name) {
  367  E :    if (thread_name.empty())
  368  i :      return;
  369    :  
  370    :    // Make sure the event we're about to write will fit.
         :    // NOTE(review): this uses || where AllocateInvocationInfo uses &&,
         :    // so FlushSegment() runs even when the event already fits - possibly
         :    // intentional (publishes the name promptly), but worth confirming.
  371  E :    if (!segment_.CanAllocate(thread_name.size() + 1) || !FlushSegment()) {
  372    :      // Failed to allocate a new segment.
  373  i :      return;
  374    :    }
  375    :  
  376  E :    DCHECK(segment_.CanAllocate(thread_name.size() + 1));
  377  E :    batch_ = NULL;
  378    :  
  379    :    // Allocate a record in the log.
  380    :    TraceThreadNameInfo* thread_name_event =
  381    :        reinterpret_cast<TraceThreadNameInfo*>(
  382    :          segment_.AllocateTraceRecordImpl(
  383  E :              TRACE_THREAD_NAME, thread_name.size() + 1));
  384  E :    DCHECK(thread_name_event != NULL);
  385    :    base::strlcpy(thread_name_event->thread_name,
  386  E :                  thread_name.data(), thread_name.size() + 1);
  387  E :  }
 388    :  
         :  // Records a function entry and redirects the caller's return address
         :  // through a freshly-made thunk, so that OnFunctionExit runs when the
         :  // instrumented function returns.
  389    :  void Profiler::ThreadState::OnFunctionEntry(EntryFrame* entry_frame,
  390    :                                              FuncAddr function,
  391  E :                                              uint64 cycles) {
  392  E :    if (profiler_->session_.IsDisabled())
  393  E :      return;
  394    :  
  395    :    // Record the details of the entry.
  396    :    // Note that on tail-recursion and tail-call elimination, the caller recorded
  397    :    // here will be a thunk. We cater for this case on exit as best we can.
  398  E :    ThunkData* data = MakeThunk(entry_frame->retaddr);
  399  E :    DCHECK(data != NULL);
  400  E :    data->caller = entry_frame->retaddr;
  401  E :    data->function = function;
         :    // Store the entry time net of accumulated profiler overhead.
  402  E :    data->cycles_entry = cycles - cycles_overhead_;
  403    :  
  404  E :    entry_frame->retaddr = data->thunk;
  405    :  
  406  E :    UpdateOverhead(cycles);
  407  E :  }
 408    :  
         :  // Invoked from the return thunk when an instrumented function returns;
         :  // computes the invocation's duration (net of profiler overhead) and
         :  // tallies it.
  409    :  void Profiler::ThreadState::OnFunctionExit(const ThunkData* data,
  410  E :                                             uint64 cycles_exit) {
  411    :    // Calculate the number of cycles in the invocation, exclusive our overhead.
  412  E :    uint64 cycles_executed = cycles_exit - cycles_overhead_ - data->cycles_entry;
  413    :  
  414    :    // See if the return address resolves to a data, which indicates
  415    :    // tail recursion or tail call elimination. In that case we record the
  416    :    // calling function as caller, which isn't totally accurate as that'll
  417    :    // attribute the cost to the first line of the calling function. In the
  418    :    // absence of more information, it's the best we can do, however.
  419  E :    Thunk* ret_thunk = CastToThunk(data->caller);
  420  E :    if (ret_thunk == NULL) {
  421  E :      RecordInvocation(data->caller, data->function, cycles_executed);
  422  E :    } else {
  423  E :      ThunkData* ret_data = DataFromThunk(ret_thunk);
  424  E :      RecordInvocation(ret_data->function, data->function, cycles_executed);
  425    :    }
  426    :  
  427  E :    UpdateOverhead(cycles_exit);
  428  E :  }
 429    :  
         :  // Thunk-factory callback: forwards new thunk pages to the profiler's
         :  // process-wide page set.
  430  E :  void Profiler::ThreadState::OnPageAdded(const void* page) {
  431  E :    profiler_->OnPageAdded(page);
  432  E :  }
 433    :  
         :  // Thunk-factory callback: removes released thunk pages from the
         :  // profiler's process-wide page set.
  434  E :  void Profiler::ThreadState::OnPageRemoved(const void* page) {
  435  E :    profiler_->OnPageRemoved(page);
  436  E :  }
 437    :  
         :  // Tallies one completed invocation into the per-thread aggregate,
         :  // keyed on (caller, function).
  438    :  void Profiler::ThreadState::RecordInvocation(RetAddr caller,
  439    :                                               FuncAddr function,
  440  E :                                               uint64 duration_cycles) {
  441    :    // See whether we've already recorded an entry for this function.
  442  E :    InvocationKey key(caller, function);
  443  E :    InvocationMap::iterator it = invocations_.find(key);
  444  E :    if (it != invocations_.end()) {
  445    :      // Yup, we already have an entry. Tally the new data.
  446  E :      InvocationInfo* info = it->second;
  447  E :      ++(info->num_calls);
  448  E :      info->cycles_sum += duration_cycles;
         :      // min <= max always holds, so at most one of the two updates fires.
  449  E :      if (duration_cycles < info->cycles_min) {
  450  E :        info->cycles_min = duration_cycles;
  451  E :      } else if (duration_cycles > info->cycles_max) {
  452  E :        info->cycles_max = duration_cycles;
  453    :      }
  454  E :    } else {
  455    :      // The allocation below may touch last error.
  456  E :      ScopedLastErrorKeeper keep_last_error;
  457    :  
  458    :      // Nopes, allocate a new entry for this invocation.
         :      // On allocation failure the sample is silently dropped.
  459  E :      InvocationInfo* info = AllocateInvocationInfo();
  460  E :      if (info != NULL) {
  461  E :        invocations_[key] = info;
  462  E :        info->caller = caller;
  463  E :        info->function = function;
  464  E :        info->num_calls = 1;
  465  E :        info->cycles_min = info->cycles_max = info->cycles_sum = duration_cycles;
  466    :      }
  467  E :    }
  468  E :  }
 469    :  
         :  // Accumulates the cycles spent between @p entry_cycles and now into
         :  // the running overhead tally that's subtracted from measurements.
  470  E :  void Profiler::ThreadState::UpdateOverhead(uint64 entry_cycles) {
  471    :    // TODO(siggi): Measure the fixed overhead on setup,
  472    :    //     then add it on every update.
  473  E :    cycles_overhead_ += (__rdtsc() - entry_cycles);
  474  E :  }
 475    :  
         :  // Returns a fresh InvocationInfo slot in the trace buffer - growing
         :  // the current batch record in place when possible, otherwise starting
         :  // a new batch (exchanging buffers if needed). Returns NULL when no
         :  // buffer can be obtained.
  476  E :  InvocationInfo* Profiler::ThreadState::AllocateInvocationInfo() {
  477    :    // This is kind of self-evident for the moment, as an invocation info batch
  478    :    // contains at least one invocation info as currently declared.
  479    :    // If this fails, please reconsider your implementation, or else revisit
  480    :    // the allocation code below.
  481    :    COMPILE_ASSERT(sizeof(TraceBatchInvocationInfo) >= sizeof(InvocationInfo),
  482    :                   invocation_info_batch_must_be_larger_than_invocation_info);
  483    :  
  484    :    // Do we have a record that we can grow?
  485  E :    if (batch_ != NULL && segment_.CanAllocateRaw(sizeof(InvocationInfo))) {
  486    :      InvocationInfo* invocation_info =
  487  E :          reinterpret_cast<InvocationInfo*>(segment_.write_ptr);
         :      // Extend the existing batch record to cover the new entry.
  488  E :      RecordPrefix* prefix = trace::client::GetRecordPrefix(batch_);
  489  E :      prefix->size += sizeof(InvocationInfo);
  490    :  
  491    :      // Update the book-keeping.
  492  E :      segment_.write_ptr += sizeof(InvocationInfo);
  493  E :      segment_.header->segment_length += sizeof(InvocationInfo);
  494    :  
  495  E :      return invocation_info;
  496    :    }
  497    :  
  498    :    // Do we need to scarf a new buffer?
  499    :    if (!segment_.CanAllocate(sizeof(TraceBatchInvocationInfo)) &&
  500  E :        !FlushSegment()) {
  501    :      // We failed to allocate a new buffer.
  502  i :      return NULL;
  503    :    }
  504    :  
  505  E :    DCHECK(segment_.header != NULL);
  506    :  
  507  E :    batch_ = segment_.AllocateTraceRecord<TraceBatchInvocationInfo>();
  508  E :    return &batch_->invocations[0];
  509  E :  }
 510    :  
         :  // Exchanges the current trace buffer for a fresh one. The invocation
         :  // map holds pointers into the outgoing buffer, so it must be dropped
         :  // along with the batch pointer.
  511  E :  bool Profiler::ThreadState::FlushSegment() {
  512  E :    batch_ = NULL;
  513  E :    invocations_.clear();
  514    :  
  515  E :    return profiler_->session_.ExchangeBuffer(&segment_);
  516  E :  }
 517    :  
         :  // Called from DllMain on DLL_THREAD_DETACH. Hands the departing
         :  // thread's state to the state manager for reclamation rather than
         :  // deleting it here.
  518  E :  void Profiler::OnThreadDetach() {
  519  E :    ThreadState* state = GetThreadState();
  520  E :    if (state != NULL)
  521  E :      thread_state_manager_.MarkForDeath(state);
  522  E :  }
 523    :  
         :  // Resolves @p pc_location through any chain of our return thunks:
         :  // while *pc_location lands in one of our thunk pages, redirect to the
         :  // thunk's stashed caller address. Holds lock_ to guard pages_.
  524  E :  RetAddr* Profiler::ResolveReturnAddressLocation(RetAddr* pc_location) {
  525  E :    base::AutoLock lock(lock_);
  526    :  
  527    :    // In case of tail-call and tail recursion elimination, we can get chained
  528    :    // thunks, so we loop around here until we resolve to a non-thunk.
  529  E :    while (true) {
  530    :      // See whether the return address is one of our thunks.
  531  E :      RetAddr ret_addr = *pc_location;
  532    :  
  533    :      // Compute the page this return address lives in.
         :      // Mask off the low 12 bits to get the 4K page base; pages_ is kept
         :      // sorted, so binary_search applies.
  534    :      const void* page = reinterpret_cast<const void*>(
  535  E :          reinterpret_cast<uintptr_t>(ret_addr) & ~0xFFF);
  536  E :      if (!std::binary_search(pages_.begin(), pages_.end(), page))
  537  E :        return pc_location;
  538    :  
  539    :      // It's one of our own, redirect to the thunk's stash.
  540    :      ThreadState::Thunk* thunk =
  541  E :          reinterpret_cast<ThreadState::Thunk*>(const_cast<void*>(ret_addr));
  542    :  
  543  E :      ThreadState::ThunkData* data = ThreadState::DataFromThunk(thunk);
  544    :  
  545    :      // Update the PC location and go around again, in case this
  546    :      // thunk links to another one.
  547  E :      pc_location = &data->caller;
  548  E :    }
  549  E :  }
 550    :  
         :  // Invoked (via DllMainEntryHook) for instrumented DllMain-like entry
         :  // points: logs newly-seen modules on attach events, then records the
         :  // function entry like any other.
  551    :  void Profiler::OnModuleEntry(EntryFrame* entry_frame,
  552    :                               FuncAddr function,
  553  E :                               uint64 cycles) {
  554    :    // The function invoked has a DllMain-like signature.
  555    :    // Get the module and reason from its invocation record.
  556  E :    HMODULE module = reinterpret_cast<HMODULE>(entry_frame->args[0]);
  557  E :    DWORD reason = entry_frame->args[1];
  558    :  
  559    :    // Only log module additions.
  560  E :    bool should_log_module = false;
  561  E :    switch (reason) {
  562    :      case DLL_PROCESS_ATTACH:
  563    :      case DLL_THREAD_ATTACH:
  564  E :        should_log_module = true;
  565  E :        break;
  566    :  
  567    :      case DLL_PROCESS_DETACH:
  568    :      case DLL_THREAD_DETACH:
  569  i :        break;
  570    :  
  571    :      default:
  572  i :        LOG(WARNING) << "Unrecognized module event: " << reason << ".";
  573    :        break;
  574    :    }
  575    :  
  576    :    // Make sure we only log each module once per process.
  577  E :    bool is_new_module = false;
  578  E :    if (should_log_module) {
  579  E :      base::AutoLock lock(lock_);
  580    :  
  581  E :      is_new_module = logged_modules_.insert(module).second;
  582  E :    }
  583    :  
  584  E :    ThreadState* data = GetOrAllocateThreadState();
  585  E :    DCHECK(data != NULL);
  586  E :    if (data == NULL)
  587  i :      return;
  588    :  
  589  E :    if (is_new_module) {
  590    :      // Delegate the logging to our per-thread data.
  591  E :      data->LogAllModules(module);
  592    :    }
  593    :  
  594    :    // Handle the function entry.
  595  E :    data->OnFunctionEntry(entry_frame, function, cycles);
  596  E :  }
 597    :  
         :  // Inserts @p page into pages_ at its sorted position, preserving the
         :  // ordering that ResolveReturnAddressLocation's binary_search relies on.
  598  E :  void Profiler::OnPageAdded(const void* page) {
  599  E :    base::AutoLock lock(lock_);
  600    :  
  601    :    PageVector::iterator it =
  602  E :        std::lower_bound(pages_.begin(), pages_.end(), page);
  603  E :    DCHECK(it == pages_.end() || *it != page);
  604  E :    pages_.insert(it, page);
  605  E :  }
 606    :  
         :  // Removes @p page from the sorted pages_ vector; the page must have
         :  // been previously added via OnPageAdded.
  607  E :  void Profiler::OnPageRemoved(const void* page) {
  608  E :    base::AutoLock lock(lock_);
  609    :  
  610    :    PageVector::iterator it =
  611  E :        std::lower_bound(pages_.begin(), pages_.end(), page);
  612    :    // The page must be in our list.
  613  E :    DCHECK(it != pages_.end());
  614  E :    DCHECK_EQ(page, *it);
  615  E :    pages_.erase(it);
  616  E :  }
 617    :  
         :  // Records @p thread_name for the current thread via its per-thread
         :  // state (allocated on demand).
  618  E :  void Profiler::OnThreadName(const base::StringPiece& thread_name) {
  619  E :    ThreadState* state = GetOrAllocateThreadState();
  620  E :    if (state != NULL)
  621  E :      state->LogThreadName(thread_name);
  622  E :  }
 623    :  
         :  // Vectored exception handler. Intercepts the MSVC thread-naming
         :  // exception (0x406D1388) to capture thread names into the trace;
         :  // always returns EXCEPTION_CONTINUE_SEARCH so debuggers and other
         :  // handlers still see the exception.
  624  E :  LONG CALLBACK Profiler::ExceptionHandler(EXCEPTION_POINTERS* ex_info) {
  625    :    // Log the thread if this is the VC thread name exception.
  626    :    if (ex_info->ExceptionRecord->ExceptionCode == kVCThreadNameException &&
  627    :        ex_info->ExceptionRecord->NumberParameters ==
  628  E :            sizeof(THREADNAME_INFO)/sizeof(DWORD)) {
  629    :      const THREADNAME_INFO* info =
  630    :          reinterpret_cast<const THREADNAME_INFO*>(
  631  E :              &ex_info->ExceptionRecord->ExceptionInformation);
  632    :  
  633  E :      if (info->dwType == 0x1000) {
  634  E :        Profiler* instance = Profiler::Instance();
  635  E :        if (instance != NULL)
  636  E :          instance->OnThreadName(info->szName);
  637  E :      } else {
  638  i :        LOG(WARNING) << "Unrecognised event type " << info->dwType;
  639    :      }
  640    :    }
  641    :  
  642  E :    return EXCEPTION_CONTINUE_SEARCH;
  643  E :  }
 644    :  
 645    :  
         :  // Returns the process-wide profiler singleton, lazily constructed on
         :  // first use.
  646  E :  Profiler* Profiler::Instance() {
  647  E :    return static_profiler_instance.Pointer();
  648  E :  }
 649    :  
         :  // Sets up the RPC session plus this thread's state, then installs our
         :  // vectored exception handler (TRUE = first in the handler chain) to
         :  // catch thread-name exceptions.
  650  E :  Profiler::Profiler() : handler_registration_(NULL) {
  651    :    // Create our RPC session and allocate our initial trace segment on first use.
  652  E :    ThreadState* data = CreateFirstThreadStateAndSession();
  653  E :    CHECK(data != NULL) << "Failed to allocate thread local state.";
  654    :  
  655  E :    handler_registration_ = ::AddVectoredExceptionHandler(TRUE, ExceptionHandler);
  656  E :  }
 657    :  
         :  // Releases the final thread's state and unregisters the vectored
         :  // exception handler installed by the constructor.
  658  E :  Profiler::~Profiler() {
  659    :    // Typically, this will happen on the last thread in the process. We must
  660    :    // explicitly clean up this thread's state as it will otherwise leak.
  661  E :    FreeThreadState();
  662    :  
  663    :    // Unregister our VEH.
  664  E :    if (handler_registration_ != NULL) {
  665  E :      ::RemoveVectoredExceptionHandler(handler_registration_);
  666  E :      handler_registration_ = NULL;
  667    :    }
  668  E :  }
 669    :  
         :  // Constructor helper: allocates the calling thread's state, then
         :  // initializes the RPC session using that state's segment as the first
         :  // trace buffer.
  670  E :  Profiler::ThreadState* Profiler::CreateFirstThreadStateAndSession() {
  671  E :    Profiler::ThreadState* data = GetOrAllocateThreadStateImpl();
  672    :  
  673    :    // Create the session (and allocate the first segment).
  674  E :    trace::client::InitializeRpcSession(&session_, data->segment());
  675    :  
  676  E :    return data;
  677  E :  }
 678    :  
         :  // Returns the calling thread's state, creating it on first use, and
         :  // ensures it has a trace buffer whenever a tracing session is active.
  679  E :  Profiler::ThreadState* Profiler::GetOrAllocateThreadState() {
  680  E :    Profiler::ThreadState* data = GetOrAllocateThreadStateImpl();
  681  E :    if (!data->segment()->write_ptr && session_.IsTracing()) {
  682  E :      session_.AllocateBuffer(data->segment());
  683    :    }
  684  E :    return data;
  685  E :  }
 686    :  
         :  // Returns the calling thread's state from TLS, creating and
         :  // registering a new one on first use (without touching the session).
  687  E :  Profiler::ThreadState* Profiler::GetOrAllocateThreadStateImpl() {
  688  E :    ThreadState *data = tls_.Get();
  689  E :    if (data != NULL)
  690  E :      return data;
  691    :  
  692  E :    data = new ThreadState(this);
         :    // NOTE(review): operator new throws by default, so this NULL check is
         :    // only live if the build uses a non-throwing new - TODO confirm.
  693  E :    if (data == NULL) {
  694  i :      LOG(ERROR) << "Unable to allocate per-thread data";
  695  i :      return NULL;
  696    :    }
  697    :  
  698  E :    thread_state_manager_.Register(data);
  699  E :    tls_.Set(data);
  700    :  
  701  E :    return data;
  702  E :  }
 703    :  
         :  // Returns the calling thread's state from TLS, or NULL if none has
         :  // been allocated yet.
  704  E :  Profiler::ThreadState* Profiler::GetThreadState() const {
  705  E :    return tls_.Get();
  706  E :  }
 707    :  
         :  // Immediately destroys the calling thread's state (if any), clearing
         :  // the TLS slot and unregistering it from the state manager first.
  708  E :  void Profiler::FreeThreadState() {
  709  E :    ThreadState *data = GetThreadState();
  710  E :    if (data != NULL) {
  711  E :      tls_.Set(NULL);
  712  E :      thread_state_manager_.Unregister(data);
  713  E :      delete data;
  714    :    }
  715  E :  }
 716    :  
         :  // C++ target of _indirect_penter_dllmain. Preserves the thread's
         :  // last-error value across the profiler work, since the instrumented
         :  // code may inspect it after return.
  717    :  void WINAPI Profiler::DllMainEntryHook(EntryFrame* entry_frame,
  718    :                                         FuncAddr function,
  719  E :                                         uint64 cycles) {
  720  E :    ScopedLastErrorKeeper keep_last_error;
  721    :  
  722  E :    Profiler* profiler = Profiler::Instance();
  723  E :    profiler->OnModuleEntry(entry_frame, function, cycles);
  724  E :  }
 725    :  
         :  // C++ target of _indirect_penter. Preserves last-error, then routes
         :  // the entry event to the calling thread's state.
  726    :  void WINAPI Profiler::FunctionEntryHook(EntryFrame* entry_frame,
  727    :                                          FuncAddr function,
  728  E :                                          uint64 cycles) {
  729  E :    ScopedLastErrorKeeper keep_last_error;
  730    :  
  731  E :    Profiler* profiler = Profiler::Instance();
  732  E :    ThreadState* data = profiler->GetOrAllocateThreadState();
  733  E :    DCHECK(data != NULL);
  734  E :    if (data != NULL)
  735  E :      data->OnFunctionEntry(entry_frame, function, cycles);
  736  E :  }
 737    :  
 738    :  }  // namespace profiler
 739    :  }  // namespace agent

Coverage information generated Thu Mar 14 11:53:36 2013.