Coverage for /Syzygy/trace/service/session_unittest.cc

Coverage   Lines executed / instrumented / missing   Language   Group
99.6%      261 / 262 / 0                             C++        test

Line-by-line coverage:

   1    :  // Copyright 2012 Google Inc. All Rights Reserved.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  
  15    :  #include "syzygy/trace/service/session.h"
  16    :  
  17    :  #include "base/atomicops.h"
  18    :  #include "base/bind.h"
  19    :  #include "base/callback.h"
  20    :  #include "base/environment.h"
  21    :  #include "base/files/file_util.h"
  22    :  #include "base/files/scoped_temp_dir.h"
  23    :  #include "base/memory/scoped_ptr.h"
  24    :  #include "base/strings/stringprintf.h"
  25    :  #include "base/strings/utf_string_conversions.h"
  26    :  #include "base/threading/thread.h"
  27    :  #include "gtest/gtest.h"
  28    :  #include "syzygy/trace/protocol/call_trace_defs.h"
  29    :  #include "syzygy/trace/service/service.h"
  30    :  #include "syzygy/trace/service/service_rpc_impl.h"
  31    :  #include "syzygy/trace/service/session_trace_file_writer.h"
  32    :  #include "syzygy/trace/service/session_trace_file_writer_factory.h"
  33    :  
  34    :  namespace trace {
  35    :  namespace service {
  36    :  
  37    :  namespace {
  38    :  
  39    :  class TestSessionTraceFileWriter : public SessionTraceFileWriter {
  40    :   public:
  41    :    explicit TestSessionTraceFileWriter(
  42    :        base::MessageLoop* message_loop, const base::FilePath& trace_directory)
  43    :        : SessionTraceFileWriter(message_loop, trace_directory),
  44  E :          num_buffers_to_recycle_(0) {
  45  E :      base::subtle::Barrier_AtomicIncrement(&num_instances_, 1);
  46  E :    }
  47    :  
  48  E :    ~TestSessionTraceFileWriter() {
  49  E :      base::subtle::Barrier_AtomicIncrement(&num_instances_, -1);
  50  E :    }
  51    :  
  52  E :    void RecycleBuffers() {
  53  E :      queue_lock_.AssertAcquired();
  54    :  
  55  E :      while (!queue_.empty() && num_buffers_to_recycle_ != 0) {
  56  E :        Buffer* buffer = queue_.front();
  57  E :        queue_.pop_front();
  58    :  
  59  E :        ASSERT_TRUE(buffer != NULL);
  60  E :        ASSERT_EQ(buffer->session, session_ref_.get());
  61  E :        ASSERT_TRUE(
  62    :          SessionTraceFileWriter::ConsumeBuffer(buffer));
  63    :  
  64  E :        --num_buffers_to_recycle_;
  65  E :      }
  66    :  
  67    :      // If we've emptied the queue, release our reference to the session.
  68  E :      if (queue_.empty())
  69  E :        session_ref_ = reinterpret_cast<Session*>(NULL);
  70  E :    }
  71    :  
  72  E :    void AllowBuffersToBeRecycled(size_t num_buffers) {
  73  E :      base::AutoLock auto_lock(queue_lock_);
  74    :  
  75  E :      num_buffers_to_recycle_ = num_buffers;
  76  E :      RecycleBuffers();
  77  E :    }
  78    :  
  79  E :    virtual bool ConsumeBuffer(Buffer* buffer) override {
  80  E :      base::AutoLock auto_lock(queue_lock_);
  81  E :      EXPECT_TRUE(buffer != NULL);
  82  E :      if (buffer) {
  83    :        // While there are buffers in the queue, keep a reference to the session.
  84  E :        if (queue_.empty()) {
  85  E :          EXPECT_TRUE(session_ref_.get() == NULL);
  86  E :          EXPECT_TRUE(buffer->session != NULL);
  87  E :          session_ref_ = buffer->session;
  88    :        }
  89    :  
  90    :        // Put the buffer into the consumer queue.
  91  E :        queue_.push_back(buffer);
  92    :      }
  93    :  
  94  E :      RecycleBuffers();
  95    :  
  96  E :      return buffer != NULL;
  97  E :    }
  98    :  
  99  E :    static base::subtle::Atomic32 num_instances() {
 100  E :      return base::subtle::Acquire_Load(&num_instances_);
 101  E :    }
 102    :  
 103    :   protected:
 104    :    // The queue of buffers to be consumed.
 105    :    std::deque<Buffer*> queue_;
 106    :  
 107    :    // This keeps the session object alive while there are buffers in the queue.
 108    :    scoped_refptr<Session> session_ref_;
 109    :  
 110    :    // A lock to protect access to the queue and session reference.
 111    :    base::Lock queue_lock_;
 112    :  
  113    :    // The number of buffers to recycle before pausing.
 114    :    size_t num_buffers_to_recycle_;
 115    :  
 116    :    // The number of active writer instances.
 117    :    // @note All accesses to this member should be via base/atomicops.h functions.
 118    :    static volatile base::subtle::Atomic32 num_instances_;
 119    :  };
 120    :  
 121    :  volatile base::subtle::Atomic32 TestSessionTraceFileWriter::num_instances_ = 0;
 122    :  
 123    :  class TestSessionTraceFileWriterFactory : public SessionTraceFileWriterFactory {
 124    :   public:
 125  E :    explicit TestSessionTraceFileWriterFactory(base::MessageLoop* message_loop)
 126    :        : SessionTraceFileWriterFactory(message_loop) {
 127  E :    }
 128    :  
 129  E :    bool CreateConsumer(scoped_refptr<BufferConsumer>* consumer) override {
 130    :      // w00t, somewhat bogus coverage ploy, at least will reuse the DCHECKS.
 131  E :      EXPECT_TRUE(SessionTraceFileWriterFactory::CreateConsumer(consumer));
 132  E :      EXPECT_TRUE((*consumer)->HasOneRef());
 133    :  
 134    :      *consumer = new TestSessionTraceFileWriter(
 135  E :         message_loop_, trace_file_directory_);
 136  E :      return true;
 137  E :    }
 138    :  };
 139    :  
 140    :  class TestSession : public Session {
 141    :   public:
 142    :    explicit TestSession(Service* service)
 143    :        : Session(service),
 144    :          waiting_for_buffer_to_be_recycled_(&lock_),
 145    :          waiting_for_buffer_to_be_recycled_state_(false),
 146    :          destroying_singleton_buffer_(&lock_),
 147    :          destroying_singleton_buffer_state_(false),
 148    :          last_singleton_buffer_destroyed_(NULL),
 149    :          singleton_buffers_destroyed_(0),
 150    :          allocating_buffers_(&lock_),
 151  E :          allocating_buffers_state_(false) {
 152  E :    }
 153    :  
 154  E :    void AllowBuffersToBeRecycled(size_t num_buffers) {
 155    :      static_cast<TestSessionTraceFileWriter*>(
 156  E :          buffer_consumer())->AllowBuffersToBeRecycled(num_buffers);
 157  E :    }
 158    :  
 159  E :    void ClearWaitingForBufferToBeRecycledState() {
 160  E :      base::AutoLock lock(lock_);
 161  E :      waiting_for_buffer_to_be_recycled_state_ = false;
 162  E :    }
 163    :  
 164  E :    void PauseUntilWaitingForBufferToBeRecycled() {
 165  E :      base::AutoLock lock(lock_);
 166  E :      while (!waiting_for_buffer_to_be_recycled_state_)
 167  E :        waiting_for_buffer_to_be_recycled_.Wait();
 168  E :      waiting_for_buffer_to_be_recycled_state_ = false;
 169  E :    }
 170    :  
 171    :    void ClearDestroyingSingletonBufferState() {
 172    :      base::AutoLock lock(lock_);
 173    :      destroying_singleton_buffer_state_ = false;
 174    :    }
 175    :  
 176  E :    void PauseUntilDestroyingSingletonBuffer() {
 177  E :      base::AutoLock lock(lock_);
 178  E :      while (!destroying_singleton_buffer_state_)
 179  E :        destroying_singleton_buffer_.Wait();
 180  E :      destroying_singleton_buffer_state_ = true;
 181  E :    }
 182    :  
 183  E :    void ClearAllocatingBuffersState() {
 184  E :      base::AutoLock lock(lock_);
 185  E :      allocating_buffers_state_ = false;
 186  E :    }
 187    :  
 188  E :    void PauseUntilAllocatingBuffers() {
 189  E :      base::AutoLock lock(lock_);
 190  E :      while (!allocating_buffers_state_)
 191  E :        allocating_buffers_.Wait();
 192  E :      waiting_for_buffer_to_be_recycled_state_ = false;
 193  E :    }
 194    :  
 195  E :    size_t buffer_requests_waiting_for_recycle() {
 196  E :      base::AutoLock lock(lock_);
 197  E :      return buffer_requests_waiting_for_recycle_;
 198  E :    }
 199    :  
 200  E :    virtual void OnWaitingForBufferToBeRecycled() override {
 201  E :      lock_.AssertAcquired();
 202  E :      waiting_for_buffer_to_be_recycled_state_ = true;
 203  E :      waiting_for_buffer_to_be_recycled_.Signal();
 204  E :    }
 205    :  
 206  E :    virtual void OnDestroySingletonBuffer(Buffer* buffer) override {
 207  E :      lock_.AssertAcquired();
 208  E :      last_singleton_buffer_destroyed_ = buffer;
 209  E :      singleton_buffers_destroyed_++;
 210  E :      destroying_singleton_buffer_state_ = true;
 211  E :      destroying_singleton_buffer_.Signal();
 212  E :    }
 213    :  
 214    :    bool InitializeProcessInfo(ProcessId process_id,
 215  E :                               ProcessInfo* client) override {
 216  E :      DCHECK(client != NULL);
 217    :  
 218    :      // Lobotomize the process info initialization to allow using fake PIDs.
 219  E :      client->process_id = process_id;
 220    :      const DWORD kFlags =
 221  E :          PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
 222    :      client->process_handle.Set(
 223  E :          ::OpenProcess(kFlags, FALSE, ::GetCurrentProcessId()));
 224    :      static const wchar_t kEnvironment[] = L"asdf=fofofo\0";
 225    :      client->environment.assign(kEnvironment,
 226  E :                                 kEnvironment + arraysize(kEnvironment));
 227    :  
 228  E :      return true;
 229  E :    }
 230    :  
 231    :    bool CopyBufferHandleToClient(HANDLE client_process_handle,
 232    :                                  HANDLE local_handle,
 233  E :                                  HANDLE* client_copy) override {
 234    :      // Avoid handle leaks by using the same handle for both "ends".
 235  E :      *client_copy = local_handle;
 236  E :      return true;
 237  E :    }
 238    :  
 239  E :    virtual bool AllocateBuffers(size_t count, size_t size) override {
 240  E :      lock_.AssertAcquired();
 241    :  
 242  E :      allocating_buffers_state_ = true;
 243  E :      allocating_buffers_.Signal();
 244    :  
 245    :      // Forward this to the original implementation.
 246  E :      return Session::AllocateBuffers(count, size);
 247  E :    }
 248    :  
 249    :    // Under lock_.
 250    :    base::ConditionVariable waiting_for_buffer_to_be_recycled_;
 251    :    bool waiting_for_buffer_to_be_recycled_state_;
 252    :  
 253    :    // Under lock_.
 254    :    base::ConditionVariable destroying_singleton_buffer_;
 255    :    bool destroying_singleton_buffer_state_;
 256    :    Buffer* last_singleton_buffer_destroyed_;
 257    :    size_t singleton_buffers_destroyed_;
 258    :  
 259    :    // Under lock_.
 260    :    base::ConditionVariable allocating_buffers_;
 261    :    bool allocating_buffers_state_;
 262    :  };
 263    :  
 264    :  typedef scoped_refptr<TestSession> TestSessionPtr;
 265    :  
 266    :  class TestService : public Service {
 267    :   public:
 268    :    explicit TestService(BufferConsumerFactory* factory)
 269    :        : Service(factory),
 270  E :          process_id_(0xfafafa) {
 271  E :    }
 272    :  
 273  E :    TestSessionPtr CreateTestSession() {
 274  E :      scoped_refptr<Session> session;
 275  E :      if (!GetNewSession(++process_id_, &session))
 276  i :        return NULL;
 277    :  
 278  E :      return TestSessionPtr(static_cast<TestSession*>(session.get()));
 279  E :    }
 280    :  
 281  E :    size_t num_active_sessions() const { return num_active_sessions_; }
 282    :  
 283    :   protected:
 284  E :    virtual Session* CreateSession() override { return new TestSession(this); }
 285    :  
 286    :   private:
  287    :    uint32 process_id_;  // Under lock_.
 288    :  };
 289    :  
 290    :  class SessionTest : public ::testing::Test {
 291    :   public:
 292    :    SessionTest()
 293    :        : consumer_thread_("session-test-consumer-thread"),
 294    :          consumer_thread_has_started_(
 295    :              consumer_thread_.StartWithOptions(
 296    :                  base::Thread::Options(base::MessageLoop::TYPE_IO, 0))),
 297    :          session_trace_file_writer_factory_(consumer_thread_.message_loop()),
 298    :          call_trace_service_(&session_trace_file_writer_factory_),
 299    :          rpc_service_instance_manager_(&call_trace_service_),
 300    :          worker1_("Worker1"),
 301  E :          worker2_("Worker2") {
 302  E :    }
 303    :  
 304  E :    virtual void SetUp() override {
 305  E :      testing::Test::SetUp();
 306    :  
 307  E :      ASSERT_TRUE(consumer_thread_has_started_);
 308  E :      EXPECT_EQ(0, call_trace_service_.num_active_sessions());
 309  E :      EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());
 310    :  
  311    :    // Set up the buffer management to make it easy to force buffer contention.
 312  E :      call_trace_service_.set_num_incremental_buffers(2);
 313  E :      call_trace_service_.set_buffer_size_in_bytes(8192);
 314    :  
 315    :      // Create a temporary directory for the call trace files.
 316  E :      ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
 317  E :      ASSERT_TRUE(session_trace_file_writer_factory_.SetTraceFileDirectory(
 318    :          temp_dir_.path()));
 319    :  
 320    :      // We give the service instance a "unique" id so that it does not interfere
 321    :      // with any other instances or tests that might be concurrently active.
 322  E :      std::string instance_id(base::StringPrintf("%d", ::GetCurrentProcessId()));
 323  E :      call_trace_service_.set_instance_id(base::UTF8ToWide(instance_id));
 324    :  
 325    :      // The instance id needs to be in the environment to be picked up by the
 326    :      // client library. We prefix the existing environment variable, if any.
 327  E :      scoped_ptr<base::Environment> env(base::Environment::Create());
 328  E :      ASSERT_FALSE(env.get() == NULL);
 329  E :      std::string env_var;
 330  E :      env->GetVar(::kSyzygyRpcInstanceIdEnvVar, &env_var);
 331  E :      env_var.insert(0, ";");
 332  E :      env_var.insert(0, instance_id);
 333  E :      ASSERT_TRUE(env->SetVar(::kSyzygyRpcInstanceIdEnvVar, env_var));
 334    :  
 335    :      // Start our worker threads so we can use them later.
 336  E :      ASSERT_TRUE(worker1_.Start());
 337  E :      ASSERT_TRUE(worker2_.Start());
 338  E :    }
 339    :  
 340  E :    virtual void TearDown() override {
 341    :      // Stop the worker threads.
 342  E :      worker2_.Stop();
 343  E :      worker1_.Stop();
 344    :  
 345    :      // Stop the call trace service.
 346  E :      EXPECT_TRUE(call_trace_service_.Stop());
 347  E :      EXPECT_FALSE(call_trace_service_.is_running());
 348  E :      EXPECT_EQ(0, call_trace_service_.num_active_sessions());
 349  E :      EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());
 350  E :    }
 351    :  
 352    :   protected:
  353    :    // The thread on which the trace file writer will consume buffers, and a
  354    :    // helper variable whose initialization we use as a trigger to start the
  355    :    // thread (ensuring its message_loop is created). These declarations MUST
  356    :    // remain in this order and precede session_trace_file_writer_factory_.
 357    :    base::Thread consumer_thread_;
 358    :    bool consumer_thread_has_started_;
 359    :  
 360    :    // The call trace service related objects. These declarations MUST be in
 361    :    // this order.
 362    :    TestSessionTraceFileWriterFactory session_trace_file_writer_factory_;
 363    :    TestService call_trace_service_;
 364    :    RpcServiceInstanceManager rpc_service_instance_manager_;
 365    :  
 366    :    // The directory where trace file output will be written.
 367    :    base::ScopedTempDir temp_dir_;
 368    :  
 369    :    // A couple of worker threads where we can dispatch closures.
 370    :    base::Thread worker1_;
 371    :    base::Thread worker2_;
 372    :  };
 373    :  
 374  E :  void GetNextBuffer(Session* session, Buffer** buffer, bool* result) {
 375  E :    DCHECK(session != NULL);
 376  E :    DCHECK(buffer != NULL);
 377  E :    DCHECK(result != NULL);
 378  E :    *buffer = NULL;
 379  E :    *result = session->GetNextBuffer(buffer);
 380  E :  }
 381    :  
 382    :  }  // namespace
 383    :  
 384  E :  TEST_F(SessionTest, ReturnBufferWorksAfterSessionClose) {
 385  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 386    :  
 387  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 388  E :    ASSERT_TRUE(session != NULL);
 389    :  
 390  E :    Buffer* buffer1 = NULL;
 391  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 392  E :    ASSERT_TRUE(buffer1 != NULL);
 393    :  
 394  E :    ASSERT_TRUE(session->Close());
 395    :  
 396    :    // Closing the session should have forced all buffers to be submitted to
 397    :    // the write queue.
 398  E :    ASSERT_EQ(Buffer::kPendingWrite, buffer1->state);
 399    :  
 400    :    // A request for another buffer should fail.
 401  E :    Buffer* buffer2 = NULL;
 402  E :    ASSERT_FALSE(session->GetNextBuffer(&buffer2));
 403  E :    ASSERT_TRUE(buffer2 == NULL);
 404    :  
 405    :    // Returning the original buffer should be a noop, but it should succeed.
 406    :    // Most of all, it shouldn't cause a race condition.
 407  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 408    :  
 409    :    // Let's allow the outstanding buffers to be written.
 410  E :    session->AllowBuffersToBeRecycled(9999);
 411  E :  }
 412    :  
 413  E :  TEST_F(SessionTest, BackPressureWorks) {
 414    :    // Configure things so that back-pressure will be easily forced.
 415  E :    call_trace_service_.set_max_buffers_pending_write(1);
 416  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 417    :  
 418  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 419  E :    ASSERT_TRUE(session != NULL);
 420    :  
 421  E :    Buffer* buffer1 = NULL;
 422  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 423  E :    ASSERT_TRUE(buffer1 != NULL);
 424    :  
 425  E :    Buffer* buffer2 = NULL;
 426  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer2));
 427  E :    ASSERT_TRUE(buffer2 != NULL);
 428    :  
 429    :    // Return both buffers so we have 2 pending writes. Neither of these will
 430    :    // go through because we have not allowed any buffers to be written yet.
 431  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 432  E :    ASSERT_TRUE(session->ReturnBuffer(buffer2));
 433    :  
 434    :    // We don't care about events up until this point.
 435  E :    session->ClearWaitingForBufferToBeRecycledState();
 436    :  
 437    :    // Start the buffer getter. This launches another thread that will try to
 438    :    // get another buffer. This will be blocked because of the pending writes.
 439  E :    bool result3 = false;
 440  E :    Buffer* buffer3 = NULL;
 441    :    base::Closure buffer_getter3 = base::Bind(
 442  E :        &GetNextBuffer, session, &buffer3, &result3);
 443  E :    worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);
 444    :  
 445    :    // Wait for the session to start applying back-pressure. This occurs when it
 446    :    // has indicated that it is waiting for a buffer to be written.
 447  E :    session->PauseUntilWaitingForBufferToBeRecycled();
 448    :  
 449    :    // Allow a single buffer to be written.
 450  E :    session->AllowBuffersToBeRecycled(1);
 451    :  
 452    :    // Wait for the buffer getter to complete.
 453  E :    worker1_.Stop();
 454    :  
  455    :    // Ensure we got the recycled buffer, i.e. the forced wait worked.
 456  E :    ASSERT_TRUE(result3);
 457  E :    ASSERT_EQ(buffer1, buffer3);
 458    :  
 459    :    // Return the last buffer and allow everything to be written.
 460  E :    ASSERT_TRUE(session->ReturnBuffer(buffer3));
 461  E :    session->AllowBuffersToBeRecycled(9999);
 462  E :  }
 463    :  
 464  E :  TEST_F(SessionTest, BackPressureIsLimited) {
 465    :    // Configure things so that back-pressure will be easily forced.
 466  E :    call_trace_service_.set_max_buffers_pending_write(1);
 467  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 468    :  
 469  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 470  E :    ASSERT_TRUE(session != NULL);
 471    :  
 472  E :    Buffer* buffer1 = NULL;
 473  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 474  E :    ASSERT_TRUE(buffer1 != NULL);
 475    :  
 476  E :    Buffer* buffer2 = NULL;
 477  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer2));
 478  E :    ASSERT_TRUE(buffer2 != NULL);
 479    :  
 480    :    // Return both buffers so we have 2 pending writes. Neither of these will
 481    :    // go through because we have not allowed any buffers to be written yet.
 482  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 483  E :    ASSERT_TRUE(session->ReturnBuffer(buffer2));
 484    :  
  485    :    // Since the back-pressure threshold is 1 and we have 2 pending buffers,
  486    :    // recycling 1 will bring us below the back-pressure threshold. Thus,
 487    :    // if we pile on a lot of buffer requests, only the first one should apply
 488    :    // back-pressure, and the next ones should cause an allocation.
 489    :  
 490    :    // We don't care about events up until this point.
 491  E :    session->ClearWaitingForBufferToBeRecycledState();
 492  E :    session->ClearAllocatingBuffersState();
 493    :  
 494  E :    bool result3 = false;
 495  E :    Buffer* buffer3 = NULL;
 496    :    base::Closure buffer_getter3 = base::Bind(
 497  E :        &GetNextBuffer, session, &buffer3, &result3);
 498  E :    worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);
 499    :  
 500    :    // Wait for the session to start applying back-pressure. This occurs when it
 501    :    // has indicated that it is waiting for a buffer to be written.
 502  E :    session->PauseUntilWaitingForBufferToBeRecycled();
 503    :  
 504    :    // At this point, there should be only one getter applying back pressure.
 505  E :    ASSERT_EQ(1u, session->buffer_requests_waiting_for_recycle());
 506    :  
  507    :    // Allocate yet another buffer on a second worker thread; this forces an
  508    :    // allocation which in turn will satisfy as many waits as there are buffers allocated.
 509  E :    bool result4 = false;
 510  E :    Buffer* buffer4 = NULL;
 511    :    base::Closure buffer_getter4 = base::Bind(
 512  E :        &GetNextBuffer, session, &buffer4, &result4);
 513  E :    worker2_.message_loop()->PostTask(FROM_HERE, buffer_getter4);
 514    :  
 515    :    // Similarly, wait for an allocation. The second buffer getter should cause
 516    :    // one to occur.
 517  E :    session->PauseUntilAllocatingBuffers();
 518    :  
 519    :    // Allow a single buffer to be written.
 520  E :    session->AllowBuffersToBeRecycled(1);
 521    :  
 522    :    // Wait for the buffer getters to complete.
 523  E :    worker1_.Stop();
 524  E :    worker2_.Stop();
 525  E :    ASSERT_TRUE(result3);
 526  E :    ASSERT_TRUE(result4);
 527    :  
 528    :    // We can't guarantee where the returned buffers come from (recycled or
 529    :    // not), just that they should be returned.
 530  E :    ASSERT_TRUE(buffer3 != NULL);
 531  E :    ASSERT_TRUE(buffer4 != NULL);
 532    :  
 533    :    // Return the last 2 buffers and allow everything to be written.
 534  E :    ASSERT_TRUE(session->ReturnBuffer(buffer3));
 535  E :    ASSERT_TRUE(session->ReturnBuffer(buffer4));
 536  E :    session->AllowBuffersToBeRecycled(9999);
 537  E :  }
 538    :  
 539  E :  TEST_F(SessionTest, LargeBufferRequestAvoidsBackPressure) {
 540    :    // Configure things so that back-pressure will be easily forced.
 541  E :    call_trace_service_.set_max_buffers_pending_write(1);
 542  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 543    :  
 544  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 545  E :    ASSERT_TRUE(session != NULL);
 546    :  
 547  E :    Buffer* buffer1 = NULL;
 548  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 549  E :    ASSERT_TRUE(buffer1 != NULL);
 550    :  
 551  E :    Buffer* buffer2 = NULL;
 552  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer2));
 553  E :    ASSERT_TRUE(buffer2 != NULL);
 554    :  
 555    :    // Return both buffers so we have 2 pending writes. Neither of these will
 556    :    // go through because we have not allowed any buffers to be written yet.
 557  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 558  E :    ASSERT_TRUE(session->ReturnBuffer(buffer2));
 559    :  
 560    :    // Ask for a big buffer. This should go through immediately and side-step the
 561    :    // usual buffer pool. Thus, it is not subject to back-pressure.
 562  E :    Buffer* buffer3 = NULL;
 563  E :    ASSERT_TRUE(session->GetBuffer(10 * 1024 * 1024, &buffer3));
 564  E :    ASSERT_EQ(10u * 1024 * 1024, buffer3->mapping_size);
 565  E :    ASSERT_EQ(10u * 1024 * 1024, buffer3->buffer_size);
 566  E :    ASSERT_EQ(0u, buffer3->buffer_offset);
 567    :  
 568    :    // Return the buffer and allow them all to be recycled.
 569  E :    ASSERT_TRUE(session->ReturnBuffer(buffer3));
 570  E :    session->AllowBuffersToBeRecycled(9999);
 571    :  
 572    :    // Wait until the singleton buffer has been destroyed.
 573  E :    session->PauseUntilDestroyingSingletonBuffer();
 574  E :    ASSERT_EQ(1, session->singleton_buffers_destroyed_);
 575  E :    ASSERT_EQ(buffer3, session->last_singleton_buffer_destroyed_);
 576  E :  }
 577    :  
 578    :  }  // namespace service
 579    :  }  // namespace trace

Coverage information generated Thu Jan 14 17:40:38 2016.