Coverage for /Syzygy/trace/service/session_unittest.cc

Coverage summary — lines executed / instrumented / missing: 99.6% (263 / 264 / 0). Language: C++. Group: test.

Line-by-line coverage:

   1    :  // Copyright 2012 Google Inc. All Rights Reserved.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  
  15    :  #include "syzygy/trace/service/session.h"
  16    :  
  17    :  #include "base/atomicops.h"
  18    :  #include "base/bind.h"
  19    :  #include "base/callback.h"
  20    :  #include "base/environment.h"
  21    :  #include "base/file_util.h"
  22    :  #include "base/stringprintf.h"
  23    :  #include "base/utf_string_conversions.h"
  24    :  #include "base/files/scoped_temp_dir.h"
  25    :  #include "base/memory/scoped_ptr.h"
  26    :  #include "base/threading/thread.h"
  27    :  #include "gtest/gtest.h"
  28    :  #include "syzygy/trace/protocol/call_trace_defs.h"
  29    :  #include "syzygy/trace/service/service.h"
  30    :  #include "syzygy/trace/service/service_rpc_impl.h"
  31    :  #include "syzygy/trace/service/trace_file_writer.h"
  32    :  #include "syzygy/trace/service/trace_file_writer_factory.h"
  33    :  
  34    :  namespace trace {
  35    :  namespace service {
  36    :  
  37    :  namespace {
  38    :  
  39    :  class TestTraceFileWriter : public TraceFileWriter {
  40    :   public:
  41    :    explicit TestTraceFileWriter(MessageLoop* message_loop,
  42    :                                 const base::FilePath& trace_directory)
  43    :        : TraceFileWriter(message_loop, trace_directory),
  44  E :          num_buffers_to_recycle_(0) {
  45  E :      base::subtle::Barrier_AtomicIncrement(&num_instances_, 1);
  46  E :    }
  47    :  
  48  E :    ~TestTraceFileWriter() {
  49  E :      base::subtle::Barrier_AtomicIncrement(&num_instances_, -1);
  50  E :    }
  51    :  
  52  E :    void RecycleBuffers() {
  53  E :      queue_lock_.AssertAcquired();
  54    :  
  55  E :      while (!queue_.empty() && num_buffers_to_recycle_ != 0) {
  56  E :        Buffer* buffer = queue_.front();
  57  E :        queue_.pop_front();
  58    :  
  59  E :        ASSERT_TRUE(buffer != NULL);
  60  E :        ASSERT_EQ(buffer->session, session_ref_.get());
  61  E :        ASSERT_TRUE(
  62    :          TraceFileWriter::ConsumeBuffer(buffer));
  63    :  
  64  E :        --num_buffers_to_recycle_;
  65  E :      }
  66    :  
  67    :      // If we've emptied the queue, release our reference to the session.
  68  E :      if (queue_.empty())
  69  E :        session_ref_ = reinterpret_cast<Session*>(NULL);
  70  E :    }
  71    :  
  72  E :    void AllowBuffersToBeRecycled(size_t num_buffers) {
  73  E :      base::AutoLock auto_lock(queue_lock_);
  74    :  
  75  E :      num_buffers_to_recycle_ = num_buffers;
  76  E :      RecycleBuffers();
  77  E :    }
  78    :  
  79  E :    virtual bool ConsumeBuffer(Buffer* buffer) OVERRIDE {
  80  E :      base::AutoLock auto_lock(queue_lock_);
  81  E :      EXPECT_TRUE(buffer != NULL);
  82  E :      if (buffer) {
  83    :        // While there are buffers in the queue, keep a reference to the session.
  84  E :        if (queue_.empty()) {
  85  E :          EXPECT_TRUE(session_ref_.get() == NULL);
  86  E :          EXPECT_TRUE(buffer->session != NULL);
  87  E :          session_ref_ = buffer->session;
  88    :        }
  89    :  
  90    :        // Put the buffer into the consumer queue.
  91  E :        queue_.push_back(buffer);
  92    :      }
  93    :  
  94  E :      RecycleBuffers();
  95    :  
  96  E :      return buffer != NULL;
  97  E :    }
  98    :  
  99  E :    static base::subtle::Atomic32 num_instances() {
 100  E :      return base::subtle::Acquire_Load(&num_instances_);
 101  E :    }
 102    :  
 103    :   protected:
 104    :    // The queue of buffers to be consumed.
 105    :    std::deque<Buffer*> queue_;
 106    :  
 107    :    // This keeps the session object alive while there are buffers in the queue.
 108    :    scoped_refptr<Session> session_ref_;
 109    :  
 110    :    // A lock to protect access to the queue and session reference.
 111    :    base::Lock queue_lock_;
 112    :  
  113    :    // The number of buffers to recycle before pausing.
 114    :    size_t num_buffers_to_recycle_;
 115    :  
 116    :    // The number of active writer instances.
 117    :    // @note All accesses to this member should be via base/atomicops.h functions.
 118    :    static volatile base::subtle::Atomic32 num_instances_;
 119    :  };
 120    :  
 121    :  volatile base::subtle::Atomic32 TestTraceFileWriter::num_instances_ = 0;
 122    :  
 123    :  class TestTraceFileWriterFactory : public TraceFileWriterFactory {
 124    :   public:
 125  E :     explicit TestTraceFileWriterFactory(MessageLoop* message_loop)
 126    :         : TraceFileWriterFactory(message_loop) {
 127  E :     }
 128    :  
 129  E :     bool CreateConsumer(scoped_refptr<BufferConsumer>* consumer) OVERRIDE {
 130    :       // w00t, somewhat bogus coverage ploy, at least will reuse the DCHECKS.
 131  E :       EXPECT_TRUE(TraceFileWriterFactory::CreateConsumer(consumer));
 132  E :       EXPECT_TRUE((*consumer)->HasOneRef());
 133    :  
 134  E :       *consumer = new TestTraceFileWriter(message_loop_, trace_file_directory_);
 135  E :       return true;
 136  E :     }
 137    :  };
 138    :  
 139    :  class TestSession : public Session {
 140    :   public:
 141    :    explicit TestSession(Service* service)
 142    :        : Session(service),
 143    :          waiting_for_buffer_to_be_recycled_(&lock_),
 144    :          waiting_for_buffer_to_be_recycled_state_(false),
 145    :          destroying_singleton_buffer_(&lock_),
 146    :          destroying_singleton_buffer_state_(false),
 147    :          last_singleton_buffer_destroyed_(NULL),
 148    :          singleton_buffers_destroyed_(0),
 149    :          allocating_buffers_(&lock_),
 150  E :          allocating_buffers_state_(false) {
 151  E :    }
 152    :  
 153  E :    void AllowBuffersToBeRecycled(size_t num_buffers) {
 154    :      static_cast<TestTraceFileWriter*>(
 155  E :          buffer_consumer())->AllowBuffersToBeRecycled(num_buffers);
 156  E :    }
 157    :  
 158  E :    void ClearWaitingForBufferToBeRecycledState() {
 159  E :      base::AutoLock lock(lock_);
 160  E :      waiting_for_buffer_to_be_recycled_state_ = false;
 161  E :    }
 162    :  
 163  E :    void PauseUntilWaitingForBufferToBeRecycled() {
 164  E :      base::AutoLock lock(lock_);
 165  E :      while (!waiting_for_buffer_to_be_recycled_state_)
 166  E :        waiting_for_buffer_to_be_recycled_.Wait();
 167  E :      waiting_for_buffer_to_be_recycled_state_ = false;
 168  E :    }
 169    :  
 170    :    void ClearDestroyingSingletonBufferState() {
 171    :      base::AutoLock lock(lock_);
 172    :      destroying_singleton_buffer_state_ = false;
 173    :    }
 174    :  
 175  E :    void PauseUntilDestroyingSingletonBuffer() {
 176  E :      base::AutoLock lock(lock_);
 177  E :      while (!destroying_singleton_buffer_state_)
 178  E :        destroying_singleton_buffer_.Wait();
 179  E :      destroying_singleton_buffer_state_ = true;
 180  E :    }
 181    :  
 182  E :    void ClearAllocatingBuffersState() {
 183  E :      base::AutoLock lock(lock_);
 184  E :      allocating_buffers_state_ = false;
 185  E :    }
 186    :  
 187  E :    void PauseUntilAllocatingBuffers() {
 188  E :      base::AutoLock lock(lock_);
 189  E :      while (!allocating_buffers_state_)
 190  E :        allocating_buffers_.Wait();
 191  E :      waiting_for_buffer_to_be_recycled_state_ = false;
 192  E :    }
 193    :  
 194  E :    size_t buffer_requests_waiting_for_recycle() {
 195  E :      base::AutoLock lock(lock_);
 196  E :      return buffer_requests_waiting_for_recycle_;
 197  E :    }
 198    :  
 199  E :    virtual void OnWaitingForBufferToBeRecycled() OVERRIDE {
 200  E :      lock_.AssertAcquired();
 201  E :      waiting_for_buffer_to_be_recycled_state_ = true;
 202  E :      waiting_for_buffer_to_be_recycled_.Signal();
 203  E :    }
 204    :  
 205  E :    virtual void OnDestroySingletonBuffer(Buffer* buffer) OVERRIDE {
 206  E :      lock_.AssertAcquired();
 207  E :      last_singleton_buffer_destroyed_ = buffer;
 208  E :      singleton_buffers_destroyed_++;
 209  E :      destroying_singleton_buffer_state_ = true;
 210  E :      destroying_singleton_buffer_.Signal();
 211  E :    }
 212    :  
 213    :    bool InitializeProcessInfo(ProcessId process_id,
 214  E :                               ProcessInfo* client) OVERRIDE {
 215  E :      DCHECK(client != NULL);
 216    :  
 217    :      // Lobotomize the process info initialization to allow using fake PIDs.
 218  E :      client->process_id = process_id;
 219    :      const DWORD kFlags =
 220  E :          PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
 221    :      client->process_handle.Set(
 222  E :          ::OpenProcess(kFlags, FALSE, ::GetCurrentProcessId()));
 223    :      static const wchar_t kEnvironment[] = L"asdf=fofofo\0";
 224    :      client->environment.assign(kEnvironment,
 225  E :                                 kEnvironment + arraysize(kEnvironment));
 226    :  
 227  E :      return true;
 228  E :    }
 229    :  
 230    :    bool CopyBufferHandleToClient(HANDLE client_process_handle,
 231    :                                  HANDLE local_handle,
 232  E :                                  HANDLE* client_copy) OVERRIDE {
 233    :      // Avoid handle leaks by using the same handle for both "ends".
 234  E :      *client_copy = local_handle;
 235  E :      return true;
 236  E :    }
 237    :  
 238  E :    virtual bool AllocateBuffers(size_t count, size_t size) OVERRIDE {
 239  E :      lock_.AssertAcquired();
 240    :  
 241  E :      allocating_buffers_state_ = true;
 242  E :      allocating_buffers_.Signal();
 243    :  
 244    :      // Forward this to the original implementation.
 245  E :      return Session::AllocateBuffers(count, size);
 246  E :    }
 247    :  
 248    :    // Under lock_.
 249    :    base::ConditionVariable waiting_for_buffer_to_be_recycled_;
 250    :    bool waiting_for_buffer_to_be_recycled_state_;
 251    :  
 252    :    // Under lock_.
 253    :    base::ConditionVariable destroying_singleton_buffer_;
 254    :    bool destroying_singleton_buffer_state_;
 255    :    Buffer* last_singleton_buffer_destroyed_;
 256    :    size_t singleton_buffers_destroyed_;
 257    :  
 258    :    // Under lock_.
 259    :    base::ConditionVariable allocating_buffers_;
 260    :    bool allocating_buffers_state_;
 261    :  };
 262    :  
 263    :  typedef scoped_refptr<TestSession> TestSessionPtr;
 264    :  
 265    :  class TestService : public Service {
 266    :   public:
 267    :    explicit TestService(BufferConsumerFactory* factory)
 268    :        : Service(factory),
 269  E :          process_id_(0xfafafa) {
 270  E :    }
 271    :  
 272  E :    TestSessionPtr CreateTestSession() {
 273  E :      scoped_refptr<Session> session;
 274  E :      if (!GetNewSession(++process_id_, &session))
 275  i :        return NULL;
 276    :  
 277  E :      return TestSessionPtr(static_cast<TestSession*>(session.get()));
 278  E :    }
 279    :  
 280  E :    size_t num_active_sessions() const { return num_active_sessions_; }
 281    :  
 282    :   protected:
 283  E :    virtual Session* CreateSession() OVERRIDE {
 284  E :      return new TestSession(this);
 285  E :    }
 286    :  
 287    :   private:
 288    :    uint32 process_id_;  // Under lock_;
 289    :  };
 290    :  
 291    :  class SessionTest : public ::testing::Test {
 292    :   public:
 293    :    SessionTest()
 294    :        : consumer_thread_("session-test-consumer-thread"),
 295    :          consumer_thread_has_started_(
 296    :              consumer_thread_.StartWithOptions(
 297    :                  base::Thread::Options(MessageLoop::TYPE_IO, 0))),
 298    :          trace_file_writer_factory_(consumer_thread_.message_loop()),
 299    :          call_trace_service_(&trace_file_writer_factory_),
 300    :          rpc_service_instance_manager_(&call_trace_service_),
 301    :          worker1_("Worker1"),
 302  E :          worker2_("Worker2") {
 303  E :    }
 304    :  
 305  E :    virtual void SetUp() OVERRIDE {
 306  E :      testing::Test::SetUp();
 307    :  
 308  E :      ASSERT_TRUE(consumer_thread_has_started_);
 309  E :      EXPECT_EQ(0, call_trace_service_.num_active_sessions());
 310  E :      EXPECT_EQ(0, TestTraceFileWriter::num_instances());
 311    :  
 312    :      // Setup the buffer management to make it easy to force buffer contention.
 313  E :      call_trace_service_.set_num_incremental_buffers(2);
 314  E :      call_trace_service_.set_buffer_size_in_bytes(8192);
 315    :  
 316    :      // Create a temporary directory for the call trace files.
 317  E :      ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
 318  E :      ASSERT_TRUE(
 319    :          trace_file_writer_factory_.SetTraceFileDirectory(temp_dir_.path()));
 320    :  
 321    :      // We give the service instance a "unique" id so that it does not interfere
 322    :      // with any other instances or tests that might be concurrently active.
 323  E :      std::string instance_id(base::StringPrintf("%d", ::GetCurrentProcessId()));
 324  E :      call_trace_service_.set_instance_id(::UTF8ToWide(instance_id));
 325    :  
 326    :      // The instance id needs to be in the environment to be picked up by the
 327    :      // client library. We prefix the existing environment variable, if any.
 328  E :      scoped_ptr<base::Environment> env(base::Environment::Create());
 329  E :      ASSERT_FALSE(env.get() == NULL);
 330  E :      std::string env_var;
 331  E :      env->GetVar(::kSyzygyRpcInstanceIdEnvVar, &env_var);
 332  E :      env_var.insert(0, ";");
 333  E :      env_var.insert(0, instance_id);
 334  E :      ASSERT_TRUE(env->SetVar(::kSyzygyRpcInstanceIdEnvVar, env_var));
 335    :  
 336    :      // Start our worker threads so we can use them later.
 337  E :      ASSERT_TRUE(worker1_.Start());
 338  E :      ASSERT_TRUE(worker2_.Start());
 339  E :    }
 340    :  
 341  E :    virtual void TearDown() OVERRIDE {
 342    :      // Stop the worker threads.
 343  E :      worker2_.Stop();
 344  E :      worker1_.Stop();
 345    :  
 346    :      // Stop the call trace service.
 347  E :      EXPECT_TRUE(call_trace_service_.Stop());
 348  E :      EXPECT_FALSE(call_trace_service_.is_running());
 349  E :      EXPECT_EQ(0, call_trace_service_.num_active_sessions());
 350  E :      EXPECT_EQ(0, TestTraceFileWriter::num_instances());
 351  E :    }
 352    :  
 353    :   protected:
  354    :    // The thread on which the trace file writer will consume buffers, and a
  355    :    // helper variable whose initialization we use as a trigger to start the
  356    :    // thread (ensuring its message_loop is created). These declarations MUST
  357    :    // remain in this order and precede that of trace_file_writer_factory_;
 358    :    base::Thread consumer_thread_;
 359    :    bool consumer_thread_has_started_;
 360    :  
 361    :    // The call trace service related objects. These declarations MUST be in
 362    :    // this order.
 363    :    TestTraceFileWriterFactory trace_file_writer_factory_;
 364    :    TestService call_trace_service_;
 365    :    RpcServiceInstanceManager rpc_service_instance_manager_;
 366    :  
 367    :    // The directory where trace file output will be written.
 368    :    base::ScopedTempDir temp_dir_;
 369    :  
 370    :    // A couple of worker threads where we can dispatch closures.
 371    :    base::Thread worker1_;
 372    :    base::Thread worker2_;
 373    :  };
 374    :  
 375  E :  void GetNextBuffer(Session* session, Buffer** buffer, bool* result) {
 376  E :    DCHECK(session != NULL);
 377  E :    DCHECK(buffer != NULL);
 378  E :    DCHECK(result != NULL);
 379  E :    *buffer = NULL;
 380  E :    *result = session->GetNextBuffer(buffer);
 381  E :  }
 382    :  
 383    :  }  // namespace
 384    :  
 385  E :  TEST_F(SessionTest, ReturnBufferWorksAfterSessionClose) {
 386  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 387    :  
 388  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 389  E :    ASSERT_TRUE(session != NULL);
 390    :  
 391  E :    Buffer* buffer1 = NULL;
 392  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 393  E :    ASSERT_TRUE(buffer1 != NULL);
 394    :  
 395  E :    ASSERT_TRUE(session->Close());
 396    :  
 397    :    // Closing the session should have forced all buffers to be submitted to
 398    :    // the write queue.
 399  E :    ASSERT_EQ(Buffer::kPendingWrite, buffer1->state);
 400    :  
 401    :    // A request for another buffer should fail.
 402  E :    Buffer* buffer2 = NULL;
 403  E :    ASSERT_FALSE(session->GetNextBuffer(&buffer2));
 404  E :    ASSERT_TRUE(buffer2 == NULL);
 405    :  
 406    :    // Returning the original buffer should be a noop, but it should succeed.
 407    :    // Most of all, it shouldn't cause a race condition.
 408  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 409    :  
 410    :    // Let's allow the outstanding buffers to be written.
 411  E :    session->AllowBuffersToBeRecycled(9999);
 412  E :  }
 413    :  
 414  E :  TEST_F(SessionTest, BackPressureWorks) {
 415    :    // Configure things so that back-pressure will be easily forced.
 416  E :    call_trace_service_.set_max_buffers_pending_write(1);
 417  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 418    :  
 419  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 420  E :    ASSERT_TRUE(session != NULL);
 421    :  
 422  E :    Buffer* buffer1 = NULL;
 423  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 424  E :    ASSERT_TRUE(buffer1 != NULL);
 425    :  
 426  E :    Buffer* buffer2 = NULL;
 427  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer2));
 428  E :    ASSERT_TRUE(buffer2 != NULL);
 429    :  
 430    :    // Return both buffers so we have 2 pending writes. Neither of these will
 431    :    // go through because we have not allowed any buffers to be written yet.
 432  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 433  E :    ASSERT_TRUE(session->ReturnBuffer(buffer2));
 434    :  
 435    :    // We don't care about events up until this point.
 436  E :    session->ClearWaitingForBufferToBeRecycledState();
 437    :  
 438    :    // Start the buffer getter. This launches another thread that will try to
 439    :    // get another buffer. This will be blocked because of the pending writes.
 440  E :    bool result3 = false;
 441  E :    Buffer* buffer3 = NULL;
 442    :    base::Closure buffer_getter3 = base::Bind(
 443  E :        &GetNextBuffer, session, &buffer3, &result3);
 444  E :    worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);
 445    :  
 446    :    // Wait for the session to start applying back-pressure. This occurs when it
 447    :    // has indicated that it is waiting for a buffer to be written.
 448  E :    session->PauseUntilWaitingForBufferToBeRecycled();
 449    :  
 450    :    // Allow a single buffer to be written.
 451  E :    session->AllowBuffersToBeRecycled(1);
 452    :  
 453    :    // Wait for the buffer getter to complete.
 454  E :    worker1_.Stop();
 455    :  
 456    :    // Ensure the buffer was a recycled forced wait.
 457  E :    ASSERT_TRUE(result3);
 458  E :    ASSERT_EQ(buffer1, buffer3);
 459    :  
 460    :    // Return the last buffer and allow everything to be written.
 461  E :    ASSERT_TRUE(session->ReturnBuffer(buffer3));
 462  E :    session->AllowBuffersToBeRecycled(9999);
 463  E :  }
 464    :  
 465  E :  TEST_F(SessionTest, BackPressureIsLimited) {
 466    :    // Configure things so that back-pressure will be easily forced.
 467  E :    call_trace_service_.set_max_buffers_pending_write(1);
 468  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 469    :  
 470  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 471  E :    ASSERT_TRUE(session != NULL);
 472    :  
 473  E :    Buffer* buffer1 = NULL;
 474  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 475  E :    ASSERT_TRUE(buffer1 != NULL);
 476    :  
 477  E :    Buffer* buffer2 = NULL;
 478  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer2));
 479  E :    ASSERT_TRUE(buffer2 != NULL);
 480    :  
 481    :    // Return both buffers so we have 2 pending writes. Neither of these will
 482    :    // go through because we have not allowed any buffers to be written yet.
 483  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 484  E :    ASSERT_TRUE(session->ReturnBuffer(buffer2));
 485    :  
 486    :    // Since the back-pressure threshold is 1 and we have 2 pending buffers
 487    :    // if 1 is recycled it will bring us below the back-pressure threshold. Thus
 488    :    // if we pile on a lot of buffer requests, only the first one should apply
 489    :    // back-pressure, and the next ones should cause an allocation.
 490    :  
 491    :    // We don't care about events up until this point.
 492  E :    session->ClearWaitingForBufferToBeRecycledState();
 493  E :    session->ClearAllocatingBuffersState();
 494    :  
 495  E :    bool result3 = false;
 496  E :    Buffer* buffer3 = NULL;
 497    :    base::Closure buffer_getter3 = base::Bind(
 498  E :        &GetNextBuffer, session, &buffer3, &result3);
 499  E :    worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);
 500    :  
 501    :    // Wait for the session to start applying back-pressure. This occurs when it
 502    :    // has indicated that it is waiting for a buffer to be written.
 503  E :    session->PauseUntilWaitingForBufferToBeRecycled();
 504    :  
 505    :    // At this point, there should be only one getter applying back pressure.
 506  E :    ASSERT_EQ(1u, session->buffer_requests_waiting_for_recycle());
 507    :  
 508    :    // Allocate yet another buffer on a new thread, this will force an allocation
 509    :    // which in turn will satisfy as many waits as there are buffers allocated.
 510  E :    bool result4 = false;
 511  E :    Buffer* buffer4 = NULL;
 512    :    base::Closure buffer_getter4 = base::Bind(
 513  E :        &GetNextBuffer, session, &buffer4, &result4);
 514  E :    worker2_.message_loop()->PostTask(FROM_HERE, buffer_getter4);
 515    :  
 516    :    // Similarly, wait for an allocation. The second buffer getter should cause
 517    :    // one to occur.
 518  E :    session->PauseUntilAllocatingBuffers();
 519    :  
 520    :    // Allow a single buffer to be written.
 521  E :    session->AllowBuffersToBeRecycled(1);
 522    :  
 523    :    // Wait for the buffer getters to complete.
 524  E :    worker1_.Stop();
 525  E :    worker2_.Stop();
 526  E :    ASSERT_TRUE(result3);
 527  E :    ASSERT_TRUE(result4);
 528    :  
 529    :    // We can't guarantee where the returned buffers come from (recycled or
 530    :    // not), just that they should be returned.
 531  E :    ASSERT_TRUE(buffer3 != NULL);
 532  E :    ASSERT_TRUE(buffer4 != NULL);
 533    :  
 534    :    // Return the last 2 buffers and allow everything to be written.
 535  E :    ASSERT_TRUE(session->ReturnBuffer(buffer3));
 536  E :    ASSERT_TRUE(session->ReturnBuffer(buffer4));
 537  E :    session->AllowBuffersToBeRecycled(9999);
 538  E :  }
 539    :  
 540  E :  TEST_F(SessionTest, LargeBufferRequestAvoidsBackPressure) {
 541    :    // Configure things so that back-pressure will be easily forced.
 542  E :    call_trace_service_.set_max_buffers_pending_write(1);
 543  E :    ASSERT_TRUE(call_trace_service_.Start(true));
 544    :  
 545  E :    TestSessionPtr session = call_trace_service_.CreateTestSession();
 546  E :    ASSERT_TRUE(session != NULL);
 547    :  
 548  E :    Buffer* buffer1 = NULL;
 549  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer1));
 550  E :    ASSERT_TRUE(buffer1 != NULL);
 551    :  
 552  E :    Buffer* buffer2 = NULL;
 553  E :    ASSERT_TRUE(session->GetNextBuffer(&buffer2));
 554  E :    ASSERT_TRUE(buffer2 != NULL);
 555    :  
 556    :    // Return both buffers so we have 2 pending writes. Neither of these will
 557    :    // go through because we have not allowed any buffers to be written yet.
 558  E :    ASSERT_TRUE(session->ReturnBuffer(buffer1));
 559  E :    ASSERT_TRUE(session->ReturnBuffer(buffer2));
 560    :  
 561    :    // Ask for a big buffer. This should go through immediately and side-step the
 562    :    // usual buffer pool. Thus, it is not subject to back-pressure.
 563  E :    Buffer* buffer3 = NULL;
 564  E :    ASSERT_TRUE(session->GetBuffer(10 * 1024 * 1024, &buffer3));
 565  E :    ASSERT_EQ(10u * 1024 * 1024, buffer3->mapping_size);
 566  E :    ASSERT_EQ(10u * 1024 * 1024, buffer3->buffer_size);
 567  E :    ASSERT_EQ(0u, buffer3->buffer_offset);
 568    :  
 569    :    // Return the buffer and allow them all to be recycled.
 570  E :    ASSERT_TRUE(session->ReturnBuffer(buffer3));
 571  E :    session->AllowBuffersToBeRecycled(9999);
 572    :  
 573    :    // Wait until the singleton buffer has been destroyed.
 574  E :    session->PauseUntilDestroyingSingletonBuffer();
 575  E :    ASSERT_EQ(1, session->singleton_buffers_destroyed_);
 576  E :    ASSERT_EQ(buffer3, session->last_singleton_buffer_destroyed_);
 577  E :  }
 578    :  
  579    :  }  // namespace service
  580    :  }  // namespace trace

Coverage information generated Thu Jul 04 09:34:53 2013.