1 : // Copyright 2012 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/trace/service/session.h"
16 :
17 : #include "base/atomicops.h"
18 : #include "base/bind.h"
19 : #include "base/callback.h"
20 : #include "base/environment.h"
21 : #include "base/file_util.h"
22 : #include "base/files/scoped_temp_dir.h"
23 : #include "base/memory/scoped_ptr.h"
24 : #include "base/strings/stringprintf.h"
25 : #include "base/strings/utf_string_conversions.h"
26 : #include "base/threading/thread.h"
27 : #include "gtest/gtest.h"
28 : #include "syzygy/trace/protocol/call_trace_defs.h"
29 : #include "syzygy/trace/service/service.h"
30 : #include "syzygy/trace/service/service_rpc_impl.h"
31 : #include "syzygy/trace/service/session_trace_file_writer.h"
32 : #include "syzygy/trace/service/session_trace_file_writer_factory.h"
33 :
34 : namespace trace {
35 : namespace service {
36 :
37 : namespace {
38 :
39 : class TestSessionTraceFileWriter : public SessionTraceFileWriter {
40 : public:
41 : explicit TestSessionTraceFileWriter(
42 : base::MessageLoop* message_loop, const base::FilePath& trace_directory)
43 : : SessionTraceFileWriter(message_loop, trace_directory),
44 E : num_buffers_to_recycle_(0) {
45 E : base::subtle::Barrier_AtomicIncrement(&num_instances_, 1);
46 E : }
47 :
48 E : ~TestSessionTraceFileWriter() {
49 E : base::subtle::Barrier_AtomicIncrement(&num_instances_, -1);
50 E : }
51 :
52 E : void RecycleBuffers() {
53 E : queue_lock_.AssertAcquired();
54 :
55 E : while (!queue_.empty() && num_buffers_to_recycle_ != 0) {
56 E : Buffer* buffer = queue_.front();
57 E : queue_.pop_front();
58 :
59 E : ASSERT_TRUE(buffer != NULL);
60 E : ASSERT_EQ(buffer->session, session_ref_.get());
61 E : ASSERT_TRUE(
62 : SessionTraceFileWriter::ConsumeBuffer(buffer));
63 :
64 E : --num_buffers_to_recycle_;
65 E : }
66 :
67 : // If we've emptied the queue, release our reference to the session.
68 E : if (queue_.empty())
69 E : session_ref_ = reinterpret_cast<Session*>(NULL);
70 E : }
71 :
72 E : void AllowBuffersToBeRecycled(size_t num_buffers) {
73 E : base::AutoLock auto_lock(queue_lock_);
74 :
75 E : num_buffers_to_recycle_ = num_buffers;
76 E : RecycleBuffers();
77 E : }
78 :
79 E : virtual bool ConsumeBuffer(Buffer* buffer) OVERRIDE {
80 E : base::AutoLock auto_lock(queue_lock_);
81 E : EXPECT_TRUE(buffer != NULL);
82 E : if (buffer) {
83 : // While there are buffers in the queue, keep a reference to the session.
84 E : if (queue_.empty()) {
85 E : EXPECT_TRUE(session_ref_.get() == NULL);
86 E : EXPECT_TRUE(buffer->session != NULL);
87 E : session_ref_ = buffer->session;
88 : }
89 :
90 : // Put the buffer into the consumer queue.
91 E : queue_.push_back(buffer);
92 : }
93 :
94 E : RecycleBuffers();
95 :
96 E : return buffer != NULL;
97 E : }
98 :
99 E : static base::subtle::Atomic32 num_instances() {
100 E : return base::subtle::Acquire_Load(&num_instances_);
101 E : }
102 :
103 : protected:
104 : // The queue of buffers to be consumed.
105 : std::deque<Buffer*> queue_;
106 :
107 : // This keeps the session object alive while there are buffers in the queue.
108 : scoped_refptr<Session> session_ref_;
109 :
110 : // A lock to protect access to the queue and session reference.
111 : base::Lock queue_lock_;
112 :
113 : // The number of buffers to recycle berfore pausing.
114 : size_t num_buffers_to_recycle_;
115 :
116 : // The number of active writer instances.
117 : // @note All accesses to this member should be via base/atomicops.h functions.
118 : static volatile base::subtle::Atomic32 num_instances_;
119 : };
120 :
121 : volatile base::subtle::Atomic32 TestSessionTraceFileWriter::num_instances_ = 0;
122 :
123 : class TestSessionTraceFileWriterFactory : public SessionTraceFileWriterFactory {
124 : public:
125 E : explicit TestSessionTraceFileWriterFactory(base::MessageLoop* message_loop)
126 : : SessionTraceFileWriterFactory(message_loop) {
127 E : }
128 :
129 E : bool CreateConsumer(scoped_refptr<BufferConsumer>* consumer) OVERRIDE {
130 : // w00t, somewhat bogus coverage ploy, at least will reuse the DCHECKS.
131 E : EXPECT_TRUE(SessionTraceFileWriterFactory::CreateConsumer(consumer));
132 E : EXPECT_TRUE((*consumer)->HasOneRef());
133 :
134 : *consumer = new TestSessionTraceFileWriter(
135 E : message_loop_, trace_file_directory_);
136 E : return true;
137 E : }
138 : };
139 :
140 : class TestSession : public Session {
141 : public:
142 : explicit TestSession(Service* service)
143 : : Session(service),
144 : waiting_for_buffer_to_be_recycled_(&lock_),
145 : waiting_for_buffer_to_be_recycled_state_(false),
146 : destroying_singleton_buffer_(&lock_),
147 : destroying_singleton_buffer_state_(false),
148 : last_singleton_buffer_destroyed_(NULL),
149 : singleton_buffers_destroyed_(0),
150 : allocating_buffers_(&lock_),
151 E : allocating_buffers_state_(false) {
152 E : }
153 :
154 E : void AllowBuffersToBeRecycled(size_t num_buffers) {
155 : static_cast<TestSessionTraceFileWriter*>(
156 E : buffer_consumer())->AllowBuffersToBeRecycled(num_buffers);
157 E : }
158 :
159 E : void ClearWaitingForBufferToBeRecycledState() {
160 E : base::AutoLock lock(lock_);
161 E : waiting_for_buffer_to_be_recycled_state_ = false;
162 E : }
163 :
164 E : void PauseUntilWaitingForBufferToBeRecycled() {
165 E : base::AutoLock lock(lock_);
166 E : while (!waiting_for_buffer_to_be_recycled_state_)
167 E : waiting_for_buffer_to_be_recycled_.Wait();
168 E : waiting_for_buffer_to_be_recycled_state_ = false;
169 E : }
170 :
171 : void ClearDestroyingSingletonBufferState() {
172 : base::AutoLock lock(lock_);
173 : destroying_singleton_buffer_state_ = false;
174 : }
175 :
176 E : void PauseUntilDestroyingSingletonBuffer() {
177 E : base::AutoLock lock(lock_);
178 E : while (!destroying_singleton_buffer_state_)
179 E : destroying_singleton_buffer_.Wait();
180 E : destroying_singleton_buffer_state_ = true;
181 E : }
182 :
183 E : void ClearAllocatingBuffersState() {
184 E : base::AutoLock lock(lock_);
185 E : allocating_buffers_state_ = false;
186 E : }
187 :
188 E : void PauseUntilAllocatingBuffers() {
189 E : base::AutoLock lock(lock_);
190 E : while (!allocating_buffers_state_)
191 E : allocating_buffers_.Wait();
192 E : waiting_for_buffer_to_be_recycled_state_ = false;
193 E : }
194 :
195 E : size_t buffer_requests_waiting_for_recycle() {
196 E : base::AutoLock lock(lock_);
197 E : return buffer_requests_waiting_for_recycle_;
198 E : }
199 :
200 E : virtual void OnWaitingForBufferToBeRecycled() OVERRIDE {
201 E : lock_.AssertAcquired();
202 E : waiting_for_buffer_to_be_recycled_state_ = true;
203 E : waiting_for_buffer_to_be_recycled_.Signal();
204 E : }
205 :
206 E : virtual void OnDestroySingletonBuffer(Buffer* buffer) OVERRIDE {
207 E : lock_.AssertAcquired();
208 E : last_singleton_buffer_destroyed_ = buffer;
209 E : singleton_buffers_destroyed_++;
210 E : destroying_singleton_buffer_state_ = true;
211 E : destroying_singleton_buffer_.Signal();
212 E : }
213 :
214 : bool InitializeProcessInfo(ProcessId process_id,
215 E : ProcessInfo* client) OVERRIDE {
216 E : DCHECK(client != NULL);
217 :
218 : // Lobotomize the process info initialization to allow using fake PIDs.
219 E : client->process_id = process_id;
220 : const DWORD kFlags =
221 E : PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
222 : client->process_handle.Set(
223 E : ::OpenProcess(kFlags, FALSE, ::GetCurrentProcessId()));
224 : static const wchar_t kEnvironment[] = L"asdf=fofofo\0";
225 : client->environment.assign(kEnvironment,
226 E : kEnvironment + arraysize(kEnvironment));
227 :
228 E : return true;
229 E : }
230 :
231 : bool CopyBufferHandleToClient(HANDLE client_process_handle,
232 : HANDLE local_handle,
233 E : HANDLE* client_copy) OVERRIDE {
234 : // Avoid handle leaks by using the same handle for both "ends".
235 E : *client_copy = local_handle;
236 E : return true;
237 E : }
238 :
239 E : virtual bool AllocateBuffers(size_t count, size_t size) OVERRIDE {
240 E : lock_.AssertAcquired();
241 :
242 E : allocating_buffers_state_ = true;
243 E : allocating_buffers_.Signal();
244 :
245 : // Forward this to the original implementation.
246 E : return Session::AllocateBuffers(count, size);
247 E : }
248 :
249 : // Under lock_.
250 : base::ConditionVariable waiting_for_buffer_to_be_recycled_;
251 : bool waiting_for_buffer_to_be_recycled_state_;
252 :
253 : // Under lock_.
254 : base::ConditionVariable destroying_singleton_buffer_;
255 : bool destroying_singleton_buffer_state_;
256 : Buffer* last_singleton_buffer_destroyed_;
257 : size_t singleton_buffers_destroyed_;
258 :
259 : // Under lock_.
260 : base::ConditionVariable allocating_buffers_;
261 : bool allocating_buffers_state_;
262 : };
263 :
// Convenience alias for a reference-counted TestSession.
typedef scoped_refptr<TestSession> TestSessionPtr;
265 :
266 : class TestService : public Service {
267 : public:
268 : explicit TestService(BufferConsumerFactory* factory)
269 : : Service(factory),
270 E : process_id_(0xfafafa) {
271 E : }
272 :
273 E : TestSessionPtr CreateTestSession() {
274 E : scoped_refptr<Session> session;
275 E : if (!GetNewSession(++process_id_, &session))
276 i : return NULL;
277 :
278 E : return TestSessionPtr(static_cast<TestSession*>(session.get()));
279 E : }
280 :
281 E : size_t num_active_sessions() const { return num_active_sessions_; }
282 :
283 : protected:
284 E : virtual Session* CreateSession() OVERRIDE {
285 E : return new TestSession(this);
286 E : }
287 :
288 : private:
289 : uint32 process_id_; // Under lock_;
290 : };
291 :
// Test fixture that stands up a complete call-trace service wired to the
// test writer factory, a dedicated consumer thread, and two worker threads
// for issuing concurrent buffer requests.
class SessionTest : public ::testing::Test {
 public:
  SessionTest()
      : consumer_thread_("session-test-consumer-thread"),
        consumer_thread_has_started_(
            consumer_thread_.StartWithOptions(
                base::Thread::Options(base::MessageLoop::TYPE_IO, 0))),
        session_trace_file_writer_factory_(consumer_thread_.message_loop()),
        call_trace_service_(&session_trace_file_writer_factory_),
        rpc_service_instance_manager_(&call_trace_service_),
        worker1_("Worker1"),
        worker2_("Worker2") {
  }

  virtual void SetUp() OVERRIDE {
    testing::Test::SetUp();

    ASSERT_TRUE(consumer_thread_has_started_);
    EXPECT_EQ(0, call_trace_service_.num_active_sessions());
    EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());

    // Setup the buffer management to make it easy to force buffer contention.
    call_trace_service_.set_num_incremental_buffers(2);
    call_trace_service_.set_buffer_size_in_bytes(8192);

    // Create a temporary directory for the call trace files.
    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
    ASSERT_TRUE(session_trace_file_writer_factory_.SetTraceFileDirectory(
        temp_dir_.path()));

    // We give the service instance a "unique" id so that it does not interfere
    // with any other instances or tests that might be concurrently active.
    std::string instance_id(base::StringPrintf("%d", ::GetCurrentProcessId()));
    call_trace_service_.set_instance_id(base::UTF8ToWide(instance_id));

    // The instance id needs to be in the environment to be picked up by the
    // client library. We prefix the existing environment variable, if any.
    scoped_ptr<base::Environment> env(base::Environment::Create());
    ASSERT_FALSE(env.get() == NULL);
    std::string env_var;
    env->GetVar(::kSyzygyRpcInstanceIdEnvVar, &env_var);
    env_var.insert(0, ";");
    env_var.insert(0, instance_id);
    ASSERT_TRUE(env->SetVar(::kSyzygyRpcInstanceIdEnvVar, env_var));

    // Start our worker threads so we can use them later.
    ASSERT_TRUE(worker1_.Start());
    ASSERT_TRUE(worker2_.Start());
  }

  virtual void TearDown() OVERRIDE {
    // Stop the worker threads.
    worker2_.Stop();
    worker1_.Stop();

    // Stop the call trace service. All sessions and writer instances must be
    // gone by the time the service has stopped.
    EXPECT_TRUE(call_trace_service_.Stop());
    EXPECT_FALSE(call_trace_service_.is_running());
    EXPECT_EQ(0, call_trace_service_.num_active_sessions());
    EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());
  }

 protected:
  // The thread on which the trace file writer will consume buffers and a
  // helper variable whose initialization we use as a trigger to start the
  // thread (ensuring its message_loop is created). These declarations MUST
  // remain in this order and precede that of trace_file_writer_factory_;
  base::Thread consumer_thread_;
  bool consumer_thread_has_started_;

  // The call trace service related objects. These declarations MUST be in
  // this order.
  TestSessionTraceFileWriterFactory session_trace_file_writer_factory_;
  TestService call_trace_service_;
  RpcServiceInstanceManager rpc_service_instance_manager_;

  // The directory where trace file output will be written.
  base::ScopedTempDir temp_dir_;

  // A couple of worker threads where we can dispatch closures.
  base::Thread worker1_;
  base::Thread worker2_;
};
375 :
376 E : void GetNextBuffer(Session* session, Buffer** buffer, bool* result) {
377 E : DCHECK(session != NULL);
378 E : DCHECK(buffer != NULL);
379 E : DCHECK(result != NULL);
380 E : *buffer = NULL;
381 E : *result = session->GetNextBuffer(buffer);
382 E : }
383 :
384 : } // namespace
385 :
386 E : TEST_F(SessionTest, ReturnBufferWorksAfterSessionClose) {
387 E : ASSERT_TRUE(call_trace_service_.Start(true));
388 :
389 E : TestSessionPtr session = call_trace_service_.CreateTestSession();
390 E : ASSERT_TRUE(session != NULL);
391 :
392 E : Buffer* buffer1 = NULL;
393 E : ASSERT_TRUE(session->GetNextBuffer(&buffer1));
394 E : ASSERT_TRUE(buffer1 != NULL);
395 :
396 E : ASSERT_TRUE(session->Close());
397 :
398 : // Closing the session should have forced all buffers to be submitted to
399 : // the write queue.
400 E : ASSERT_EQ(Buffer::kPendingWrite, buffer1->state);
401 :
402 : // A request for another buffer should fail.
403 E : Buffer* buffer2 = NULL;
404 E : ASSERT_FALSE(session->GetNextBuffer(&buffer2));
405 E : ASSERT_TRUE(buffer2 == NULL);
406 :
407 : // Returning the original buffer should be a noop, but it should succeed.
408 : // Most of all, it shouldn't cause a race condition.
409 E : ASSERT_TRUE(session->ReturnBuffer(buffer1));
410 :
411 : // Let's allow the outstanding buffers to be written.
412 E : session->AllowBuffersToBeRecycled(9999);
413 E : }
414 :
TEST_F(SessionTest, BackPressureWorks) {
  // Configure things so that back-pressure will be easily forced.
  call_trace_service_.set_max_buffers_pending_write(1);
  ASSERT_TRUE(call_trace_service_.Start(true));

  TestSessionPtr session = call_trace_service_.CreateTestSession();
  ASSERT_TRUE(session != NULL);

  Buffer* buffer1 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer1));
  ASSERT_TRUE(buffer1 != NULL);

  Buffer* buffer2 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer2));
  ASSERT_TRUE(buffer2 != NULL);

  // Return both buffers so we have 2 pending writes. Neither of these will
  // go through because we have not allowed any buffers to be written yet.
  ASSERT_TRUE(session->ReturnBuffer(buffer1));
  ASSERT_TRUE(session->ReturnBuffer(buffer2));

  // We don't care about events up until this point.
  session->ClearWaitingForBufferToBeRecycledState();

  // Start the buffer getter. This launches another thread that will try to
  // get another buffer. This will be blocked because of the pending writes.
  bool result3 = false;
  Buffer* buffer3 = NULL;
  base::Closure buffer_getter3 = base::Bind(
      &GetNextBuffer, session, &buffer3, &result3);
  worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);

  // Wait for the session to start applying back-pressure. This occurs when it
  // has indicated that it is waiting for a buffer to be written.
  session->PauseUntilWaitingForBufferToBeRecycled();

  // Allow a single buffer to be written; this should satisfy the blocked
  // buffer getter.
  session->AllowBuffersToBeRecycled(1);

  // Wait for the buffer getter to complete. Stopping the worker thread acts
  // as a join on the posted task.
  worker1_.Stop();

  // Ensure the buffer handed out was the one that was just recycled, i.e.
  // the getter was satisfied by recycling and not by a new allocation.
  ASSERT_TRUE(result3);
  ASSERT_EQ(buffer1, buffer3);

  // Return the last buffer and allow everything to be written.
  ASSERT_TRUE(session->ReturnBuffer(buffer3));
  session->AllowBuffersToBeRecycled(9999);
}
465 :
TEST_F(SessionTest, BackPressureIsLimited) {
  // Configure things so that back-pressure will be easily forced.
  call_trace_service_.set_max_buffers_pending_write(1);
  ASSERT_TRUE(call_trace_service_.Start(true));

  TestSessionPtr session = call_trace_service_.CreateTestSession();
  ASSERT_TRUE(session != NULL);

  Buffer* buffer1 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer1));
  ASSERT_TRUE(buffer1 != NULL);

  Buffer* buffer2 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer2));
  ASSERT_TRUE(buffer2 != NULL);

  // Return both buffers so we have 2 pending writes. Neither of these will
  // go through because we have not allowed any buffers to be written yet.
  ASSERT_TRUE(session->ReturnBuffer(buffer1));
  ASSERT_TRUE(session->ReturnBuffer(buffer2));

  // Since the back-pressure threshold is 1 and we have 2 pending buffers
  // if 1 is recycled it will bring us below the back-pressure threshold. Thus
  // if we pile on a lot of buffer requests, only the first one should apply
  // back-pressure, and the next ones should cause an allocation.

  // We don't care about events up until this point.
  session->ClearWaitingForBufferToBeRecycledState();
  session->ClearAllocatingBuffersState();

  // Post the first buffer getter; it should block, applying back-pressure.
  bool result3 = false;
  Buffer* buffer3 = NULL;
  base::Closure buffer_getter3 = base::Bind(
      &GetNextBuffer, session, &buffer3, &result3);
  worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);

  // Wait for the session to start applying back-pressure. This occurs when it
  // has indicated that it is waiting for a buffer to be written.
  session->PauseUntilWaitingForBufferToBeRecycled();

  // At this point, there should be only one getter applying back pressure.
  ASSERT_EQ(1u, session->buffer_requests_waiting_for_recycle());

  // Allocate yet another buffer on a new thread, this will force an allocation
  // which in turn will satisfy as many waits as there are buffers allocated.
  bool result4 = false;
  Buffer* buffer4 = NULL;
  base::Closure buffer_getter4 = base::Bind(
      &GetNextBuffer, session, &buffer4, &result4);
  worker2_.message_loop()->PostTask(FROM_HERE, buffer_getter4);

  // Similarly, wait for an allocation. The second buffer getter should cause
  // one to occur.
  session->PauseUntilAllocatingBuffers();

  // Allow a single buffer to be written.
  session->AllowBuffersToBeRecycled(1);

  // Wait for the buffer getters to complete. Stopping the worker threads
  // acts as a join on the posted tasks.
  worker1_.Stop();
  worker2_.Stop();
  ASSERT_TRUE(result3);
  ASSERT_TRUE(result4);

  // We can't guarantee where the returned buffers come from (recycled or
  // not), just that they should be returned.
  ASSERT_TRUE(buffer3 != NULL);
  ASSERT_TRUE(buffer4 != NULL);

  // Return the last 2 buffers and allow everything to be written.
  ASSERT_TRUE(session->ReturnBuffer(buffer3));
  ASSERT_TRUE(session->ReturnBuffer(buffer4));
  session->AllowBuffersToBeRecycled(9999);
}
540 :
541 E : TEST_F(SessionTest, LargeBufferRequestAvoidsBackPressure) {
542 : // Configure things so that back-pressure will be easily forced.
543 E : call_trace_service_.set_max_buffers_pending_write(1);
544 E : ASSERT_TRUE(call_trace_service_.Start(true));
545 :
546 E : TestSessionPtr session = call_trace_service_.CreateTestSession();
547 E : ASSERT_TRUE(session != NULL);
548 :
549 E : Buffer* buffer1 = NULL;
550 E : ASSERT_TRUE(session->GetNextBuffer(&buffer1));
551 E : ASSERT_TRUE(buffer1 != NULL);
552 :
553 E : Buffer* buffer2 = NULL;
554 E : ASSERT_TRUE(session->GetNextBuffer(&buffer2));
555 E : ASSERT_TRUE(buffer2 != NULL);
556 :
557 : // Return both buffers so we have 2 pending writes. Neither of these will
558 : // go through because we have not allowed any buffers to be written yet.
559 E : ASSERT_TRUE(session->ReturnBuffer(buffer1));
560 E : ASSERT_TRUE(session->ReturnBuffer(buffer2));
561 :
562 : // Ask for a big buffer. This should go through immediately and side-step the
563 : // usual buffer pool. Thus, it is not subject to back-pressure.
564 E : Buffer* buffer3 = NULL;
565 E : ASSERT_TRUE(session->GetBuffer(10 * 1024 * 1024, &buffer3));
566 E : ASSERT_EQ(10u * 1024 * 1024, buffer3->mapping_size);
567 E : ASSERT_EQ(10u * 1024 * 1024, buffer3->buffer_size);
568 E : ASSERT_EQ(0u, buffer3->buffer_offset);
569 :
570 : // Return the buffer and allow them all to be recycled.
571 E : ASSERT_TRUE(session->ReturnBuffer(buffer3));
572 E : session->AllowBuffersToBeRecycled(9999);
573 :
574 : // Wait until the singleton buffer has been destroyed.
575 E : session->PauseUntilDestroyingSingletonBuffer();
576 E : ASSERT_EQ(1, session->singleton_buffers_destroyed_);
577 E : ASSERT_EQ(buffer3, session->last_singleton_buffer_destroyed_);
578 E : }
579 :
580 : } // namespace service
581 : } // namespace trace
|