1 : // Copyright 2012 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/trace/service/session.h"
16 :
17 : #include <memory>
18 :
19 : #include "base/atomicops.h"
20 : #include "base/bind.h"
21 : #include "base/callback.h"
22 : #include "base/environment.h"
23 : #include "base/files/file_util.h"
24 : #include "base/files/scoped_temp_dir.h"
25 : #include "base/strings/stringprintf.h"
26 : #include "base/strings/utf_string_conversions.h"
27 : #include "base/threading/thread.h"
28 : #include "gtest/gtest.h"
29 : #include "syzygy/trace/protocol/call_trace_defs.h"
30 : #include "syzygy/trace/service/service.h"
31 : #include "syzygy/trace/service/service_rpc_impl.h"
32 : #include "syzygy/trace/service/session_trace_file_writer.h"
33 : #include "syzygy/trace/service/session_trace_file_writer_factory.h"
34 :
35 : namespace trace {
36 : namespace service {
37 :
38 : namespace {
39 :
40 : class TestSessionTraceFileWriter : public SessionTraceFileWriter {
41 : public:
42 : explicit TestSessionTraceFileWriter(
43 : base::MessageLoop* message_loop, const base::FilePath& trace_directory)
44 E : : SessionTraceFileWriter(message_loop, trace_directory),
45 E : num_buffers_to_recycle_(0) {
46 E : base::subtle::Barrier_AtomicIncrement(&num_instances_, 1);
47 E : }
48 :
49 E : ~TestSessionTraceFileWriter() {
50 E : base::subtle::Barrier_AtomicIncrement(&num_instances_, -1);
51 E : }
52 :
53 E : void RecycleBuffers() {
54 E : queue_lock_.AssertAcquired();
55 :
56 E : while (!queue_.empty() && num_buffers_to_recycle_ != 0) {
57 E : Buffer* buffer = queue_.front();
58 E : queue_.pop_front();
59 :
60 E : ASSERT_TRUE(buffer != NULL);
61 E : ASSERT_EQ(buffer->session, session_ref_.get());
62 E : ASSERT_TRUE(
63 : SessionTraceFileWriter::ConsumeBuffer(buffer));
64 :
65 E : --num_buffers_to_recycle_;
66 E : }
67 :
68 : // If we've emptied the queue, release our reference to the session.
69 E : if (queue_.empty())
70 E : session_ref_ = reinterpret_cast<Session*>(NULL);
71 E : }
72 :
73 E : void AllowBuffersToBeRecycled(size_t num_buffers) {
74 E : base::AutoLock auto_lock(queue_lock_);
75 :
76 E : num_buffers_to_recycle_ = num_buffers;
77 E : RecycleBuffers();
78 E : }
79 :
80 E : bool ConsumeBuffer(Buffer* buffer) override {
81 E : base::AutoLock auto_lock(queue_lock_);
82 E : EXPECT_TRUE(buffer != NULL);
83 E : if (buffer) {
84 : // While there are buffers in the queue, keep a reference to the session.
85 E : if (queue_.empty()) {
86 E : EXPECT_TRUE(session_ref_.get() == NULL);
87 E : EXPECT_TRUE(buffer->session != NULL);
88 E : session_ref_ = buffer->session;
89 : }
90 :
91 : // Put the buffer into the consumer queue.
92 E : queue_.push_back(buffer);
93 : }
94 :
95 E : RecycleBuffers();
96 :
97 E : return buffer != NULL;
98 E : }
99 :
100 E : static base::subtle::Atomic32 num_instances() {
101 E : return base::subtle::Acquire_Load(&num_instances_);
102 E : }
103 :
104 : protected:
105 : // The queue of buffers to be consumed.
106 : std::deque<Buffer*> queue_;
107 :
108 : // This keeps the session object alive while there are buffers in the queue.
109 : scoped_refptr<Session> session_ref_;
110 :
111 : // A lock to protect access to the queue and session reference.
112 : base::Lock queue_lock_;
113 :
114 : // The number of buffers to recycle berfore pausing.
115 : size_t num_buffers_to_recycle_;
116 :
117 : // The number of active writer instances.
118 : // @note All accesses to this member should be via base/atomicops.h functions.
119 : static volatile base::subtle::Atomic32 num_instances_;
120 : };
121 :
122 : volatile base::subtle::Atomic32 TestSessionTraceFileWriter::num_instances_ = 0;
123 :
124 : class TestSessionTraceFileWriterFactory : public SessionTraceFileWriterFactory {
125 : public:
126 E : explicit TestSessionTraceFileWriterFactory(base::MessageLoop* message_loop)
127 E : : SessionTraceFileWriterFactory(message_loop) {
128 E : }
129 :
130 E : bool CreateConsumer(scoped_refptr<BufferConsumer>* consumer) override {
131 : // w00t, somewhat bogus coverage ploy, at least will reuse the DCHECKS.
132 E : EXPECT_TRUE(SessionTraceFileWriterFactory::CreateConsumer(consumer));
133 E : EXPECT_TRUE((*consumer)->HasOneRef());
134 :
135 E : *consumer = new TestSessionTraceFileWriter(
136 : message_loop_, trace_file_directory_);
137 E : return true;
138 E : }
139 : };
140 :
141 : class TestSession : public Session {
142 : public:
143 : explicit TestSession(Service* service)
144 E : : Session(service),
145 E : waiting_for_buffer_to_be_recycled_(&lock_),
146 E : waiting_for_buffer_to_be_recycled_state_(false),
147 E : destroying_singleton_buffer_(&lock_),
148 E : destroying_singleton_buffer_state_(false),
149 E : last_singleton_buffer_destroyed_(NULL),
150 E : singleton_buffers_destroyed_(0),
151 E : allocating_buffers_(&lock_),
152 E : allocating_buffers_state_(false) {
153 E : }
154 :
155 E : void AllowBuffersToBeRecycled(size_t num_buffers) {
156 : static_cast<TestSessionTraceFileWriter*>(
157 E : buffer_consumer())->AllowBuffersToBeRecycled(num_buffers);
158 E : }
159 :
160 E : void ClearWaitingForBufferToBeRecycledState() {
161 E : base::AutoLock lock(lock_);
162 E : waiting_for_buffer_to_be_recycled_state_ = false;
163 E : }
164 :
165 E : void PauseUntilWaitingForBufferToBeRecycled() {
166 E : base::AutoLock lock(lock_);
167 E : while (!waiting_for_buffer_to_be_recycled_state_)
168 E : waiting_for_buffer_to_be_recycled_.Wait();
169 E : waiting_for_buffer_to_be_recycled_state_ = false;
170 E : }
171 :
172 : void ClearDestroyingSingletonBufferState() {
173 : base::AutoLock lock(lock_);
174 : destroying_singleton_buffer_state_ = false;
175 : }
176 :
177 E : void PauseUntilDestroyingSingletonBuffer() {
178 E : base::AutoLock lock(lock_);
179 E : while (!destroying_singleton_buffer_state_)
180 E : destroying_singleton_buffer_.Wait();
181 E : destroying_singleton_buffer_state_ = true;
182 E : }
183 :
184 E : void ClearAllocatingBuffersState() {
185 E : base::AutoLock lock(lock_);
186 E : allocating_buffers_state_ = false;
187 E : }
188 :
189 E : void PauseUntilAllocatingBuffers() {
190 E : base::AutoLock lock(lock_);
191 E : while (!allocating_buffers_state_)
192 E : allocating_buffers_.Wait();
193 E : waiting_for_buffer_to_be_recycled_state_ = false;
194 E : }
195 :
196 E : size_t buffer_requests_waiting_for_recycle() {
197 E : base::AutoLock lock(lock_);
198 E : return buffer_requests_waiting_for_recycle_;
199 E : }
200 :
201 E : void OnWaitingForBufferToBeRecycled() override {
202 E : lock_.AssertAcquired();
203 E : waiting_for_buffer_to_be_recycled_state_ = true;
204 E : waiting_for_buffer_to_be_recycled_.Signal();
205 E : }
206 :
207 E : void OnDestroySingletonBuffer(Buffer* buffer) override {
208 E : lock_.AssertAcquired();
209 E : last_singleton_buffer_destroyed_ = buffer;
210 E : singleton_buffers_destroyed_++;
211 E : destroying_singleton_buffer_state_ = true;
212 E : destroying_singleton_buffer_.Signal();
213 E : }
214 :
215 : bool InitializeProcessInfo(ProcessId process_id,
216 E : ProcessInfo* client) override {
217 E : DCHECK(client != NULL);
218 :
219 : // Lobotomize the process info initialization to allow using fake PIDs.
220 E : client->process_id = process_id;
221 : const DWORD kFlags =
222 E : PROCESS_DUP_HANDLE | PROCESS_QUERY_INFORMATION | PROCESS_VM_READ;
223 E : client->process_handle.Set(
224 : ::OpenProcess(kFlags, FALSE, ::GetCurrentProcessId()));
225 : static const wchar_t kEnvironment[] = L"asdf=fofofo\0";
226 E : client->environment.assign(kEnvironment,
227 : kEnvironment + arraysize(kEnvironment));
228 :
229 E : return true;
230 E : }
231 :
232 : bool CopyBufferHandleToClient(HANDLE client_process_handle,
233 : HANDLE local_handle,
234 E : HANDLE* client_copy) override {
235 : // Avoid handle leaks by using the same handle for both "ends".
236 E : *client_copy = local_handle;
237 E : return true;
238 E : }
239 :
240 E : bool AllocateBuffers(size_t count, size_t size) override {
241 E : lock_.AssertAcquired();
242 :
243 E : allocating_buffers_state_ = true;
244 E : allocating_buffers_.Signal();
245 :
246 : // Forward this to the original implementation.
247 E : return Session::AllocateBuffers(count, size);
248 E : }
249 :
250 : // Under lock_.
251 : base::ConditionVariable waiting_for_buffer_to_be_recycled_;
252 : bool waiting_for_buffer_to_be_recycled_state_;
253 :
254 : // Under lock_.
255 : base::ConditionVariable destroying_singleton_buffer_;
256 : bool destroying_singleton_buffer_state_;
257 : Buffer* last_singleton_buffer_destroyed_;
258 : size_t singleton_buffers_destroyed_;
259 :
260 : // Under lock_.
261 : base::ConditionVariable allocating_buffers_;
262 : bool allocating_buffers_state_;
263 : };
264 :
265 : typedef scoped_refptr<TestSession> TestSessionPtr;
266 :
267 : class TestService : public Service {
268 : public:
269 : explicit TestService(BufferConsumerFactory* factory)
270 E : : Service(factory),
271 E : process_id_(0xfafafa) {
272 E : }
273 :
274 E : TestSessionPtr CreateTestSession() {
275 E : scoped_refptr<Session> session;
276 E : if (!GetNewSession(++process_id_, &session))
277 i : return NULL;
278 :
279 E : return TestSessionPtr(static_cast<TestSession*>(session.get()));
280 E : }
281 :
282 E : size_t num_active_sessions() const { return num_active_sessions_; }
283 :
284 : protected:
285 E : Session* CreateSession() override { return new TestSession(this); }
286 :
287 : private:
288 : uint32_t process_id_; // Under lock_;
289 : };
290 :
// Test fixture that stands up a complete call-trace service wired to the test
// doubles above: a consumer thread for the trace-file writer, the service
// itself, an RPC instance manager, and two worker threads used to dispatch
// concurrent buffer requests.
class SessionTest : public ::testing::Test {
 public:
  SessionTest()
      : consumer_thread_("session-test-consumer-thread"),
        consumer_thread_has_started_(
            consumer_thread_.StartWithOptions(
                base::Thread::Options(base::MessageLoop::TYPE_IO, 0))),
        session_trace_file_writer_factory_(consumer_thread_.message_loop()),
        call_trace_service_(&session_trace_file_writer_factory_),
        rpc_service_instance_manager_(&call_trace_service_),
        worker1_("Worker1"),
        worker2_("Worker2") {
  }

  void SetUp() override {
    testing::Test::SetUp();

    ASSERT_TRUE(consumer_thread_has_started_);
    EXPECT_EQ(0, call_trace_service_.num_active_sessions());
    EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());

    // Setup the buffer management to make it easy to force buffer contention.
    call_trace_service_.set_num_incremental_buffers(2);
    call_trace_service_.set_buffer_size_in_bytes(8192);

    // Create a temporary directory for the call trace files.
    ASSERT_TRUE(temp_dir_.CreateUniqueTempDir());
    ASSERT_TRUE(session_trace_file_writer_factory_.SetTraceFileDirectory(
        temp_dir_.path()));

    // We give the service instance a "unique" id so that it does not interfere
    // with any other instances or tests that might be concurrently active.
    std::string instance_id(base::StringPrintf("%d", ::GetCurrentProcessId()));
    call_trace_service_.set_instance_id(base::UTF8ToWide(instance_id));

    // The instance id needs to be in the environment to be picked up by the
    // client library. We prefix the existing environment variable, if any.
    std::unique_ptr<base::Environment> env(base::Environment::Create());
    ASSERT_FALSE(env.get() == NULL);
    std::string env_var;
    env->GetVar(::kSyzygyRpcInstanceIdEnvVar, &env_var);
    env_var.insert(0, ";");
    env_var.insert(0, instance_id);
    ASSERT_TRUE(env->SetVar(::kSyzygyRpcInstanceIdEnvVar, env_var));

    // Start our worker threads so we can use them later.
    ASSERT_TRUE(worker1_.Start());
    ASSERT_TRUE(worker2_.Start());
  }

  void TearDown() override {
    // Stop the worker threads.
    worker2_.Stop();
    worker1_.Stop();

    // Stop the call trace service. All sessions and writer instances must be
    // gone by now, otherwise something leaked.
    EXPECT_TRUE(call_trace_service_.Stop());
    EXPECT_FALSE(call_trace_service_.is_running());
    EXPECT_EQ(0, call_trace_service_.num_active_sessions());
    EXPECT_EQ(0, TestSessionTraceFileWriter::num_instances());
  }

 protected:
  // The thread on which the trace file writer will consume buffers and a
  // helper variable whose initialization we use as a trigger to start the
  // thread (ensuring its message_loop is created). These declarations MUST
  // remain in this order and precede that of
  // session_trace_file_writer_factory_.
  base::Thread consumer_thread_;
  bool consumer_thread_has_started_;

  // The call trace service related objects. These declarations MUST be in
  // this order.
  TestSessionTraceFileWriterFactory session_trace_file_writer_factory_;
  TestService call_trace_service_;
  RpcServiceInstanceManager rpc_service_instance_manager_;

  // The directory where trace file output will be written.
  base::ScopedTempDir temp_dir_;

  // A couple of worker threads where we can dispatch closures.
  base::Thread worker1_;
  base::Thread worker2_;
};
374 :
375 E : void GetNextBuffer(TestSessionPtr session, Buffer** buffer, bool* result) {
376 E : DCHECK_NE(static_cast<TestSession*>(nullptr), session.get());
377 E : DCHECK_NE(static_cast<Buffer**>(nullptr), buffer);
378 E : DCHECK_NE(static_cast<bool*>(nullptr), result);
379 E : *buffer = NULL;
380 E : *result = session->GetNextBuffer(buffer);
381 E : }
382 :
383 : } // namespace
384 :
385 E : TEST_F(SessionTest, ReturnBufferWorksAfterSessionClose) {
386 E : ASSERT_TRUE(call_trace_service_.Start(true));
387 :
388 E : TestSessionPtr session = call_trace_service_.CreateTestSession();
389 E : ASSERT_TRUE(session != NULL);
390 :
391 E : Buffer* buffer1 = NULL;
392 E : ASSERT_TRUE(session->GetNextBuffer(&buffer1));
393 E : ASSERT_TRUE(buffer1 != NULL);
394 :
395 E : ASSERT_TRUE(session->Close());
396 :
397 : // Closing the session should have forced all buffers to be submitted to
398 : // the write queue.
399 E : ASSERT_EQ(Buffer::kPendingWrite, buffer1->state);
400 :
401 : // A request for another buffer should fail.
402 E : Buffer* buffer2 = NULL;
403 E : ASSERT_FALSE(session->GetNextBuffer(&buffer2));
404 E : ASSERT_TRUE(buffer2 == NULL);
405 :
406 : // Returning the original buffer should be a noop, but it should succeed.
407 : // Most of all, it shouldn't cause a race condition.
408 E : ASSERT_TRUE(session->ReturnBuffer(buffer1));
409 :
410 : // Let's allow the outstanding buffers to be written.
411 E : session->AllowBuffersToBeRecycled(9999);
412 E : }
413 :
414 E : TEST_F(SessionTest, BackPressureWorks) {
415 : // Configure things so that back-pressure will be easily forced.
416 E : call_trace_service_.set_max_buffers_pending_write(1);
417 E : ASSERT_TRUE(call_trace_service_.Start(true));
418 :
419 E : TestSessionPtr session = call_trace_service_.CreateTestSession();
420 E : ASSERT_TRUE(session != NULL);
421 :
422 E : Buffer* buffer1 = NULL;
423 E : ASSERT_TRUE(session->GetNextBuffer(&buffer1));
424 E : ASSERT_TRUE(buffer1 != NULL);
425 :
426 E : Buffer* buffer2 = NULL;
427 E : ASSERT_TRUE(session->GetNextBuffer(&buffer2));
428 E : ASSERT_TRUE(buffer2 != NULL);
429 :
430 : // Return both buffers so we have 2 pending writes. Neither of these will
431 : // go through because we have not allowed any buffers to be written yet.
432 E : ASSERT_TRUE(session->ReturnBuffer(buffer1));
433 E : ASSERT_TRUE(session->ReturnBuffer(buffer2));
434 :
435 : // We don't care about events up until this point.
436 E : session->ClearWaitingForBufferToBeRecycledState();
437 :
438 : // Start the buffer getter. This launches another thread that will try to
439 : // get another buffer. This will be blocked because of the pending writes.
440 E : bool result3 = false;
441 E : Buffer* buffer3 = NULL;
442 E : base::Closure buffer_getter3 = base::Bind(
443 : &GetNextBuffer, session, &buffer3, &result3);
444 E : worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);
445 :
446 : // Wait for the session to start applying back-pressure. This occurs when it
447 : // has indicated that it is waiting for a buffer to be written.
448 E : session->PauseUntilWaitingForBufferToBeRecycled();
449 :
450 : // Allow a single buffer to be written.
451 E : session->AllowBuffersToBeRecycled(1);
452 :
453 : // Wait for the buffer getter to complete.
454 E : worker1_.Stop();
455 :
456 : // Ensure the buffer was a recycled forced wait.
457 E : ASSERT_TRUE(result3);
458 E : ASSERT_EQ(buffer1, buffer3);
459 :
460 : // Return the last buffer and allow everything to be written.
461 E : ASSERT_TRUE(session->ReturnBuffer(buffer3));
462 E : session->AllowBuffersToBeRecycled(9999);
463 E : }
464 :
// Verifies that back-pressure stalls only as many buffer requests as there
// are pending writes above the threshold; additional requests trigger a new
// allocation instead of piling up behind the writer.
TEST_F(SessionTest, BackPressureIsLimited) {
  // Configure things so that back-pressure will be easily forced.
  call_trace_service_.set_max_buffers_pending_write(1);
  ASSERT_TRUE(call_trace_service_.Start(true));

  TestSessionPtr session = call_trace_service_.CreateTestSession();
  ASSERT_TRUE(session != NULL);

  Buffer* buffer1 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer1));
  ASSERT_TRUE(buffer1 != NULL);

  Buffer* buffer2 = NULL;
  ASSERT_TRUE(session->GetNextBuffer(&buffer2));
  ASSERT_TRUE(buffer2 != NULL);

  // Return both buffers so we have 2 pending writes. Neither of these will
  // go through because we have not allowed any buffers to be written yet.
  ASSERT_TRUE(session->ReturnBuffer(buffer1));
  ASSERT_TRUE(session->ReturnBuffer(buffer2));

  // Since the back-pressure threshold is 1 and we have 2 pending buffers
  // if 1 is recycled it will bring us below the back-pressure threshold. Thus
  // if we pile on a lot of buffer requests, only the first one should apply
  // back-pressure, and the next ones should cause an allocation.

  // We don't care about events up until this point.
  session->ClearWaitingForBufferToBeRecycledState();
  session->ClearAllocatingBuffersState();

  // First getter: expected to block, applying back-pressure.
  bool result3 = false;
  Buffer* buffer3 = NULL;
  base::Closure buffer_getter3 = base::Bind(
      &GetNextBuffer, session, &buffer3, &result3);
  worker1_.message_loop()->PostTask(FROM_HERE, buffer_getter3);

  // Wait for the session to start applying back-pressure. This occurs when it
  // has indicated that it is waiting for a buffer to be written.
  session->PauseUntilWaitingForBufferToBeRecycled();

  // At this point, there should be only one getter applying back pressure.
  ASSERT_EQ(1u, session->buffer_requests_waiting_for_recycle());

  // Allocate yet another buffer on a new thread, this will force an allocation
  // which in turn will satisfy as many waits as there are buffers allocated.
  bool result4 = false;
  Buffer* buffer4 = NULL;
  base::Closure buffer_getter4 = base::Bind(
      &GetNextBuffer, session, &buffer4, &result4);
  worker2_.message_loop()->PostTask(FROM_HERE, buffer_getter4);

  // Similarly, wait for an allocation. The second buffer getter should cause
  // one to occur.
  session->PauseUntilAllocatingBuffers();

  // Allow a single buffer to be written.
  session->AllowBuffersToBeRecycled(1);

  // Wait for the buffer getters to complete.
  worker1_.Stop();
  worker2_.Stop();
  ASSERT_TRUE(result3);
  ASSERT_TRUE(result4);

  // We can't guarantee where the returned buffers come from (recycled or
  // not), just that they should be returned.
  ASSERT_TRUE(buffer3 != NULL);
  ASSERT_TRUE(buffer4 != NULL);

  // Return the last 2 buffers and allow everything to be written.
  ASSERT_TRUE(session->ReturnBuffer(buffer3));
  ASSERT_TRUE(session->ReturnBuffer(buffer4));
  session->AllowBuffersToBeRecycled(9999);
}
539 :
540 E : TEST_F(SessionTest, LargeBufferRequestAvoidsBackPressure) {
541 : // Configure things so that back-pressure will be easily forced.
542 E : call_trace_service_.set_max_buffers_pending_write(1);
543 E : ASSERT_TRUE(call_trace_service_.Start(true));
544 :
545 E : TestSessionPtr session = call_trace_service_.CreateTestSession();
546 E : ASSERT_TRUE(session != NULL);
547 :
548 E : Buffer* buffer1 = NULL;
549 E : ASSERT_TRUE(session->GetNextBuffer(&buffer1));
550 E : ASSERT_TRUE(buffer1 != NULL);
551 :
552 E : Buffer* buffer2 = NULL;
553 E : ASSERT_TRUE(session->GetNextBuffer(&buffer2));
554 E : ASSERT_TRUE(buffer2 != NULL);
555 :
556 : // Return both buffers so we have 2 pending writes. Neither of these will
557 : // go through because we have not allowed any buffers to be written yet.
558 E : ASSERT_TRUE(session->ReturnBuffer(buffer1));
559 E : ASSERT_TRUE(session->ReturnBuffer(buffer2));
560 :
561 : // Ask for a big buffer. This should go through immediately and side-step the
562 : // usual buffer pool. Thus, it is not subject to back-pressure.
563 E : Buffer* buffer3 = NULL;
564 E : ASSERT_TRUE(session->GetBuffer(10 * 1024 * 1024, &buffer3));
565 E : ASSERT_EQ(10u * 1024 * 1024, buffer3->mapping_size);
566 E : ASSERT_EQ(10u * 1024 * 1024, buffer3->buffer_size);
567 E : ASSERT_EQ(0u, buffer3->buffer_offset);
568 :
569 : // Return the buffer and allow them all to be recycled.
570 E : ASSERT_TRUE(session->ReturnBuffer(buffer3));
571 E : session->AllowBuffersToBeRecycled(9999);
572 :
573 : // Wait until the singleton buffer has been destroyed.
574 E : session->PauseUntilDestroyingSingletonBuffer();
575 E : ASSERT_EQ(1, session->singleton_buffers_destroyed_);
576 E : ASSERT_EQ(buffer3, session->last_singleton_buffer_destroyed_);
577 E : }
578 :
579 : } // namespace service
580 : } // namespace trace
|