// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file implements the trace::service::Session class, which manages
// the trace file and buffers for a given client of the call trace service.
//
// TODO(rogerm): Reduce the scope over which the session lock is held. The
//     RPC machinery ensures that calls to a given session are serialized, so
//     we only have to protect the parts that might interact with the writer
//     thread.
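//
// A rough sketch of a session's lifetime, as driven by this file's callers
// (the RPC layer and the buffer consumer). Treat the exact ordering as an
// illustrative assumption rather than a documented contract:
//
//   Session session(service);
//   session.Init(client_process_id);  // Capture client process info.
//   Buffer* buffer = NULL;
//   session.GetNextBuffer(&buffer);   // Hand a buffer to the client.
//   session.ReturnBuffer(buffer);     // Client done; queue for writing.
//   session.RecycleBuffer(buffer);    // Writer done; back to the pool.
//   session.Close();                  // Flush any outstanding buffers.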

#include "syzygy/trace/service/session.h"

#include <time.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/string_util.h"
#include "base/stringprintf.h"
#include "sawbuck/common/com_utils.h"
#include "syzygy/common/align.h"
#include "syzygy/common/buffer_writer.h"
#include "syzygy/common/path_util.h"
#include "syzygy/trace/protocol/call_trace_defs.h"
#include "syzygy/trace/service/service.h"

namespace trace {
namespace service {

namespace {

using base::ProcessId;

// Helper for logging Buffer::ID values.
std::ostream& operator<<(std::ostream& stream, const Buffer::ID& buffer_id) {
  return stream << "shared_memory_handle=0x" << std::hex << buffer_id.first
                << ", buffer_offset=0x" << std::hex << buffer_id.second;
}

}  // namespace

Session::Session(Service* call_trace_service)
    : call_trace_service_(call_trace_service),
      is_closing_(false),
      buffer_consumer_(NULL),
      buffer_requests_waiting_for_recycle_(0),
      buffer_is_available_(&lock_),
      buffer_id_(0),
      input_error_already_logged_(false) {
  DCHECK(call_trace_service != NULL);
  ::memset(buffer_state_counts_, 0, sizeof(buffer_state_counts_));

  call_trace_service->AddOneActiveSession();
}

Session::~Session() {
  // We expect all of the buffers to be available, and none of them to be
  // outstanding.
  DCHECK(call_trace_service_ != NULL);
  DCHECK_EQ(buffers_available_.size(),
            buffer_state_counts_[Buffer::kAvailable]);
  DCHECK_EQ(buffers_.size(), buffer_state_counts_[Buffer::kAvailable]);
  DCHECK_EQ(0u, buffer_state_counts_[Buffer::kInUse]);
  DCHECK_EQ(0u, buffer_state_counts_[Buffer::kPendingWrite]);

  // Not strictly necessary, but let's make sure nothing refers to the
  // client buffers before we delete the underlying memory.
  buffers_.clear();
  buffers_available_.clear();

  // The session owns all of its shared memory buffers via raw pointers
  // stored in the shared_memory_buffers_ list.
  while (!shared_memory_buffers_.empty()) {
    BufferPool* pool = shared_memory_buffers_.back();
    shared_memory_buffers_.pop_back();
    delete pool;
  }

  // TODO(rogerm): Perhaps this code should be tied to the last recycled
  //     buffer after is_closing_ (see RecycleBuffer()). It is bothersome to
  //     tie logic (the service's view of the number of active sessions) to
  //     the lifetime of the objects in memory. Arguably, this applies to all
  //     of the above code.
  if (buffer_consumer_ != NULL) {
    buffer_consumer_->Close(this);
    buffer_consumer_ = NULL;
  }

  call_trace_service_->RemoveOneActiveSession();
}

bool Session::Init(ProcessId client_process_id) {
  return InitializeProcessInfo(client_process_id, &client_);
}

bool Session::Close() {
  std::vector<Buffer*> buffers;
  base::AutoLock lock(lock_);

  // It's possible that the service is being stopped just after this session
  // was marked for closure. The service would then attempt to re-close the
  // session. Ignore such requests.
  if (is_closing_)
    return true;

  // Otherwise the session is being asked to close for the first time.
  is_closing_ = true;

  // Reserve space for the worst case: every in-use buffer, plus one more for
  // the process-ended event created below.
  buffers.reserve(buffer_state_counts_[Buffer::kInUse] + 1);

  // Schedule any outstanding buffers for flushing.
  for (BufferMap::iterator it = buffers_.begin(); it != buffers_.end(); ++it) {
    Buffer* buffer = it->second;
    DCHECK(buffer != NULL);
    if (buffer->state == Buffer::kInUse) {
      ChangeBufferState(Buffer::kPendingWrite, buffer);
      buffer_consumer_->ConsumeBuffer(buffer);
    }
  }

  // Create a process-ended event. This causes at least one buffer to be in
  // use to store the event.
  Buffer* buffer = NULL;
  if (CreateProcessEndedEvent(&buffer)) {
    DCHECK(buffer != NULL);
    ChangeBufferState(Buffer::kPendingWrite, buffer);
    buffer_consumer_->ConsumeBuffer(buffer);
  }

  return true;
}

bool Session::FindBuffer(CallTraceBuffer* call_trace_buffer,
                         Buffer** client_buffer) {
  DCHECK(call_trace_buffer != NULL);
  DCHECK(client_buffer != NULL);

  base::AutoLock lock(lock_);

  Buffer::ID buffer_id = Buffer::GetID(*call_trace_buffer);

  BufferMap::iterator iter = buffers_.find(buffer_id);
  if (iter == buffers_.end()) {
    if (!input_error_already_logged_) {
      LOG(ERROR) << "Received call trace buffer not in use for this session "
                 << "[pid=" << client_.process_id << ", " << buffer_id << "].";
      input_error_already_logged_ = true;
    }
    return false;
  }

#ifndef NDEBUG
  // Make sure fields that are not part of the ID also match. The client
  // shouldn't be playing with any of the call_trace_buffer fields.
  if (call_trace_buffer->mapping_size != iter->second->mapping_size ||
      call_trace_buffer->buffer_size != iter->second->buffer_size) {
    LOG(WARNING) << "Received call trace buffer with mismatched attributes.";
  }
#endif

  *client_buffer = iter->second;
  return true;
}

bool Session::GetNextBuffer(Buffer** out_buffer) {
  return GetBuffer(0, out_buffer);
}

bool Session::GetBuffer(size_t minimum_size, Buffer** out_buffer) {
  DCHECK(out_buffer != NULL);

  *out_buffer = NULL;
  base::AutoLock lock(lock_);

  // Once we're closing we should not hand out any more buffers.
  if (is_closing_) {
    LOG(ERROR) << "Session is closing but someone is trying to get a buffer.";
    return false;
  }

  // If this is an ordinary buffer request, delegate to the usual channel.
  if (minimum_size <= call_trace_service_->buffer_size_in_bytes())
    return GetNextBufferUnlocked(out_buffer);

  return AllocateBufferForImmediateUse(minimum_size, out_buffer);
}
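
// Example (hypothetical caller; the sizes are illustrative): a client that
// needs to record a single 1 MB blob when the service's standard buffers are
// smaller would call GetBuffer(1024 * 1024, &buffer) and receive a dedicated
// single-buffer pool, which is destroyed rather than recycled once written
// (see RecycleBuffer() and DestroySingletonBuffer()).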

bool Session::ReturnBuffer(Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK(buffer->session == this);

  {
    base::AutoLock lock(lock_);

    // If we're in the middle of closing, ignore any ReturnBuffer requests,
    // as we've already manually pushed the outstanding buffers out for
    // writing.
    if (is_closing_)
      return true;

    ChangeBufferState(Buffer::kPendingWrite, buffer);
  }

  // Hand the buffer over to the consumer.
  if (!buffer_consumer_->ConsumeBuffer(buffer)) {
    LOG(ERROR) << "Unable to schedule buffer for writing.";
    return false;
  }

  return true;
}

bool Session::RecycleBuffer(Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK(buffer->session == this);

  // Is this a special singleton buffer? If so, we don't want to return it to
  // the pool but rather destroy it immediately.
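  // (Singleton buffers are created by AllocateBufferForImmediateUse() as a
  // pool holding exactly one oversized buffer. The size test below is how we
  // recognize them: they start at offset zero, span their entire mapping,
  // and exceed the padded size of a normal buffer.)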
  size_t normal_buffer_size = common::AlignUp(
      call_trace_service_->buffer_size_in_bytes(),
      buffer_consumer_->block_size());
  if (buffer->buffer_offset == 0 &&
      buffer->mapping_size == buffer->buffer_size &&
      buffer->buffer_size > normal_buffer_size) {
    return DestroySingletonBuffer(buffer);
  }

  base::AutoLock lock(lock_);

  ChangeBufferState(Buffer::kAvailable, buffer);
  buffers_available_.push_front(buffer);
  buffer_is_available_.Signal();

  // If the session is closing and all outstanding buffers have been recycled
  // then it's safe to destroy this session.
  if (is_closing_ && buffer_state_counts_[Buffer::kInUse] == 0 &&
      buffer_state_counts_[Buffer::kPendingWrite] == 0) {
    // If all buffers have been recycled, then all the buffers we own must be
    // available. When we start closing we refuse to hand out further buffers,
    // so this must eventually happen, unless the write queue hangs.
    DCHECK_EQ(buffers_.size(), buffer_state_counts_[Buffer::kAvailable]);
    DCHECK_EQ(buffers_available_.size(),
              buffer_state_counts_[Buffer::kAvailable]);
  }

  return true;
}

void Session::ChangeBufferState(BufferState new_state, Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK(buffer->session == this);
  lock_.AssertAcquired();

  BufferState old_state = buffer->state;

  // Ensure the state transition is valid.
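  // States advance in a fixed cycle, in enum order:
  //   kAvailable -> kInUse -> kPendingWrite -> kAvailable -> ...
  // so the only legal new_state is the successor of old_state, modulo
  // kBufferStateMax. For example, recycling a buffer that is still kInUse
  // (skipping kPendingWrite) would trip this check.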
  DCHECK_EQ(static_cast<int>(new_state),
            (static_cast<int>(old_state) + 1) % Buffer::kBufferStateMax);

  // Apply the state change.
  buffer->state = new_state;
  buffer_state_counts_[old_state]--;
  buffer_state_counts_[new_state]++;
}

bool Session::InitializeProcessInfo(ProcessId process_id,
                                    ProcessInfo* client) {
  DCHECK(client != NULL);

  if (!client->Initialize(process_id)) {
    LOG(ERROR) << "Failed to initialize client info for PID=" << process_id
               << ".";
    return false;
  }

  return true;
}

bool Session::CopyBufferHandleToClient(HANDLE client_process_handle,
                                       HANDLE local_handle,
                                       HANDLE* client_copy) {
  DCHECK(client_process_handle != NULL);
  DCHECK(local_handle != NULL);
  DCHECK(client_copy != NULL);

  // Duplicate the mapping handle into the client process.
  if (!::DuplicateHandle(::GetCurrentProcess(),
                         local_handle,
                         client_process_handle,
                         client_copy,
                         0,
                         FALSE,
                         DUPLICATE_SAME_ACCESS)) {
    DWORD error = ::GetLastError();
    LOG(ERROR) << "Failed to copy shared memory handle into client process: "
               << com::LogWe(error) << ".";
    return false;
  }

  return true;
}

bool Session::AllocateBufferPool(
    size_t num_buffers, size_t buffer_size, BufferPool** out_pool) {
  DCHECK_GT(num_buffers, 0u);
  DCHECK_GT(buffer_size, 0u);
  DCHECK(out_pool != NULL);
  lock_.AssertAcquired();

  *out_pool = NULL;

  // Allocate the record for the shared memory buffer.
  scoped_ptr<BufferPool> pool(new BufferPool());
  if (pool.get() == NULL) {
    LOG(ERROR) << "Failed to allocate shared memory buffer.";
    return false;
  }

  // Initialize the shared buffer pool.
  buffer_size = common::AlignUp(buffer_size, buffer_consumer_->block_size());
  if (!pool->Init(this, num_buffers, buffer_size)) {
    LOG(ERROR) << "Failed to initialize shared memory buffer.";
    return false;
  }

  // Copy the buffer pool handle to the client process.
  HANDLE client_handle = NULL;
  if (is_closing_) {
    // If the session is closing, there's no reason to copy the handle to the
    // client, nor is there good reason to believe that doing so would
    // succeed, as the process may already be gone. Instead, to ensure the
    // buffers have unique IDs, we assign them a locally unique identifier in
    // the guise of a handle.
    //
    // HACK: we know that handle values are multiples of four, so to make
    // sure our IDs don't collide with real handles, we make them odd.
    // See http://blogs.msdn.com/b/oldnewthing/archive/2005/01/21/358109.aspx.
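    //
    // For example, real handle values look like 0x4, 0x8, 0xc, ..., while
    // the expression below yields 3, 5, 7, ... for buffer_id_ = 1, 2, 3, ...;
    // the two sequences can never intersect since the generated IDs are
    // always odd.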
    client_handle = reinterpret_cast<HANDLE>((++buffer_id_ * 2) + 1);
  } else {
    if (!CopyBufferHandleToClient(client_.process_handle.Get(),
                                  pool->handle(),
                                  &client_handle)) {
      return false;
    }
  }
  DCHECK(client_handle != NULL);

  pool->SetClientHandle(client_handle);

  // Save the shared memory block so that it's managed by the session.
  shared_memory_buffers_.push_back(pool.get());
  *out_pool = pool.release();

  return true;
}

bool Session::AllocateBuffers(size_t num_buffers, size_t buffer_size) {
  DCHECK_GT(num_buffers, 0u);
  DCHECK_GT(buffer_size, 0u);
  lock_.AssertAcquired();

  BufferPool* pool_ptr = NULL;
  if (!AllocateBufferPool(num_buffers, buffer_size, &pool_ptr)) {
    LOG(ERROR) << "Failed to allocate buffer pool.";
    return false;
  }

  // Put the client buffers into the list of available buffers and update
  // the buffer state information.
  for (Buffer* buf = pool_ptr->begin(); buf != pool_ptr->end(); ++buf) {
    Buffer::ID buffer_id = Buffer::GetID(*buf);

    buf->state = Buffer::kAvailable;
    CHECK(buffers_.insert(std::make_pair(buffer_id, buf)).second);

    buffer_state_counts_[Buffer::kAvailable]++;
    buffers_available_.push_back(buf);
    buffer_is_available_.Signal();
  }

  DCHECK(BufferBookkeepingIsConsistent());

  return true;
}

bool Session::AllocateBufferForImmediateUse(size_t minimum_size,
                                            Buffer** out_buffer) {
  DCHECK_LT(call_trace_service_->buffer_size_in_bytes(), minimum_size);
  DCHECK(out_buffer != NULL);
  lock_.AssertAcquired();

  BufferPool* pool_ptr = NULL;
  if (!AllocateBufferPool(1, minimum_size, &pool_ptr)) {
    LOG(ERROR) << "Failed to allocate buffer pool.";
    return false;
  }

  // Get the buffer.
  DCHECK_EQ(pool_ptr->begin() + 1, pool_ptr->end());
  Buffer* buffer = pool_ptr->begin();
  Buffer::ID buffer_id = Buffer::GetID(*buffer);

  // Update the bookkeeping.
  buffer->state = Buffer::kInUse;
  CHECK(buffers_.insert(std::make_pair(buffer_id, buffer)).second);
  buffer_state_counts_[Buffer::kInUse]++;

  DCHECK(BufferBookkeepingIsConsistent());

  *out_buffer = buffer;

  return true;
}

bool Session::GetNextBufferUnlocked(Buffer** out_buffer) {
  DCHECK(out_buffer != NULL);
  lock_.AssertAcquired();

  *out_buffer = NULL;

  // If we have too many pending writes, wait until one of those has
  // completed and recycle that buffer. This provides some back-pressure
  // on our allocation mechanism.
  //
  // Note that this back-pressure maximum simply reduces the amount of
  // memory that will be used in common scenarios. It is still possible to
  // have unbounded memory growth in two ways:
  //
  // (1) Having an unbounded number of processes, and hence sessions. Each
  //     session creates an initial pool of buffers for itself.
  //
  // (2) Having an unbounded number of threads with outstanding (partially
  //     filled and not returned for writing) buffers. The lack of buffers
  //     pending writes will force further allocations as new threads come
  //     looking for buffers.
  //
  // We have to be careful that we don't pile up arbitrarily many threads
  // waiting for a finite number of buffers that will be recycled. Hence, we
  // count the number of requests applying back-pressure; see the worked
  // example below.
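  //
  // A worked example (the numbers are illustrative, not defaults): suppose
  // max_buffers_pending_write() is 16 and 20 buffers are currently pending
  // write. Then buffers_force_recyclable below is 20 - 16 = 4, so up to 4
  // requests may block on buffer_is_available_; a 5th concurrent request
  // would fall through to AllocateBuffers() instead of waiting.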
  while (buffers_available_.empty()) {
    // Figure out how many buffers we can force to be recycled according to
    // our threshold and the number of write-pending buffers.
    size_t buffers_force_recyclable = 0;
    if (buffer_state_counts_[Buffer::kPendingWrite] >
        call_trace_service_->max_buffers_pending_write()) {
      buffers_force_recyclable = buffer_state_counts_[Buffer::kPendingWrite] -
          call_trace_service_->max_buffers_pending_write();
    }

    // If there's still room to do so, wait rather than allocating
    // immediately. This will either force us to wait until a buffer has been
    // written and recycled, or if the request volume is high enough we'll
    // likely be satisfied by an allocation.
    if (buffer_requests_waiting_for_recycle_ < buffers_force_recyclable) {
      ++buffer_requests_waiting_for_recycle_;
      OnWaitingForBufferToBeRecycled();  // Unittest hook.
      buffer_is_available_.Wait();
      --buffer_requests_waiting_for_recycle_;
    } else {
      // Otherwise, force an allocation.
      if (!AllocateBuffers(call_trace_service_->num_incremental_buffers(),
                           call_trace_service_->buffer_size_in_bytes())) {
        return false;
      }
    }
  }
  DCHECK(!buffers_available_.empty());

  Buffer* buffer = buffers_available_.front();
  buffers_available_.pop_front();
  ChangeBufferState(Buffer::kInUse, buffer);

  *out_buffer = buffer;
  return true;
}

bool Session::DestroySingletonBuffer(Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK_EQ(0u, buffer->buffer_offset);
  DCHECK_EQ(buffer->mapping_size, buffer->buffer_size);
  DCHECK_EQ(Buffer::kPendingWrite, buffer->state);

  base::AutoLock lock(lock_);

  // Look for the pool that houses this buffer.
  SharedMemoryBufferCollection::iterator it = shared_memory_buffers_.begin();
  BufferPool* pool = NULL;
  for (; it != shared_memory_buffers_.end(); ++it) {
    pool = *it;
    if (buffer >= pool->begin() && buffer < pool->end())
      break;
  }

  // Didn't find the pool?
  if (it == shared_memory_buffers_.end()) {
    LOG(ERROR) << "Unable to find pool for buffer to be destroyed.";
    return false;
  }

  // If the pool contains more than one buffer, it isn't a singleton; bail.
  if (pool->end() - pool->begin() > 1) {
    LOG(ERROR) << "Trying to destroy a pool that contains more than one "
               << "buffer.";
    return false;
  }

  // Call our testing seam notification.
  OnDestroySingletonBuffer(buffer);

  // Remove the pool from our collection of pools.
  shared_memory_buffers_.erase(it);

  // Remove the buffer from the buffer map.
  CHECK_EQ(1u, buffers_.erase(Buffer::GetID(*buffer)));

  // Remove the buffer from our buffer statistics.
  buffer_state_counts_[Buffer::kPendingWrite]--;
  DCHECK(BufferBookkeepingIsConsistent());

  // Finally, delete the pool. This also cleans up the buffer.
  delete pool;

  return true;
}

bool Session::CreateProcessEndedEvent(Buffer** buffer) {
  DCHECK(buffer != NULL);
  lock_.AssertAcquired();

  *buffer = NULL;

  // We output a segment that contains a single empty event. That is, the
  // event consists only of a prefix whose data size is set to zero. The
  // buffer will be populated with the following:
  //
  // RecordPrefix: the prefix for the TraceFileSegmentHeader which follows
  //     (with type TraceFileSegmentHeader::kTypeId).
  // TraceFileSegmentHeader: the segment header for the segment represented
  //     by this buffer.
  // RecordPrefix: the prefix for the event itself (with type
  //     TRACE_PROCESS_ENDED). This prefix will have a data size of zero,
  //     indicating that no event payload follows.
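  //
  // Schematically, the buffer layout is:
  //
  //   +--------------+------------------------+--------------+
  //   | RecordPrefix | TraceFileSegmentHeader | RecordPrefix |
  //   | (segment)    | (segment_length =      | (event,      |
  //   |              |  sizeof(RecordPrefix)) |  size = 0)   |
  //   +--------------+------------------------+--------------+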
  const size_t kBufferSize = sizeof(RecordPrefix) +
      sizeof(TraceFileSegmentHeader) + sizeof(RecordPrefix);

  // Ensure that a free buffer exists.
  if (buffers_available_.empty()) {
    if (!AllocateBuffers(1, kBufferSize)) {
      LOG(ERROR) << "Unable to allocate buffer for process ended event.";
      return false;
    }
  }
  DCHECK(!buffers_available_.empty());

  // Get a buffer for the event.
  if (!GetNextBufferUnlocked(buffer) || *buffer == NULL) {
    LOG(ERROR) << "Unable to get a buffer for process ended event.";
    return false;
  }
  DCHECK(*buffer != NULL);

  // This should pretty much never happen, as we always allocate generously
  // sized buffers, but it is possible.
  if ((*buffer)->buffer_size < kBufferSize) {
    LOG(ERROR) << "Buffer too small for process ended event.";
    return false;
  }

  // Populate the various structures in the buffer.

  RecordPrefix* segment_prefix =
      reinterpret_cast<RecordPrefix*>((*buffer)->data_ptr);
  DWORD timestamp = ::GetTickCount();
  segment_prefix->timestamp = timestamp;
  segment_prefix->size = sizeof(TraceFileSegmentHeader);
  segment_prefix->type = TraceFileSegmentHeader::kTypeId;
  segment_prefix->version.hi = TRACE_VERSION_HI;
  segment_prefix->version.lo = TRACE_VERSION_LO;

  TraceFileSegmentHeader* segment_header =
      reinterpret_cast<TraceFileSegmentHeader*>(segment_prefix + 1);
  segment_header->thread_id = 0;
  segment_header->segment_length = sizeof(RecordPrefix);

  RecordPrefix* event_prefix =
      reinterpret_cast<RecordPrefix*>(segment_header + 1);
  event_prefix->timestamp = timestamp;
  event_prefix->size = 0;
  event_prefix->type = TRACE_PROCESS_ENDED;
  event_prefix->version.hi = TRACE_VERSION_HI;
  event_prefix->version.lo = TRACE_VERSION_LO;

  return true;
}

bool Session::BufferBookkeepingIsConsistent() const {
  lock_.AssertAcquired();

  size_t buffer_states = buffer_state_counts_[Buffer::kAvailable] +
      buffer_state_counts_[Buffer::kInUse] +
      buffer_state_counts_[Buffer::kPendingWrite];
  if (buffer_states != buffers_.size())
    return false;

  if (buffers_available_.size() != buffer_state_counts_[Buffer::kAvailable])
    return false;
  return true;
}

}  // namespace service
}  // namespace trace
|