// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file implements the trace::service::Session class, which manages
// the trace file and buffers for a given client of the call trace service.
//
// TODO(rogerm): Reduce the scope over which the session lock is held. The
//     RPC machinery ensures that calls to a given session are serialized, so
//     we only have to protect the parts that might interact with the writer
//     thread.

#include "syzygy/trace/service/session.h"

#include <string.h>
#include <time.h>

#include <memory>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "syzygy/common/align.h"
#include "syzygy/common/buffer_writer.h"
#include "syzygy/common/com_utils.h"
#include "syzygy/common/path_util.h"
#include "syzygy/trace/protocol/call_trace_defs.h"
#include "syzygy/trace/service/mapped_buffer.h"
#include "syzygy/trace/service/service.h"

namespace trace {
namespace service {

namespace {

using base::ProcessId;

// Helper for logging Buffer::ID values.
std::ostream& operator<<(std::ostream& stream, const Buffer::ID& buffer_id) {
  return stream << "shared_memory_handle=0x" << std::hex << buffer_id.first
                << ", buffer_offset=0x" << std::hex << buffer_id.second;
}

}  // namespace

Session::Session(Service* call_trace_service)
    : call_trace_service_(call_trace_service),
      is_closing_(false),
      buffer_consumer_(NULL),
      buffer_requests_waiting_for_recycle_(0),
      buffer_is_available_(&lock_),
      buffer_id_(0),
      input_error_already_logged_(false) {
  DCHECK(call_trace_service != NULL);
  ::memset(buffer_state_counts_, 0, sizeof(buffer_state_counts_));

  call_trace_service->AddOneActiveSession();
}

Session::~Session() {
  // We expect all of the buffers to be available, and none of them to be
  // outstanding.
  DCHECK(call_trace_service_ != NULL);
  DCHECK_EQ(buffers_available_.size(),
            buffer_state_counts_[Buffer::kAvailable]);
  DCHECK_EQ(buffers_.size(), buffer_state_counts_[Buffer::kAvailable]);
  DCHECK_EQ(0u, buffer_state_counts_[Buffer::kInUse]);
  DCHECK_EQ(0u, buffer_state_counts_[Buffer::kPendingWrite]);

  // Not strictly necessary, but let's make sure nothing refers to the
  // client buffers before we delete the underlying memory.
  buffers_.clear();
  buffers_available_.clear();

  // The session owns all of its shared memory buffers using raw pointers
  // inserted into the shared_memory_buffers_ list.
  while (!shared_memory_buffers_.empty()) {
    BufferPool* pool = shared_memory_buffers_.back();
    shared_memory_buffers_.pop_back();
    delete pool;
  }

  // TODO(rogerm): Perhaps this code should be tied to the last recycled
  //     buffer after is_closing_ (see RecycleBuffer()). It is bothersome to
  //     tie logic (the service's view of the number of active sessions) to
  //     the lifetime of the objects in memory. Arguably, this applies to all
  //     of the above code.
  if (buffer_consumer_.get() != nullptr) {
    buffer_consumer_->Close(this);
    buffer_consumer_ = nullptr;
  }

  call_trace_service_->RemoveOneActiveSession();
}

bool Session::Init(ProcessId client_process_id) {
  return InitializeProcessInfo(client_process_id, &client_);
}

bool Session::Close() {
  base::AutoLock lock(lock_);

  // It's possible that the service is being stopped just after this session
  // was marked for closure. The service would then attempt to re-close the
  // session. Let's ignore these requests.
  if (is_closing_)
    return true;

  // Otherwise the session is being asked to close for the first time.
  is_closing_ = true;

  // Schedule any outstanding buffers for flushing.
  for (BufferMap::iterator it = buffers_.begin(); it != buffers_.end(); ++it) {
    Buffer* buffer = it->second;
    DCHECK(buffer != NULL);
    if (buffer->state == Buffer::kInUse) {
      ChangeBufferState(Buffer::kPendingWrite, buffer);
      buffer_consumer_->ConsumeBuffer(buffer);
    }
  }

  // Create a process-ended event. This puts at least one buffer back in use
  // to store the event.
  Buffer* buffer = NULL;
  if (CreateProcessEndedEvent(&buffer)) {
    DCHECK(buffer != NULL);
    ChangeBufferState(Buffer::kPendingWrite, buffer);
    buffer_consumer_->ConsumeBuffer(buffer);
  }

  return true;
}

bool Session::FindBuffer(CallTraceBuffer* call_trace_buffer,
                         Buffer** client_buffer) {
  DCHECK(call_trace_buffer != NULL);
  DCHECK(client_buffer != NULL);

  base::AutoLock lock(lock_);

  Buffer::ID buffer_id = Buffer::GetID(*call_trace_buffer);

  BufferMap::iterator iter = buffers_.find(buffer_id);
  if (iter == buffers_.end()) {
    if (!input_error_already_logged_) {
      LOG(ERROR) << "Received call trace buffer not in use for this session "
                 << "[pid=" << client_.process_id << ", " << buffer_id << "].";
      input_error_already_logged_ = true;
    }
    return false;
  }

#ifndef NDEBUG
  // Make sure fields that are not part of the ID also match. The client
  // shouldn't be playing with any of the call_trace_buffer fields.
  if (call_trace_buffer->mapping_size != iter->second->mapping_size ||
      call_trace_buffer->buffer_size != iter->second->buffer_size) {
    LOG(WARNING) << "Received call trace buffer with mismatched attributes.";
  }
#endif

  *client_buffer = iter->second;
  return true;
}

bool Session::GetNextBuffer(Buffer** out_buffer) {
  return GetBuffer(0, out_buffer);
}

bool Session::GetBuffer(size_t minimum_size, Buffer** out_buffer) {
  DCHECK(out_buffer != NULL);

  *out_buffer = NULL;
  base::AutoLock lock(lock_);

  // Once we're closing we should not hand out any more buffers.
  if (is_closing_) {
    LOG(ERROR) << "Session is closing but someone is trying to get a buffer.";
    return false;
  }

  // If this is an ordinary buffer request, delegate to the usual channel.
  if (minimum_size <= call_trace_service_->buffer_size_in_bytes())
    return GetNextBufferUnlocked(out_buffer);

  return AllocateBufferForImmediateUse(minimum_size, out_buffer);
}

bool Session::ReturnBuffer(Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK(buffer->session == this);

  {
    base::AutoLock lock(lock_);

    // If we're in the middle of closing, we ignore any ReturnBuffer requests
    // as we've already manually pushed them out for writing.
    if (is_closing_)
      return true;

    ChangeBufferState(Buffer::kPendingWrite, buffer);
  }

  // Hand the buffer over to the consumer.
  if (!buffer_consumer_->ConsumeBuffer(buffer)) {
    LOG(ERROR) << "Unable to schedule buffer for writing.";
    return false;
  }

  return true;
}

bool Session::RecycleBuffer(Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK(buffer->session == this);

  // Is this a special singleton buffer? If so, we don't want to return it to
  // the pool but rather destroy it immediately.
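  // A singleton buffer is the sole buffer of its pool: it starts at offset
  // zero, spans its entire shared-memory mapping, and is larger than the
  // standard (block-size aligned) buffer size. That is how we recognize it
  // below.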
  size_t normal_buffer_size = ::common::AlignUp(
      call_trace_service_->buffer_size_in_bytes(),
      buffer_consumer_->block_size());
  if (buffer->buffer_offset == 0 &&
      buffer->mapping_size == buffer->buffer_size &&
      buffer->buffer_size > normal_buffer_size) {
    return DestroySingletonBuffer(buffer);
  }

  base::AutoLock lock(lock_);

  ChangeBufferState(Buffer::kAvailable, buffer);
  buffers_available_.push_front(buffer);
  buffer_is_available_.Signal();

  // If the session is closing and all outstanding buffers have been recycled
  // then it's safe to destroy this session.
  if (is_closing_ && buffer_state_counts_[Buffer::kInUse] == 0 &&
      buffer_state_counts_[Buffer::kPendingWrite] == 0) {
    // If all buffers have been recycled, then all the buffers we own must be
    // available. When we start closing we refuse to hand out further buffers
    // so this must eventually happen, unless the write queue hangs.
    DCHECK_EQ(buffers_.size(), buffer_state_counts_[Buffer::kAvailable]);
    DCHECK_EQ(buffers_available_.size(),
              buffer_state_counts_[Buffer::kAvailable]);
  }

  return true;
}

void Session::ChangeBufferState(BufferState new_state, Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK(buffer->session == this);
  lock_.AssertAcquired();

  BufferState old_state = buffer->state;
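
  // Buffer states advance through a fixed cycle:
  //   kAvailable -> kInUse -> kPendingWrite -> kAvailable.
  // The DCHECK below verifies that |new_state| is the successor of
  // |old_state| in this cycle, using arithmetic modulo kBufferStateMax.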

  // Ensure the state transition is valid.
  DCHECK_EQ(static_cast<int>(new_state),
            (static_cast<int>(old_state) + 1) % Buffer::kBufferStateMax);

  // Apply the state change.
  buffer->state = new_state;
  buffer_state_counts_[old_state]--;
  buffer_state_counts_[new_state]++;
}

bool Session::InitializeProcessInfo(ProcessId process_id,
                                    ProcessInfo* client) {
  DCHECK(client != NULL);

  if (!client->Initialize(process_id)) {
    LOG(ERROR) << "Failed to initialize client info for PID=" << process_id
               << ".";
    return false;
  }

  return true;
}

bool Session::CopyBufferHandleToClient(HANDLE client_process_handle,
                                       HANDLE local_handle,
                                       HANDLE* client_copy) {
  DCHECK(client_process_handle != NULL);
  DCHECK(local_handle != NULL);
  DCHECK(client_copy != NULL);

  // Duplicate the mapping handle into the client process.
  if (!::DuplicateHandle(::GetCurrentProcess(),
                         local_handle,
                         client_process_handle,
                         client_copy,
                         0,
                         FALSE,
                         DUPLICATE_SAME_ACCESS)) {
    DWORD error = ::GetLastError();
    LOG(ERROR) << "Failed to copy shared memory handle into client process: "
               << ::common::LogWe(error) << ".";
    return false;
  }

  return true;
}

bool Session::AllocateBufferPool(
    size_t num_buffers, size_t buffer_size, BufferPool** out_pool) {
  DCHECK_GT(num_buffers, 0u);
  DCHECK_GT(buffer_size, 0u);
  DCHECK(out_pool != NULL);
  lock_.AssertAcquired();

  *out_pool = NULL;

  // Allocate the record for the shared memory buffer.
  std::unique_ptr<BufferPool> pool(new BufferPool());

  // Initialize the shared buffer pool.
  buffer_size = ::common::AlignUp(buffer_size, buffer_consumer_->block_size());
  if (!pool->Init(this, num_buffers, buffer_size)) {
    LOG(ERROR) << "Failed to initialize shared memory buffer.";
    return false;
  }

  // Copy the buffer pool handle to the client process.
  HANDLE client_handle = NULL;
  if (is_closing_) {
    // If the session is closing, there's no reason to copy the handle to the
    // client, nor is there good reason to believe that'll succeed, as the
    // process may be gone. Instead, to ensure the buffers have unique IDs,
    // we assign them a locally unique identifier in the guise of a handle.
    //
    // HACK: we know that handle values are a multiple of four, so to make
    // sure our IDs don't collide, we make them odd.
    // See http://blogs.msdn.com/b/oldnewthing/archive/2005/01/21/358109.aspx.
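    // For example, successive allocations get the IDs 3, 5, 7, ..., none of
    // which can collide with a genuine handle value.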
    client_handle = reinterpret_cast<HANDLE>((++buffer_id_ * 2) + 1);
  } else {
    if (!CopyBufferHandleToClient(client_.process_handle.Get(),
                                  pool->handle(),
                                  &client_handle)) {
      return false;
    }
  }
  DCHECK(client_handle != NULL);

  pool->SetClientHandle(client_handle);

  // Save the shared memory block so that it's managed by the session.
  shared_memory_buffers_.push_back(pool.get());
  *out_pool = pool.release();

  return true;
}

bool Session::AllocateBuffers(size_t num_buffers, size_t buffer_size) {
  DCHECK_GT(num_buffers, 0u);
  DCHECK_GT(buffer_size, 0u);
  lock_.AssertAcquired();

  BufferPool* pool_ptr = NULL;
  if (!AllocateBufferPool(num_buffers, buffer_size, &pool_ptr)) {
    LOG(ERROR) << "Failed to allocate buffer pool.";
    return false;
  }

  // Put the client buffers into the list of available buffers and update
  // the buffer state information.
  for (Buffer* buf = pool_ptr->begin(); buf != pool_ptr->end(); ++buf) {
    Buffer::ID buffer_id = Buffer::GetID(*buf);

    buf->state = Buffer::kAvailable;
    CHECK(buffers_.insert(std::make_pair(buffer_id, buf)).second);

    buffer_state_counts_[Buffer::kAvailable]++;
    buffers_available_.push_back(buf);
    buffer_is_available_.Signal();
  }

  DCHECK(BufferBookkeepingIsConsistent());

  return true;
}

bool Session::AllocateBufferForImmediateUse(size_t minimum_size,
                                            Buffer** out_buffer) {
  DCHECK_LT(call_trace_service_->buffer_size_in_bytes(), minimum_size);
  DCHECK(out_buffer != NULL);
  lock_.AssertAcquired();

  BufferPool* pool_ptr = NULL;
  if (!AllocateBufferPool(1, minimum_size, &pool_ptr)) {
    LOG(ERROR) << "Failed to allocate buffer pool.";
    return false;
  }

  // Get the buffer. The pool was created with exactly one buffer in it.
  DCHECK_EQ(pool_ptr->begin() + 1, pool_ptr->end());
  Buffer* buffer = pool_ptr->begin();
  Buffer::ID buffer_id = Buffer::GetID(*buffer);

  // Update the bookkeeping.
  buffer->state = Buffer::kInUse;
  CHECK(buffers_.insert(std::make_pair(buffer_id, buffer)).second);
  buffer_state_counts_[Buffer::kInUse]++;

  DCHECK(BufferBookkeepingIsConsistent());

  *out_buffer = buffer;

  return true;
}

bool Session::GetNextBufferUnlocked(Buffer** out_buffer) {
  DCHECK(out_buffer != NULL);
  lock_.AssertAcquired();

  *out_buffer = NULL;

  // If we have too many pending writes, let's wait until one of those has
  // been completed and recycle that buffer. This provides some back-pressure
  // on our allocation mechanism.
  //
  // Note that this back-pressure maximum simply reduces the amount of
  // memory that will be used in common scenarios. It is still possible to
  // have unbounded memory growth in two ways:
  //
  // (1) Having an unbounded number of processes, and hence sessions. Each
  //     session creates an initial pool of buffers for itself.
  //
  // (2) Having an unbounded number of threads with outstanding (partially
  //     filled and not returned for writing) buffers. Since such buffers are
  //     not pending writes, they exert no back-pressure, and new threads
  //     coming to look for buffers will force further allocations.
  //
  // We have to be careful that we don't pile up arbitrarily many threads
  // waiting for a finite number of buffers that will be recycled. Hence, we
  // count the number of requests applying back-pressure.
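  //
  // For example, with max_buffers_pending_write() == 8 and 11 buffers
  // currently pending write, up to 3 requests will wait for a buffer to be
  // recycled; any further concurrent requests force a fresh allocation.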
  while (buffers_available_.empty()) {
    // Figure out how many buffers we can force to be recycled according to
    // our threshold and the number of write-pending buffers.
    size_t buffers_force_recyclable = 0;
    if (buffer_state_counts_[Buffer::kPendingWrite] >
        call_trace_service_->max_buffers_pending_write()) {
      buffers_force_recyclable = buffer_state_counts_[Buffer::kPendingWrite] -
          call_trace_service_->max_buffers_pending_write();
    }

    // If there's still room to do so, wait rather than allocating
    // immediately. This will either force us to wait until a buffer has been
    // written and recycled, or if the request volume is high enough we'll
    // likely be satisfied by an allocation.
    if (buffer_requests_waiting_for_recycle_ < buffers_force_recyclable) {
      ++buffer_requests_waiting_for_recycle_;
      OnWaitingForBufferToBeRecycled();  // Unittest hook.
      buffer_is_available_.Wait();
      --buffer_requests_waiting_for_recycle_;
    } else {
      // Otherwise, force an allocation.
      if (!AllocateBuffers(call_trace_service_->num_incremental_buffers(),
                           call_trace_service_->buffer_size_in_bytes())) {
        return false;
      }
    }
  }
  DCHECK(!buffers_available_.empty());

  Buffer* buffer = buffers_available_.front();
  buffers_available_.pop_front();
  ChangeBufferState(Buffer::kInUse, buffer);

  *out_buffer = buffer;
  return true;
}

bool Session::DestroySingletonBuffer(Buffer* buffer) {
  DCHECK(buffer != NULL);
  DCHECK_EQ(0u, buffer->buffer_offset);
  DCHECK_EQ(buffer->mapping_size, buffer->buffer_size);
  DCHECK_EQ(Buffer::kPendingWrite, buffer->state);

  base::AutoLock lock(lock_);

  // Look for the pool that houses this buffer.
  SharedMemoryBufferCollection::iterator it = shared_memory_buffers_.begin();
  BufferPool* pool = NULL;
  for (; it != shared_memory_buffers_.end(); ++it) {
    pool = *it;
    if (buffer >= pool->begin() && buffer < pool->end())
      break;
  }

  // Didn't find the pool?
  if (it == shared_memory_buffers_.end()) {
    LOG(ERROR) << "Unable to find pool for buffer to be destroyed.";
    return false;
  }

  // If the pool contains more than one buffer, bail.
  if (pool->end() - pool->begin() > 1) {
    LOG(ERROR) << "Trying to destroy a pool that contains more than 1 buffer.";
    return false;
  }

  // Call our testing seam notification.
  OnDestroySingletonBuffer(buffer);

  // Remove the pool from our collection of pools.
  shared_memory_buffers_.erase(it);

  // Remove the buffer from the buffer map.
  CHECK_EQ(1u, buffers_.erase(Buffer::GetID(*buffer)));

  // Remove the buffer from our buffer statistics.
  buffer_state_counts_[Buffer::kPendingWrite]--;
  DCHECK(BufferBookkeepingIsConsistent());

  // Finally, delete the pool. This will clean up the buffer.
  delete pool;

  return true;
}

bool Session::CreateProcessEndedEvent(Buffer** buffer) {
  DCHECK(buffer != NULL);
  lock_.AssertAcquired();

  *buffer = NULL;

  // We output a segment that contains a single empty event. That is, the
  // event consists only of a prefix whose data size is set to zero. The
  // buffer will be populated with the following:
  //
  // RecordPrefix: the prefix for the TraceFileSegmentHeader which follows
  //     (with type TraceFileSegmentHeader::kTypeId).
  // TraceFileSegmentHeader: the segment header for the segment represented
  //     by this buffer.
  // RecordPrefix: the prefix for the event itself (with type
  //     TRACE_PROCESS_ENDED). This prefix will have a data size of zero
  //     indicating that no structure follows.
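  //
  // That is, the buffer layout is:
  //
  //   +--------------------------+
  //   | RecordPrefix             |  type TraceFileSegmentHeader::kTypeId
  //   +--------------------------+
  //   | TraceFileSegmentHeader   |  segment_length == sizeof(RecordPrefix)
  //   +--------------------------+
  //   | RecordPrefix             |  type TRACE_PROCESS_ENDED, size == 0
  //   +--------------------------+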
  const size_t kBufferSize = sizeof(RecordPrefix) +
      sizeof(TraceFileSegmentHeader) + sizeof(RecordPrefix);

  // Ensure that a free buffer exists.
  if (buffers_available_.empty()) {
    if (!AllocateBuffers(1, kBufferSize)) {
      LOG(ERROR) << "Unable to allocate buffer for process ended event.";
      return false;
    }
  }
  DCHECK(!buffers_available_.empty());

  // Get a buffer for the event.
  if (!GetNextBufferUnlocked(buffer) || *buffer == NULL) {
    LOG(ERROR) << "Unable to get a buffer for process ended event.";
    return false;
  }
  DCHECK(*buffer != NULL);

  // This should rarely happen, as buffers are normally allocated much larger
  // than kBufferSize, but it is possible.
  if ((*buffer)->buffer_size < kBufferSize) {
    LOG(ERROR) << "Buffer too small for process ended event.";
    return false;
  }

  // Populate the various structures in the buffer.

  MappedBuffer mapped_buffer(*buffer);
  if (!mapped_buffer.Map())
    return false;

  RecordPrefix* segment_prefix =
      reinterpret_cast<RecordPrefix*>(mapped_buffer.data());
  uint64_t timestamp = trace::common::GetTsc();
  segment_prefix->timestamp = timestamp;
  segment_prefix->size = sizeof(TraceFileSegmentHeader);
  segment_prefix->type = TraceFileSegmentHeader::kTypeId;
  segment_prefix->version.hi = TRACE_VERSION_HI;
  segment_prefix->version.lo = TRACE_VERSION_LO;
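
  // The synthetic event is not associated with any particular client thread,
  // so the thread id is left at zero; the segment body consists of a single
  // record prefix.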
  TraceFileSegmentHeader* segment_header =
      reinterpret_cast<TraceFileSegmentHeader*>(segment_prefix + 1);
  segment_header->thread_id = 0;
  segment_header->segment_length = sizeof(RecordPrefix);

  RecordPrefix* event_prefix =
      reinterpret_cast<RecordPrefix*>(segment_header + 1);
  event_prefix->timestamp = timestamp;
  event_prefix->size = 0;
  event_prefix->type = TRACE_PROCESS_ENDED;
  event_prefix->version.hi = TRACE_VERSION_HI;
  event_prefix->version.lo = TRACE_VERSION_LO;

  return true;
}

bool Session::BufferBookkeepingIsConsistent() const {
  lock_.AssertAcquired();

  size_t buffer_states = buffer_state_counts_[Buffer::kAvailable] +
      buffer_state_counts_[Buffer::kInUse] +
      buffer_state_counts_[Buffer::kPendingWrite];
  if (buffer_states != buffers_.size())
    return false;

  if (buffers_available_.size() != buffer_state_counts_[Buffer::kAvailable])
    return false;
  return true;
}

}  // namespace service
}  // namespace trace
|