1 : // Copyright 2015 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/grinder/grinders/mem_replay_grinder.h"
16 :
17 : #include <cstring>
18 :
19 : #include "syzygy/bard/raw_argument_converter.h"
20 : #include "syzygy/bard/events/heap_alloc_event.h"
21 : #include "syzygy/bard/events/heap_create_event.h"
22 : #include "syzygy/bard/events/heap_destroy_event.h"
23 : #include "syzygy/bard/events/heap_free_event.h"
24 : #include "syzygy/bard/events/heap_realloc_event.h"
25 : #include "syzygy/bard/events/heap_set_information_event.h"
26 : #include "syzygy/bard/events/heap_size_event.h"
27 : #include "syzygy/core/serialization.h"
28 : #include "syzygy/core/zstream.h"
29 :
30 : namespace grinder {
31 : namespace grinders {
32 :
33 : namespace {
34 :
// The names of the Asan heap API functions recognized by this grinder. The
// order of this list must match the EventType enumeration starting at
// EventInterface::kHeapAllocEvent: LoadAsanFunctionNames maps entry i to
// kHeapAllocEvent + i.
const char* kAsanHeapFunctionNames[] = {
    "asan_HeapAlloc",
    "asan_HeapCreate",
    "asan_HeapDestroy",
    "asan_HeapFree",
    "asan_HeapReAlloc",
    "asan_HeapSetInformation",
    "asan_HeapSize",
};

// Convenience alias for the collection of argument converters extracted from
// a single TraceDetailedFunctionCall record.
using RawArgumentConverters = std::vector<bard::RawArgumentConverter>;
46 :
// A utility function for parsing a uint32_t value from a buffer by copying
// its contents.
49 E : bool ParseUint32(const uint8_t* end, const uint8_t** cursor, uint32_t* value) {
50 E : DCHECK_NE(static_cast<uint8_t*>(nullptr), end);
51 E : DCHECK_NE(static_cast<uint8_t**>(nullptr), cursor);
52 E : DCHECK_NE(static_cast<uint8_t*>(nullptr), *cursor);
53 E : DCHECK_LE(*cursor, end);
54 E : DCHECK_NE(static_cast<uint32_t*>(nullptr), value);
55 :
56 E : if (std::distance(*cursor, end) < sizeof(*value))
57 i : return false;
58 E : *value = *reinterpret_cast<const uint32_t*>(*cursor);
59 E : *cursor += sizeof(*value);
60 E : return true;
61 E : }
62 :
63 : // Builds a vector of RawArgumentConverter objects on top of the stored
64 : // arguments in the provided TraceDetailedFunctionCall.
bool BuildArgumentConverters(const TraceDetailedFunctionCall* data,
                             RawArgumentConverters* converters) {
  DCHECK_NE(static_cast<TraceDetailedFunctionCall*>(nullptr), data);
  DCHECK_NE(static_cast<RawArgumentConverters*>(nullptr), converters);
  // |cursor| walks the encoded header (argument count, then per-argument
  // sizes); |end| bounds all reads into the record.
  const uint8_t* cursor = data->argument_data;
  const uint8_t* end = data->argument_data + data->argument_data_size;

  // Parse the size of the argument list present in the
  // TraceDetailedFunctionCall record. See the encoding documented in
  // function_call_logger.h.
  uint32_t arg_count = 0;
  if (!ParseUint32(end, &cursor, &arg_count))
    return false;

  converters->clear();
  converters->reserve(arg_count);

  // Parse the argument sizes and contents themselves.
  // The argument payloads are packed back-to-back immediately after the table
  // of |arg_count| 32-bit sizes, so |arg_data| starts just past that table.
  const uint8_t* arg_data = cursor + sizeof(uint32_t) * arg_count;
  for (size_t i = 0; i < arg_count; ++i) {
    uint32_t arg_size = 0;
    if (!ParseUint32(end, &cursor, &arg_size))
      return false;

    // Reject truncated or malformed records whose payloads would run past
    // the end of the buffer.
    if (arg_data + arg_size > end)
      return false;
    converters->push_back(bard::RawArgumentConverter(arg_data, arg_size));
    arg_data += arg_size;
  }

  return true;
}
97 :
// Helper class for ArgumentParser. Maps a type to the number of argument
// slots it occupies; a real argument type counts as one.
template <typename T>
struct ArgumentParserTraits {
  static const size_t kCount = 1;
};

// Dummy type used as the default for unused ArgumentParser type parameters.
struct ArgumentParserDummy {};

// Specialization of dummy type for ArgumentParser. Dummy slots contribute
// nothing to the total argument count.
template <>
struct ArgumentParserTraits<ArgumentParserDummy> {
  static const size_t kCount = 0;
};
112 :
// Helper class for parsing and validating arguments. The argument are provided
// as an array of RawArgumentConverters which implicitly check for
// size-compatibility with the output argument types. Allows for very compact
// parsing of DetailedFunctionCalls using code like the following:
//
//   // Extract the encoded arguments to a vector of converters.
//   RawArgumentConverters args;
//   CHECK(BuildArgumentConverters(detailed_function_call, &args));
//   // And parse them to their final types. Argument count and size
//   // constraints are automatically enforced here.
//   ArgumentParser<Foo, Bar> arg_parser;
//   CHECK(arg_parser.Parse(args));
//   Foo foo = arg_parser.arg0();
//   Bar bar = arg_parser.arg1();
template <typename T0,
          typename T1 = ArgumentParserDummy,
          typename T2 = ArgumentParserDummy,
          typename T3 = ArgumentParserDummy,
          typename T4 = ArgumentParserDummy,
          typename T5 = ArgumentParserDummy,
          typename T6 = ArgumentParserDummy>
class ArgumentParser {
 public:
  // The number of real (non-dummy) argument types, computed at compile time
  // via ArgumentParserTraits.
  static const size_t kCount =
      ArgumentParserTraits<T0>::kCount + ArgumentParserTraits<T1>::kCount +
      ArgumentParserTraits<T2>::kCount + ArgumentParserTraits<T3>::kCount +
      ArgumentParserTraits<T4>::kCount + ArgumentParserTraits<T5>::kCount +
      ArgumentParserTraits<T6>::kCount;

  // Parses the arguments from the provided converter. Returns true on success,
  // false otherwise. Fails if the argument count doesn't match kCount, or if
  // any RetrieveAs conversion rejects the stored argument size.
  bool Parse(const RawArgumentConverters& args) {
    if (args.size() != kCount)
      return false;
    static_assert(kCount <= 7, "need to update this switch");
    switch (kCount) {
      // These case statements deliberately fall through, so that exactly the
      // first kCount arguments are retrieved.
      case 7:
        if (!args[6].RetrieveAs(&arg6_))
          return false;
      case 6:
        if (!args[5].RetrieveAs(&arg5_))
          return false;
      case 5:
        if (!args[4].RetrieveAs(&arg4_))
          return false;
      case 4:
        if (!args[3].RetrieveAs(&arg3_))
          return false;
      case 3:
        if (!args[2].RetrieveAs(&arg2_))
          return false;
      case 2:
        if (!args[1].RetrieveAs(&arg1_))
          return false;
      case 1:
        if (!args[0].RetrieveAs(&arg0_))
          return false;
    }
    return true;
  }

  // Accessors for the parsed arguments. Only the first kCount of these hold
  // meaningful values, and only after a successful Parse.
  const T0& arg0() const { return arg0_; }
  const T1& arg1() const { return arg1_; }
  const T2& arg2() const { return arg2_; }
  const T3& arg3() const { return arg3_; }
  const T4& arg4() const { return arg4_; }
  const T5& arg5() const { return arg5_; }
  const T6& arg6() const { return arg6_; }

 private:
  T0 arg0_;
  T1 arg1_;
  T2 arg2_;
  T3 arg3_;
  T4 arg4_;
  T5 arg5_;
  T6 arg6_;
};
192 :
193 : } // namespace
194 :
195 E : MemReplayGrinder::MemReplayGrinder() : parse_error_(false) {
196 E : }
197 :
198 : bool MemReplayGrinder::ParseCommandLine(
199 E : const base::CommandLine* command_line) {
200 E : DCHECK_NE(static_cast<base::CommandLine*>(nullptr), command_line);
201 E : LoadAsanFunctionNames();
202 :
203 E : return true;
204 E : }
205 :
206 E : void MemReplayGrinder::SetParser(Parser* parser) {
207 E : DCHECK_NE(static_cast<Parser*>(nullptr), parser);
208 : // This grinder doesn't actually care about the parser in use.
209 E : }
210 :
211 E : bool MemReplayGrinder::Grind() {
212 E : if (parse_error_) {
213 i : LOG(ERROR) << "Encountered an error during parsing.";
214 i : return false;
215 : }
216 :
217 E : for (auto& proc_id_data_pair : process_data_map_) {
218 E : auto& proc_data = proc_id_data_pair.second;
219 E : if (!proc_data.pending_function_ids.empty() ||
220 : !proc_data.pending_calls.empty()) {
221 i : LOG(ERROR) << "The trace file function name table is incomplete and not "
222 : << "all detailed function call records could be parsed.";
223 i : return false;
224 : }
225 E : }
226 :
227 E : if (missing_events_.size()) {
228 i : LOG(WARNING) << "The following functions were found in the trace file but "
229 : << "are not supported by this grinder:";
230 :
231 i : for (auto& event_name : missing_events_) {
232 i : LOG(WARNING) << event_name;
233 i : }
234 : }
235 :
236 : // Grind each set of process data on its own.
237 E : for (auto& proc : process_data_map_) {
238 : // Make a heap of events across all threads in this process.
239 E : std::vector<ThreadDataIterator> heap;
240 E : for (auto& thread : proc.second.thread_data_map) {
241 E : if (thread.second.timestamps.empty())
242 i : continue;
243 E : ThreadDataIterator thread_it = {&thread.second, 0};
244 E : heap.push_back(thread_it);
245 E : }
246 E : std::make_heap(heap.begin(), heap.end());
247 :
248 : // This is used to track known objects.
249 E : ObjectMap object_map;
250 : // This is used to track synchronization points between threads.
251 E : WaitedMap waited_map;
252 :
253 : // Prepopulate the object map with entries for all the process heaps that
254 : // existed at process startup.
255 E : const ThreadDataIterator kDummyThreadDataIterator = {nullptr, 0};
256 E : const ObjectInfo kDummyObjectInfo(kDummyThreadDataIterator);
257 E : for (auto heap : proc.second.existing_heaps)
258 E : object_map.insert(std::make_pair(heap, kDummyObjectInfo));
259 :
260 : // Process all of the thread events in the serial order in which they
261 : // occurred. While doing so update object_map and waited_map, and encode
262 : // dependencies in the underlying PlotLine structures.
263 E : while (!heap.empty()) {
264 E : std::pop_heap(heap.begin(), heap.end());
265 E : auto thread_it = heap.back();
266 E : heap.pop_back();
267 :
268 : // Determine inputs and outputs of this event.
269 E : EventObjects objects;
270 E : GetEventObjects(thread_it, &objects);
271 :
272 : // Determine input dependencies for this event.
273 E : Deps deps;
274 E : if (!GetDeps(thread_it, objects, object_map, &deps))
275 i : return false;
276 :
277 : // Encode dependencies as explicit synchronization points as required,
278 : // and update the |waited_map| with this information.
279 E : if (!ApplyDeps(thread_it, object_map, deps, &waited_map))
280 i : return false;
281 :
282 : // Update the object map to reflect objects that have been destroyed,
283 : // created, or used.
284 E : if (!UpdateObjectMap(thread_it, objects, &object_map))
285 i : return false;
286 :
287 : // Increment the thread event iterator and reinsert it in the heap if
288 : // there are remaining events.
289 E : if (thread_it.increment()) {
290 E : heap.push_back(thread_it);
291 E : std::push_heap(heap.begin(), heap.end());
292 : }
293 E : }
294 E : }
295 :
296 E : return true;
297 E : }
298 :
299 E : bool MemReplayGrinder::OutputData(FILE* file) {
300 E : DCHECK_NE(static_cast<FILE*>(nullptr), file);
301 :
302 E : if (process_data_map_.empty())
303 i : return false;
304 :
305 : // Set up the streams/archives for serialization. Using gzip compression
306 : // reduces the size of the archive by over 70%.
307 E : core::FileOutStream out_stream(file);
308 E : core::ZOutStream zout_stream(&out_stream);
309 E : core::NativeBinaryOutArchive out_archive(&zout_stream);
310 E : if (!zout_stream.Init(9))
311 i : return false;
312 :
313 : // Save a magic header and version so that readers can validate the stream.
314 E : if (!out_archive.Save(bard::Story::kBardMagic))
315 i : return false;
316 E : if (!out_archive.Save(bard::Story::kBardVersion))
317 i : return false;
318 :
319 : // Serialize the stories, back to back.
320 E : if (!out_archive.Save(static_cast<size_t>(process_data_map_.size())))
321 i : return false;
322 E : for (const auto& proc_data_pair : process_data_map_) {
323 : // Output any existing heaps. The first of these is the process heap.
324 E : size_t heap_count = proc_data_pair.second.existing_heaps.size();
325 E : if (!out_archive.Save(heap_count))
326 i : return false;
327 E : for (const auto& heap : proc_data_pair.second.existing_heaps) {
328 E : if (!out_archive.Save(reinterpret_cast<uintptr_t>(heap)))
329 i : return false;
330 E : }
331 :
332 : // Output the story.
333 E : auto story = proc_data_pair.second.story;
334 E : if (!story->Save(&out_archive))
335 i : return false;
336 E : }
337 :
338 : // Ensure everything is written.
339 E : if (!zout_stream.Flush())
340 i : return false;
341 E : if (!out_stream.Flush())
342 i : return false;
343 :
344 E : return true;
345 E : }
346 :
// Parser callback: records the ID-to-event-type binding for a newly named
// function. If this resolves the last outstanding function ID for the
// process, drains the queue of detailed calls that were deferred waiting for
// name resolution.
void MemReplayGrinder::OnFunctionNameTableEntry(
    base::Time time,
    DWORD process_id,
    const TraceFunctionNameTableEntry* data) {
  DCHECK_NE(static_cast<TraceFunctionNameTableEntry*>(nullptr), data);

  if (parse_error_)
    return;

  std::string name(data->name);
  auto it = function_enum_map_.find(name);
  if (it == function_enum_map_.end()) {
    // Not a function this grinder handles; remember it for the warning that
    // Grind emits.
    missing_events_.insert(name);
    return;
  }

  ProcessData* proc_data = FindOrCreateProcessData(process_id);
  auto result = proc_data->function_id_map.insert(
      std::make_pair(data->function_id, it->second));
  // Each function ID is expected to be announced at most once per process.
  DCHECK(result.second);

  // If the pending function ID set is now empty then the pending detailed
  // function call records can be drained.
  if (proc_data->pending_function_ids.erase(data->function_id) == 1 &&
      proc_data->pending_function_ids.empty() &&
      !proc_data->pending_calls.empty()) {
    // Replay the deferred calls in arrival order. Note that the front element
    // is only popped after a successful parse.
    while (!proc_data->pending_calls.empty()) {
      const PendingDetailedFunctionCall& pending_call =
          proc_data->pending_calls.front();
      if (!ParseDetailedFunctionCall(pending_call.time(),
                                     pending_call.thread_id(),
                                     pending_call.data(), proc_data)) {
        return SetParseError();
      }
      proc_data->pending_calls.pop_front();
    }
  }
}
385 :
// Parser callback: handles a single detailed function call record, either
// parsing it immediately or queueing it until the function's name is known.
// Queueing preserves arrival order across the whole process.
void MemReplayGrinder::OnDetailedFunctionCall(
    base::Time time,
    DWORD process_id,
    DWORD thread_id,
    const TraceDetailedFunctionCall* data) {
  DCHECK_NE(0u, process_id);
  DCHECK_NE(0u, thread_id);
  DCHECK_NE(static_cast<TraceDetailedFunctionCall*>(nullptr), data);

  if (parse_error_)
    return;

  ProcessData* proc_data = FindOrCreateProcessData(process_id);
  DCHECK_NE(static_cast<ProcessData*>(nullptr), proc_data);

  // If function calls are already pending then all new calls must continue to
  // be added to the pending list.
  bool push_pending = !proc_data->pending_calls.empty();

  // If the function name doesn't exist then the call can't be processed.
  // Push it to the pending list and defer its processing until the function
  // name has been resolved.
  const auto& function = proc_data->function_id_map.find(data->function_id);
  if (function == proc_data->function_id_map.end()) {
    proc_data->pending_function_ids.insert(data->function_id);
    push_pending = true;
  }

  // Defer processing if required. PendingDetailedFunctionCall takes a deep
  // copy of |data|, which is only valid for the duration of this callback.
  if (push_pending) {
    proc_data->pending_calls.push_back(
        PendingDetailedFunctionCall(time, thread_id, data));
    return;
  }

  // The function name exists and there are no pending calls so parse the record
  // immediately.
  DCHECK(function != proc_data->function_id_map.end());
  DCHECK(proc_data->pending_calls.empty());
  if (!ParseDetailedFunctionCall(time, thread_id, data, proc_data))
    return SetParseError();
}
428 :
// Parser callback: records a heap that already existed at process startup.
// These heaps are later prepopulated into the object map by Grind and
// serialized ahead of the story by OutputData.
void MemReplayGrinder::OnProcessHeap(base::Time time,
                                     DWORD process_id,
                                     const TraceProcessHeap* data) {
  DCHECK_NE(0u, process_id);
  DCHECK_NE(static_cast<TraceProcessHeap*>(nullptr), data);
  DCHECK_NE(0u, data->process_heap);

  if (parse_error_)
    return;

  ProcessData* proc_data = FindOrCreateProcessData(process_id);
  DCHECK_NE(static_cast<ProcessData*>(nullptr), proc_data);
  // The trace stores the heap handle as an integer; keep it as an opaque
  // pointer-sized value.
  proc_data->existing_heaps.push_back(
      reinterpret_cast<const void*>(data->process_heap));
}
444 :
445 E : void MemReplayGrinder::LoadAsanFunctionNames() {
446 E : function_enum_map_.clear();
447 E : for (size_t i = 0; i < arraysize(kAsanHeapFunctionNames); ++i) {
448 E : function_enum_map_[kAsanHeapFunctionNames[i]] =
449 : static_cast<EventType>(EventInterface::kHeapAllocEvent + i);
450 E : }
451 E : }
452 :
// Decodes a single detailed function call record into a bard event and
// appends it (with its timestamp) to the owning thread's plot line. Returns
// false if the function ID is unknown, the argument encoding is malformed,
// the argument count/sizes don't match the expected signature, or the event
// type is unsupported.
bool MemReplayGrinder::ParseDetailedFunctionCall(
    base::Time time,
    DWORD thread_id,
    const TraceDetailedFunctionCall* data,
    ProcessData* proc_data) {
  DCHECK_NE(static_cast<ProcessData*>(nullptr), proc_data);

  // Lookup the function name. It is expected to exist.
  const auto& function = proc_data->function_id_map.find(data->function_id);
  if (function == proc_data->function_id_map.end())
    return false;

  // Parse the arguments.
  RawArgumentConverters args;
  if (!BuildArgumentConverters(data, &args))
    return false;

  // Get the associated thread data. This should not fail.
  ThreadData* thread_data = FindOrCreateThreadData(proc_data, thread_id);
  DCHECK_NE(static_cast<ThreadData*>(nullptr), thread_data);
  DCHECK_NE(static_cast<bard::Story::PlotLine*>(nullptr),
            thread_data->plot_line);

  std::unique_ptr<bard::EventInterface> evt;

  // Each case parses the arguments with the exact signature of the
  // corresponding heap API call (see the ArgumentParser template arguments)
  // and builds the matching bard event.
  switch (function->second) {
    case EventType::kHeapAllocEvent: {
      ArgumentParser<HANDLE, DWORD, SIZE_T, LPVOID> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapAllocEvent(data->stack_trace_id,
                                                 parser.arg0(), parser.arg1(),
                                                 parser.arg2(), parser.arg3()));
      break;
    }

    case EventType::kHeapCreateEvent: {
      ArgumentParser<DWORD, SIZE_T, SIZE_T, HANDLE> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapCreateEvent(
          data->stack_trace_id, parser.arg0(), parser.arg1(), parser.arg2(),
          parser.arg3()));
      break;
    }

    case EventType::kHeapDestroyEvent: {
      ArgumentParser<HANDLE, BOOL> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapDestroyEvent(
          data->stack_trace_id, parser.arg0(), parser.arg1()));
      break;
    }

    case EventType::kHeapFreeEvent: {
      // HeapFree calls also contain an optional hash of the memory contents.
      // This is ignored by this grinder.
      ArgumentParser<HANDLE, DWORD, LPVOID, BOOL, uint32_t> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapFreeEvent(data->stack_trace_id,
                                                parser.arg0(), parser.arg1(),
                                                parser.arg2(), parser.arg3()));
      break;
    }

    case EventType::kHeapReAllocEvent: {
      ArgumentParser<HANDLE, DWORD, LPVOID, SIZE_T, LPVOID> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapReAllocEvent(
          data->stack_trace_id, parser.arg0(), parser.arg1(), parser.arg2(),
          parser.arg3(), parser.arg4()));
      break;
    }

    case EventType::kHeapSetInformationEvent: {
      ArgumentParser<HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T, BOOL>
          parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapSetInformationEvent(
          data->stack_trace_id, parser.arg0(), parser.arg1(), parser.arg2(),
          parser.arg3(), parser.arg4()));
      break;
    }

    case EventType::kHeapSizeEvent: {
      ArgumentParser<HANDLE, DWORD, LPCVOID, SIZE_T> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapSizeEvent(data->stack_trace_id,
                                                parser.arg0(), parser.arg1(),
                                                parser.arg2(), parser.arg3()));
      break;
    }

    default: {
      LOG(ERROR) << "Encountered unsupported DetailedFunctionCall record.";
      return false;
    }
  }

  // The plot line takes ownership of the event.
  thread_data->plot_line->push_back(evt.release());
  thread_data->timestamps.push_back(data->timestamp);
  return true;
}
561 :
562 i : void MemReplayGrinder::SetParseError() {
563 i : parse_error_ = true;
564 i : }
565 :
// Takes a deep copy of |data| (header plus its variable-sized argument
// payload) so the record can outlive the parser's buffer while the call is
// queued waiting for function-name resolution.
MemReplayGrinder::PendingDetailedFunctionCall::PendingDetailedFunctionCall(
    base::Time time,
    DWORD thread_id,
    const TraceDetailedFunctionCall* data)
    : time_(time), thread_id_(thread_id) {
  DCHECK_NE(0u, thread_id);
  DCHECK_NE(static_cast<TraceDetailedFunctionCall*>(nullptr), data);

  // The record's true size is the offset of the trailing flexible argument
  // array plus the number of argument bytes actually present.
  size_t total_size = offsetof(TraceDetailedFunctionCall, argument_data) +
                      data->argument_data_size;
  data_.resize(total_size);
  ::memcpy(data_.data(), data, total_size);
}
579 :
580 : MemReplayGrinder::ProcessData* MemReplayGrinder::FindOrCreateProcessData(
581 E : DWORD process_id) {
582 E : auto it = process_data_map_.lower_bound(process_id);
583 E : if (it != process_data_map_.end() && it->first == process_id)
584 E : return &it->second;
585 :
586 E : std::unique_ptr<bard::Story> story(new bard::Story());
587 E : it = process_data_map_.insert(it, std::make_pair(process_id, ProcessData()));
588 E : it->second.process_id = process_id;
589 E : it->second.story = story.get();
590 E : stories_.push_back(story.release());
591 E : return &it->second;
592 E : }
593 :
594 : MemReplayGrinder::ThreadData* MemReplayGrinder::FindOrCreateThreadData(
595 : ProcessData* proc_data,
596 E : DWORD thread_id) {
597 E : auto it = proc_data->thread_data_map.lower_bound(thread_id);
598 E : if (it != proc_data->thread_data_map.end() && it->first == thread_id)
599 E : return &it->second;
600 :
601 E : bard::Story::PlotLine* plot_line = proc_data->story->CreatePlotLine();
602 E : ThreadData thread_data;
603 E : thread_data.plot_line = plot_line;
604 E : it = proc_data->thread_data_map.insert(
605 : it, std::make_pair(thread_id, thread_data));
606 E : return &it->second;
607 E : }
608 :
609 E : void MemReplayGrinder::EnsureLinkedEvent(const ThreadDataIterator& iter) {
610 E : if (iter.event()->type() == EventInterface::kLinkedEvent)
611 i : return;
612 :
613 E : bard::events::LinkedEvent* linked_event = new bard::events::LinkedEvent(
614 : std::unique_ptr<EventInterface>(iter.event()));
615 E : (*iter.plot_line())[iter.index] = linked_event;
616 E : }
617 :
// Classifies the objects touched by the event at |iter| into those it
// creates, destroys, or merely uses. Exactly zero or one object may be
// created and zero or one destroyed; any number may be used.
void MemReplayGrinder::GetEventObjects(const ThreadDataIterator& iter,
                                       EventObjects* objects) {
  DCHECK_NE(static_cast<EventObjects*>(nullptr), objects);

  auto evt = iter.inner_event();
  objects->created = nullptr;
  objects->destroyed = nullptr;
  objects->used.clear();

  // Determine objects that are created, used or destroyed.
  switch (evt->type()) {
    case EventInterface::kHeapAllocEvent: {
      auto e =
          reinterpret_cast<const bard::events::HeapAllocEvent*>(iter.event());
      // A null heap is possible (e.g. failed call); only record it when set.
      if (e->trace_heap())
        objects->used.push_back(e->trace_heap());
      objects->created = e->trace_alloc();
      break;
    }
    case EventInterface::kHeapCreateEvent: {
      auto e =
          reinterpret_cast<const bard::events::HeapCreateEvent*>(iter.event());
      objects->created = e->trace_heap();
      break;
    }
    case EventInterface::kHeapDestroyEvent: {
      auto e =
          reinterpret_cast<const bard::events::HeapDestroyEvent*>(iter.event());
      objects->destroyed = e->trace_heap();
      break;
    }
    case EventInterface::kHeapFreeEvent: {
      auto e =
          reinterpret_cast<const bard::events::HeapFreeEvent*>(iter.event());
      if (e->trace_heap())
        objects->used.push_back(e->trace_heap());
      objects->destroyed = e->trace_alloc();
      break;
    }
    case EventInterface::kHeapReAllocEvent: {
      auto e =
          reinterpret_cast<const bard::events::HeapReAllocEvent*>(iter.event());
      if (e->trace_heap())
        objects->used.push_back(e->trace_heap());

      if (e->trace_alloc() == e->trace_realloc()) {
        // ReAllocs that return the original address are indistinguishable from
        // a simple 'use' of that address. Encode it as such.
        objects->used.push_back(e->trace_alloc());
      } else {
        // The allocation moved: the old address dies and the new one is born.
        objects->destroyed = e->trace_alloc();
        objects->created = e->trace_realloc();
      }
      break;
    }
    case EventInterface::kHeapSetInformationEvent: {
      auto e = reinterpret_cast<const bard::events::HeapSetInformationEvent*>(
          iter.event());
      if (e->trace_heap())
        objects->used.push_back(e->trace_heap());
      break;
    }
    case EventInterface::kHeapSizeEvent: {
      auto e =
          reinterpret_cast<const bard::events::HeapSizeEvent*>(iter.event());
      if (e->trace_heap())
        objects->used.push_back(e->trace_heap());
      if (e->trace_alloc())
        objects->used.push_back(const_cast<void*>(e->trace_alloc()));
      break;
    }
    // Other event types touch no tracked objects.
    default: break;
  }
}
692 :
// Computes the set of input dependencies of the event at |iter| given the
// objects it touches and the current object lifetime state. Fails (with
// logging) on lifetime violations such as using a dead object or re-creating
// a live one.
bool MemReplayGrinder::GetDeps(const ThreadDataIterator& iter,
                               const EventObjects& objects,
                               const ObjectMap& object_map,
                               Deps* deps) {
  DCHECK_NE(static_cast<Deps*>(nullptr), deps);
  DCHECK(deps->empty());

  // If the object being created is aliased to one that has already existed
  // then ensure a dependency to the previous destruction event is generated.
  if (objects.created) {
    auto it = object_map.find(objects.created);
    if (it != object_map.end()) {
      if (it->second.alive()) {
        LOG(ERROR) << "Unable to create existing object: " << objects.created;
        LOG(ERROR) << "  Timestamp: " << std::hex << iter.timestamp();
        return false;
      }
      AddDep(iter, it->second.destroyed(), deps);
    }
  }

  // For each used object, create an input dependency on the creation of that
  // object.
  for (auto used : objects.used) {
    auto it = object_map.find(used);
    if (it == object_map.end() || !it->second.alive()) {
      LOG(ERROR) << "Unable to encode use dependency to dead or missing "
                 << "object: " << used;
      LOG(ERROR) << "  Timestamp: " << std::hex << iter.timestamp();
      return false;
    }
    AddDep(iter, it->second.created(), deps);
  }

  if (objects.destroyed) {
    // For a destroyed object, create an input dependency on the most recent
    // use of that object on each other thread. This ensures that it won't be
    // destroyed during playback until all contemporary uses of it have
    // completed.
    auto it = object_map.find(objects.destroyed);
    if (it == object_map.end() || !it->second.alive()) {
      LOG(ERROR) << "Unable to encode destruction depedendency to dead or "
                 << "missing object: " << objects.destroyed;
      LOG(ERROR) << "  Timestamp: " << std::hex << iter.timestamp();
      return false;
    }
    for (auto thread_index_pair : it->second.last_use()) {
      // Skip uses on this thread, as they are implicit.
      if (thread_index_pair.first == iter.thread_data)
        continue;
      ThreadDataIterator dep = {thread_index_pair.first,
                                thread_index_pair.second};
      AddDep(iter, dep, deps);
    }
  }

  return true;
}
751 :
// Adds |input| to |deps| as an input dependency of the event at |iter|,
// filtering out dependencies that need not be encoded explicitly.
void MemReplayGrinder::AddDep(const ThreadDataIterator& iter,
                              const ThreadDataIterator& input,
                              Deps* deps) {
  DCHECK_NE(static_cast<Deps*>(nullptr), deps);

  // If the dependency is to an object on a dummy thread it doesn't need to be
  // encoded. Such events represent creation events for objects that exist
  // before the playback starts.
  if (input.thread_data == nullptr)
    return;

  // Dependencies can only be to older events.
  DCHECK_LT(input.timestamp(), iter.timestamp());

  // Dependencies to events on the same thread are implicit and need not be
  // encoded.
  if (iter.thread_data->plot_line == input.thread_data->plot_line)
    return;

  deps->insert(input);
}
773 :
// Encodes each dependency in |deps| as an explicit link between events,
// skipping dependencies already subsumed by a more recent encoded wait
// between the same pair of plot lines. Updates |waited_map| with every newly
// encoded dependency. Always returns true.
bool MemReplayGrinder::ApplyDeps(const ThreadDataIterator& iter,
                                 const ObjectMap& object_map,
                                 const Deps& deps,
                                 WaitedMap* waited_map) {
  DCHECK_NE(static_cast<WaitedMap*>(nullptr), waited_map);

  for (auto dep : deps) {
    // Determine if there's already a sufficiently recent encoded dependency
    // between these two plot lines.
    // NOTE: This logic could be generalized to look for paths of dependencies,
    // but that requires significantly more storage and computation. This
    // catches the most common cases.
    auto plot_line_pair = PlotLinePair(iter.plot_line(), dep.plot_line());
    auto waited_it = waited_map->lower_bound(plot_line_pair);
    if (waited_it != waited_map->end() && waited_it->first == plot_line_pair) {
      DCHECK_EQ(dep.plot_line(), waited_it->second.plot_line());
      // An existing wait at an index >= this dependency already covers it.
      if (waited_it->second.index >= dep.index)
        continue;
    }

    // Arriving here indicates that the dependency must be explicitly encoded.

    // Update the |waiting_map| to reflect the dependency.
    if (waited_it != waited_map->end() && waited_it->first == plot_line_pair) {
      waited_it->second = dep;
    } else {
      // |waited_it| is a valid insertion hint from lower_bound above.
      waited_map->insert(waited_it, std::make_pair(plot_line_pair, dep));
    }

    // Make ourselves and the dependency linked events if necessary.
    EnsureLinkedEvent(iter);
    EnsureLinkedEvent(dep);

    // Finally, wire up the dependency.
    reinterpret_cast<bard::events::LinkedEvent*>(iter.event())
        ->AddDep(dep.event());
  }

  return true;
}
814 :
// Applies the lifetime effects of the event at |iter| to |object_map|:
// destroyed objects are marked dead, created objects inserted (or revived if
// a dead entry for the same address exists), and used objects have their
// last-use records refreshed. Fails with logging on lifetime violations.
bool MemReplayGrinder::UpdateObjectMap(const ThreadDataIterator& iter,
                                       const EventObjects& objects,
                                       ObjectMap* object_map) {
  DCHECK_NE(static_cast<ObjectMap*>(nullptr), object_map);

  // Forward these for readability.
  auto created = objects.created;
  auto destroyed = objects.destroyed;
  auto& used = objects.used;

  // Update the object map to reflect any destroyed objects.
  if (destroyed) {
    auto it = object_map->find(destroyed);
    if (it == object_map->end()) {
      LOG(ERROR) << "Unable to destroy missing object: " << destroyed;
      return false;
    }

    ObjectInfo& info = it->second;
    if (!info.alive()) {
      LOG(ERROR) << "Unable to destroy dead object: " << destroyed;
      return false;
    }

    // Update the object info to reflect the fact that it is now dead, and
    // the event that destroyed it.
    info.SetDestroyed(iter);
  }

  // Update the object map to reflect any created objects.
  if (created) {
    // Insert the created object.
    auto result = object_map->insert(std::make_pair(created, ObjectInfo(iter)));

    // Insertion failed, as the object already existed. This is fine if it was
    // dead, but an error if it was alive.
    if (!result.second) {
      ObjectInfo& info = result.first->second;
      if (info.alive()) {
        LOG(ERROR) << "Unable to create alive object: " << created;
        return false;
      }

      // Transition the object to being alive again.
      info.SetCreated(iter);
    }
  }

  // Update the object map to reflect any used objects.
  for (auto object : used) {
    auto it = object_map->find(object);
    if (it == object_map->end()) {
      LOG(ERROR) << "Unable to use missing object: " << object;
      return false;
    }
    it->second.SetLastUse(iter);
  }

  return true;
}
875 :
876 E : MemReplayGrinder::ObjectInfo::ObjectInfo(const ThreadDataIterator& iter) {
877 E : SetCreated(iter);
878 E : }
879 :
880 E : void MemReplayGrinder::ObjectInfo::SetCreated(const ThreadDataIterator& iter) {
881 E : alive_ = true;
882 E : created_ = iter;
883 E : destroyed_ = {nullptr, 0};
884 :
885 E : last_use_.clear();
886 E : SetLastUse(iter);
887 E : }
888 :
889 E : void MemReplayGrinder::ObjectInfo::SetLastUse(const ThreadDataIterator& iter) {
890 E : last_use_[iter.thread_data] = iter.index;
891 E : }
892 :
// Marks the object as dead, destroyed at |iter|. The destruction event also
// counts as a use.
void MemReplayGrinder::ObjectInfo::SetDestroyed(
    const ThreadDataIterator& iter) {
  alive_ = false;
  destroyed_ = iter;
  SetLastUse(iter);
}
899 :
900 : } // namespace grinders
901 : } // namespace grinder
|