// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/grinder/grinders/mem_replay_grinder.h"

#include <cstdint>
#include <cstring>

#include "syzygy/bard/raw_argument_converter.h"
#include "syzygy/bard/events/heap_alloc_event.h"
#include "syzygy/bard/events/heap_create_event.h"
#include "syzygy/bard/events/heap_destroy_event.h"
#include "syzygy/bard/events/heap_free_event.h"
#include "syzygy/bard/events/heap_realloc_event.h"
#include "syzygy/bard/events/heap_set_information_event.h"
#include "syzygy/bard/events/heap_size_event.h"
#include "syzygy/core/serialization.h"
#include "syzygy/core/zstream.h"

namespace grinder {
namespace grinders {

namespace {

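// Note: these entries must remain in the same order as the corresponding
// EventInterface event types, as LoadAsanFunctionNames() maps each name to an
// event type by its index (EventInterface::kHeapAllocEvent + i).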
const char* kAsanHeapFunctionNames[] = {
    "asan_HeapAlloc",
    "asan_HeapCreate",
    "asan_HeapDestroy",
    "asan_HeapFree",
    "asan_HeapReAlloc",
    "asan_HeapSetInformation",
    "asan_HeapSize",
};

using RawArgumentConverters = std::vector<bard::RawArgumentConverter>;

// A utility function for parsing a uint32_t value from a buffer by copying
// its contents.
bool ParseUint32(const uint8_t* end, const uint8_t** cursor, uint32_t* value) {
  DCHECK_NE(static_cast<uint8_t*>(nullptr), end);
  DCHECK_NE(static_cast<uint8_t**>(nullptr), cursor);
  DCHECK_NE(static_cast<uint8_t*>(nullptr), *cursor);
  DCHECK_LE(*cursor, end);
  DCHECK_NE(static_cast<uint32_t*>(nullptr), value);

  if (std::distance(*cursor, end) < sizeof(*value))
    return false;
  *value = *reinterpret_cast<const uint32_t*>(*cursor);
  *cursor += sizeof(*value);
  return true;
}

// Builds a vector of RawArgumentConverter objects on top of the stored
// arguments in the provided TraceDetailedFunctionCall.
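// The encoded arguments consist of a uint32_t argument count, followed by a
// uint32_t size for each argument, followed by the raw argument contents
// packed back to back. See function_call_logger.h for details.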
bool BuildArgumentConverters(const TraceDetailedFunctionCall* data,
                             RawArgumentConverters* converters) {
  DCHECK_NE(static_cast<TraceDetailedFunctionCall*>(nullptr), data);
  DCHECK_NE(static_cast<RawArgumentConverters*>(nullptr), converters);
  const uint8_t* cursor = data->argument_data;
  const uint8_t* end = data->argument_data + data->argument_data_size;

  // Parse the size of the argument list present in the
  // TraceDetailedFunctionCall record. See the encoding documented in
  // function_call_logger.h.
  uint32_t arg_count = 0;
  if (!ParseUint32(end, &cursor, &arg_count))
    return false;

  converters->clear();
  converters->reserve(arg_count);

  // Parse the argument sizes and contents themselves.
  const uint8_t* arg_data = cursor + sizeof(uint32_t) * arg_count;
  for (size_t i = 0; i < arg_count; ++i) {
    uint32_t arg_size = 0;
    if (!ParseUint32(end, &cursor, &arg_size))
      return false;

    if (arg_data + arg_size > end)
      return false;
    converters->push_back(bard::RawArgumentConverter(arg_data, arg_size));
    arg_data += arg_size;
  }

  return true;
}

// Traits class used by ArgumentParser to count the number of real
// (non-placeholder) arguments.
template <typename T>
struct ArgumentParserTraits {
  static const size_t kCount = 1;
};

// Placeholder type for unused ArgumentParser type parameters.
struct ArgumentParserDummy {};

// Specialization ensuring that placeholder parameters don't contribute to the
// argument count.
template <>
struct ArgumentParserTraits<ArgumentParserDummy> {
  static const size_t kCount = 0;
};

// Helper class for parsing and validating arguments. The arguments are
// provided as an array of RawArgumentConverters which implicitly check for
// size-compatibility with the output argument types. Allows for very compact
// parsing of DetailedFunctionCalls using code like the following:
//
//   // Extract the encoded arguments to a vector of converters.
//   RawArgumentConverters args;
//   CHECK(BuildArgumentConverters(detailed_function_call, &args));
//   // And parse them to their final types. Argument count and size
//   // constraints are automatically enforced here.
//   ArgumentParser<Foo, Bar> arg_parser;
//   CHECK(arg_parser.Parse(args));
//   Foo foo = arg_parser.arg0();
//   Bar bar = arg_parser.arg1();
template <typename T0,
          typename T1 = ArgumentParserDummy,
          typename T2 = ArgumentParserDummy,
          typename T3 = ArgumentParserDummy,
          typename T4 = ArgumentParserDummy,
          typename T5 = ArgumentParserDummy,
          typename T6 = ArgumentParserDummy>
class ArgumentParser {
 public:
  static const size_t kCount =
      ArgumentParserTraits<T0>::kCount + ArgumentParserTraits<T1>::kCount +
      ArgumentParserTraits<T2>::kCount + ArgumentParserTraits<T3>::kCount +
      ArgumentParserTraits<T4>::kCount + ArgumentParserTraits<T5>::kCount +
      ArgumentParserTraits<T6>::kCount;

  // Parses the arguments from the provided converter. Returns true on success,
  // false otherwise.
  bool Parse(const RawArgumentConverters& args) {
    if (args.size() != kCount)
      return false;
    static_assert(kCount <= 7, "need to update this switch");
    switch (kCount) {
      // These case statements deliberately fall through.
      case 7:
        if (!args[6].RetrieveAs(&arg6_))
          return false;
      case 6:
        if (!args[5].RetrieveAs(&arg5_))
          return false;
      case 5:
        if (!args[4].RetrieveAs(&arg4_))
          return false;
      case 4:
        if (!args[3].RetrieveAs(&arg3_))
          return false;
      case 3:
        if (!args[2].RetrieveAs(&arg2_))
          return false;
      case 2:
        if (!args[1].RetrieveAs(&arg1_))
          return false;
      case 1:
        if (!args[0].RetrieveAs(&arg0_))
          return false;
    }
    return true;
  }

  const T0& arg0() const { return arg0_; }
  const T1& arg1() const { return arg1_; }
  const T2& arg2() const { return arg2_; }
  const T3& arg3() const { return arg3_; }
  const T4& arg4() const { return arg4_; }
  const T5& arg5() const { return arg5_; }
  const T6& arg6() const { return arg6_; }

 private:
  T0 arg0_;
  T1 arg1_;
  T2 arg2_;
  T3 arg3_;
  T4 arg4_;
  T5 arg5_;
  T6 arg6_;
};

}  // namespace

MemReplayGrinder::MemReplayGrinder() : parse_error_(false) {
}

bool MemReplayGrinder::ParseCommandLine(
    const base::CommandLine* command_line) {
  DCHECK_NE(static_cast<base::CommandLine*>(nullptr), command_line);
  LoadAsanFunctionNames();

  return true;
}

void MemReplayGrinder::SetParser(Parser* parser) {
  DCHECK_NE(static_cast<Parser*>(nullptr), parser);
  // This grinder doesn't actually care about the parser in use.
}

bool MemReplayGrinder::Grind() {
  if (parse_error_) {
    LOG(ERROR) << "Encountered an error during parsing.";
    return false;
  }

  for (auto& proc_id_data_pair : process_data_map_) {
    auto& proc_data = proc_id_data_pair.second;
    if (!proc_data.pending_function_ids.empty() ||
        !proc_data.pending_calls.empty()) {
      LOG(ERROR) << "The trace file function name table is incomplete and not "
                 << "all detailed function call records could be parsed.";
      return false;
    }
  }

  if (!missing_events_.empty()) {
    LOG(WARNING) << "The following functions were found in the trace file but "
                 << "are not supported by this grinder:";

    for (auto& event_name : missing_events_) {
      LOG(WARNING) << event_name;
    }
  }

  // Grind each set of process data on its own.
  for (auto& proc : process_data_map_) {
    // Make a heap of events across all threads in this process.
    std::vector<ThreadDataIterator> heap;
    for (auto& thread : proc.second.thread_data_map) {
      if (thread.second.timestamps.empty())
        continue;
      ThreadDataIterator thread_it = {&thread.second, 0};
      heap.push_back(thread_it);
    }
    std::make_heap(heap.begin(), heap.end());
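    // The ThreadDataIterator comparator is expected to order the heap by
    // timestamp, so that the earliest outstanding event across all of the
    // threads is popped first.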

    // This is used to track known objects while they are alive.
    ObjectMap object_map;
    // This is used to track synchronization points between threads.
    WaitedMap waited_map;

    // Prepopulate the object map with entries for all the process heaps that
    // existed at process startup.
    const ThreadDataIterator kDummyThreadDataIterator = {nullptr, 0};
    for (auto existing_heap : proc.second.existing_heaps)
      object_map.insert(
          std::make_pair(existing_heap, kDummyThreadDataIterator));

    // Process all of the thread events in the serial order in which they
    // occurred. While doing so update object_map and waited_map, and encode
    // dependencies in the underlying PlotLine structures.
    while (!heap.empty()) {
      std::pop_heap(heap.begin(), heap.end());
      auto thread_it = heap.back();
      heap.pop_back();

      // Determine input dependencies for this event.
      Deps deps;
      if (!GetDeps(thread_it, &deps))
        return false;

      // Encode dependencies as explicit synchronization points as required,
      // and update the |waited_map| with this information.
      if (!ApplyDeps(thread_it, object_map, deps, &waited_map))
        return false;

      // Update the object map to reflect objects that have been destroyed or
      // created.
      if (!UpdateObjectMap(thread_it, &object_map))
        return false;

      // Increment the thread event iterator and reinsert it in the heap if
      // there are remaining events.
      if (thread_it.increment()) {
        heap.push_back(thread_it);
        std::push_heap(heap.begin(), heap.end());
      }
    }
  }

  return true;
}

bool MemReplayGrinder::OutputData(FILE* file) {
  DCHECK_NE(static_cast<FILE*>(nullptr), file);

  if (process_data_map_.empty())
    return false;

  // Set up the streams/archives for serialization. Using gzip compression
  // reduces the size of the archive by over 70%.
  core::FileOutStream out_stream(file);
  core::ZOutStream zout_stream(&out_stream);
  core::NativeBinaryOutArchive out_archive(&zout_stream);
  if (!zout_stream.Init(9))
    return false;

  // Serialize the stories, back to back.
  if (!out_archive.Save(static_cast<size_t>(process_data_map_.size())))
    return false;
  for (const auto& proc_data_pair : process_data_map_) {
    auto story = proc_data_pair.second.story;
    if (!story->Save(&out_archive))
      return false;
  }

  // Ensure everything is written.
  if (!zout_stream.Flush())
    return false;
  if (!out_stream.Flush())
    return false;

  return true;
}

void MemReplayGrinder::OnFunctionNameTableEntry(
    base::Time time,
    DWORD process_id,
    const TraceFunctionNameTableEntry* data) {
  DCHECK_NE(static_cast<TraceFunctionNameTableEntry*>(nullptr), data);

  if (parse_error_)
    return;

  std::string name(data->name);
  auto it = function_enum_map_.find(name);
  if (it == function_enum_map_.end()) {
    missing_events_.insert(name);
    return;
  }

  ProcessData* proc_data = FindOrCreateProcessData(process_id);
  auto result = proc_data->function_id_map.insert(
      std::make_pair(data->function_id, it->second));
  DCHECK(result.second);

  // If the pending function ID set is now empty then the pending detailed
  // function call records can be drained.
  if (proc_data->pending_function_ids.erase(data->function_id) == 1 &&
      proc_data->pending_function_ids.empty() &&
      !proc_data->pending_calls.empty()) {
    while (!proc_data->pending_calls.empty()) {
      const PendingDetailedFunctionCall& pending_call =
          proc_data->pending_calls.front();
      if (!ParseDetailedFunctionCall(pending_call.time(),
                                     pending_call.thread_id(),
                                     pending_call.data(), proc_data)) {
        return SetParseError();
      }
      proc_data->pending_calls.pop_front();
    }
  }
}

void MemReplayGrinder::OnDetailedFunctionCall(
    base::Time time,
    DWORD process_id,
    DWORD thread_id,
    const TraceDetailedFunctionCall* data) {
  DCHECK_NE(0u, process_id);
  DCHECK_NE(0u, thread_id);
  DCHECK_NE(static_cast<TraceDetailedFunctionCall*>(nullptr), data);

  if (parse_error_)
    return;

  ProcessData* proc_data = FindOrCreateProcessData(process_id);
  DCHECK_NE(static_cast<ProcessData*>(nullptr), proc_data);

  // If function calls are already pending then all new calls must continue to
  // be added to the pending list.
  bool push_pending = !proc_data->pending_calls.empty();

  // If the function name doesn't exist then the call can't be processed.
  // Push it to the pending list and defer its processing until the function
  // name has been resolved.
  const auto& function = proc_data->function_id_map.find(data->function_id);
  if (function == proc_data->function_id_map.end()) {
    proc_data->pending_function_ids.insert(data->function_id);
    push_pending = true;
  }

  // Defer processing if required.
  if (push_pending) {
    proc_data->pending_calls.push_back(
        PendingDetailedFunctionCall(time, thread_id, data));
    return;
  }

  // The function name exists and there are no pending calls, so parse the
  // record immediately.
  DCHECK(function != proc_data->function_id_map.end());
  DCHECK(proc_data->pending_calls.empty());
  if (!ParseDetailedFunctionCall(time, thread_id, data, proc_data))
    return SetParseError();
}

void MemReplayGrinder::OnProcessHeap(base::Time time,
                                     DWORD process_id,
                                     const TraceProcessHeap* data) {
  DCHECK_NE(0u, process_id);
  DCHECK_NE(static_cast<TraceProcessHeap*>(nullptr), data);
  DCHECK_NE(0u, data->process_heap);

  if (parse_error_)
    return;

  ProcessData* proc_data = FindOrCreateProcessData(process_id);
  DCHECK_NE(static_cast<ProcessData*>(nullptr), proc_data);
  proc_data->existing_heaps.push_back(
      reinterpret_cast<const void*>(data->process_heap));
}

void MemReplayGrinder::LoadAsanFunctionNames() {
  function_enum_map_.clear();
  for (size_t i = 0; i < arraysize(kAsanHeapFunctionNames); ++i) {
    function_enum_map_[kAsanHeapFunctionNames[i]] =
        static_cast<EventType>(EventInterface::kHeapAllocEvent + i);
  }
}

bool MemReplayGrinder::ParseDetailedFunctionCall(
    base::Time time,
    DWORD thread_id,
    const TraceDetailedFunctionCall* data,
    ProcessData* proc_data) {
  DCHECK_NE(static_cast<ProcessData*>(nullptr), proc_data);

  // Lookup the function name. It is expected to exist.
  const auto& function = proc_data->function_id_map.find(data->function_id);
  if (function == proc_data->function_id_map.end())
    return false;

  // Parse the arguments.
  RawArgumentConverters args;
  if (!BuildArgumentConverters(data, &args))
    return false;

  // Get the associated thread data. This should not fail.
  ThreadData* thread_data = FindOrCreateThreadData(proc_data, thread_id);
  DCHECK_NE(static_cast<ThreadData*>(nullptr), thread_data);
  DCHECK_NE(static_cast<bard::Story::PlotLine*>(nullptr),
            thread_data->plot_line);

  scoped_ptr<bard::EventInterface> evt;

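  // Each case below parses the call's arguments in their declaration order,
  // with the call's observed return value encoded as the final argument.
  // (HeapFree records additionally carry a hash of the freed contents.)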
  switch (function->second) {
    case EventType::kHeapAllocEvent: {
      ArgumentParser<HANDLE, DWORD, SIZE_T, LPVOID> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapAllocEvent(parser.arg0(), parser.arg1(),
                                                 parser.arg2(), parser.arg3()));
      break;
    }

    case EventType::kHeapCreateEvent: {
      ArgumentParser<DWORD, SIZE_T, SIZE_T, HANDLE> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapCreateEvent(
          parser.arg0(), parser.arg1(), parser.arg2(), parser.arg3()));
      break;
    }

    case EventType::kHeapDestroyEvent: {
      ArgumentParser<HANDLE, BOOL> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(
          new bard::events::HeapDestroyEvent(parser.arg0(), parser.arg1()));
      break;
    }

    case EventType::kHeapFreeEvent: {
      // HeapFree calls also contain an optional hash of the memory contents.
      // This is ignored by this grinder.
      ArgumentParser<HANDLE, DWORD, LPVOID, BOOL, uint32_t> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapFreeEvent(parser.arg0(), parser.arg1(),
                                                parser.arg2(), parser.arg3()));
      break;
    }

    case EventType::kHeapReAllocEvent: {
      ArgumentParser<HANDLE, DWORD, LPVOID, SIZE_T, LPVOID> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapReAllocEvent(parser.arg0(), parser.arg1(),
                                                   parser.arg2(), parser.arg3(),
                                                   parser.arg4()));
      break;
    }

    case EventType::kHeapSetInformationEvent: {
      ArgumentParser<HANDLE, HEAP_INFORMATION_CLASS, PVOID, SIZE_T, BOOL>
          parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapSetInformationEvent(
          parser.arg0(), parser.arg1(), parser.arg2(), parser.arg3(),
          parser.arg4()));
      break;
    }

    case EventType::kHeapSizeEvent: {
      ArgumentParser<HANDLE, DWORD, LPCVOID, SIZE_T> parser;
      if (!parser.Parse(args))
        return false;
      evt.reset(new bard::events::HeapSizeEvent(parser.arg0(), parser.arg1(),
                                                parser.arg2(), parser.arg3()));
      break;
    }

    default: {
      LOG(ERROR) << "Encountered unsupported DetailedFunctionCall record.";
      return false;
    }
  }

  thread_data->plot_line->push_back(evt.Pass());
  thread_data->timestamps.push_back(data->timestamp);
  return true;
}

void MemReplayGrinder::SetParseError() {
  parse_error_ = true;
}

MemReplayGrinder::PendingDetailedFunctionCall::PendingDetailedFunctionCall(
    base::Time time,
    DWORD thread_id,
    const TraceDetailedFunctionCall* data)
    : time_(time), thread_id_(thread_id) {
  DCHECK_NE(0u, thread_id);
  DCHECK_NE(static_cast<TraceDetailedFunctionCall*>(nullptr), data);

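  // Copy the entire variable-length record (header plus argument payload) so
  // that it can be re-parsed once the function name table entry arrives.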
  size_t total_size = offsetof(TraceDetailedFunctionCall, argument_data) +
                      data->argument_data_size;
  data_.resize(total_size);
  ::memcpy(data_.data(), data, total_size);
}

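// Returns the ProcessData for |process_id|, creating it (and its backing
// Story) on first use.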
MemReplayGrinder::ProcessData* MemReplayGrinder::FindOrCreateProcessData(
    DWORD process_id) {
  auto it = process_data_map_.lower_bound(process_id);
  if (it != process_data_map_.end() && it->first == process_id)
    return &it->second;

  scoped_ptr<bard::Story> story(new bard::Story());
  it = process_data_map_.insert(it, std::make_pair(process_id, ProcessData()));
  it->second.process_id = process_id;
  it->second.story = story.get();
  stories_.push_back(story.Pass());
  return &it->second;
}

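// Returns the ThreadData for |thread_id| within |proc_data|, creating it and
// an associated plot line on first use.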
MemReplayGrinder::ThreadData* MemReplayGrinder::FindOrCreateThreadData(
    ProcessData* proc_data,
    DWORD thread_id) {
  auto it = proc_data->thread_data_map.lower_bound(thread_id);
  if (it != proc_data->thread_data_map.end() && it->first == thread_id)
    return &it->second;

  bard::Story::PlotLine* plot_line = proc_data->story->CreatePlotLine();
  ThreadData thread_data;
  thread_data.plot_line = plot_line;
  it = proc_data->thread_data_map.insert(
      it, std::make_pair(thread_id, thread_data));
  return &it->second;
}

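// Wraps the event at |iter| in a LinkedEvent, in place, so that
// cross-plot-line dependencies can be attached to it.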
void MemReplayGrinder::EnsureLinkedEvent(const ThreadDataIterator& iter) {
  if (iter.event()->type() == EventInterface::kLinkedEvent)
    return;

  bard::events::LinkedEvent* linked_event =
      new bard::events::LinkedEvent(scoped_ptr<EventInterface>(iter.event()));
  (*iter.plot_line())[iter.index] = linked_event;
}

bool MemReplayGrinder::GetDeps(const ThreadDataIterator& iter, Deps* deps) {
  DCHECK_NE(static_cast<Deps*>(nullptr), deps);
  DCHECK(deps->empty());

  auto evt = iter.inner_event();

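  // The dependencies of an event are the trace-file values of the objects
  // (heaps and allocations) that it consumes as inputs.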
  switch (evt->type()) {
    case EventInterface::kHeapAllocEvent: {
      auto e = reinterpret_cast<const bard::events::HeapAllocEvent*>(evt);
      deps->insert(e->trace_heap());
      break;
    }
    case EventInterface::kHeapDestroyEvent: {
      auto e = reinterpret_cast<const bard::events::HeapDestroyEvent*>(evt);
      deps->insert(e->trace_heap());
      break;
    }
    case EventInterface::kHeapFreeEvent: {
      auto e = reinterpret_cast<const bard::events::HeapFreeEvent*>(evt);
      deps->insert(e->trace_heap());
      deps->insert(e->trace_alloc());
      break;
    }
    case EventInterface::kHeapReAllocEvent: {
      auto e = reinterpret_cast<const bard::events::HeapReAllocEvent*>(evt);
      deps->insert(e->trace_heap());
      deps->insert(e->trace_alloc());
      break;
    }
    case EventInterface::kHeapSetInformationEvent: {
      auto e =
          reinterpret_cast<const bard::events::HeapSetInformationEvent*>(evt);
      deps->insert(e->trace_heap());
      break;
    }
    case EventInterface::kHeapSizeEvent: {
      auto e = reinterpret_cast<const bard::events::HeapSizeEvent*>(evt);
      deps->insert(e->trace_heap());
      deps->insert(e->trace_alloc());
      break;
    }
    default: break;
  }

  return true;
}


bool MemReplayGrinder::ApplyDeps(const ThreadDataIterator& iter,
                                 const ObjectMap& object_map,
                                 const Deps& deps,
                                 WaitedMap* waited_map) {
  DCHECK_NE(static_cast<WaitedMap*>(nullptr), waited_map);

  for (auto dep : deps) {
    auto dep_it = object_map.find(dep);
    if (dep_it == object_map.end()) {
      LOG(ERROR) << "Unable to find required input object " << dep;
      return false;
    }

    // If the object was created on a dummy thread then it exists at startup
    // and doesn't need to be encoded as a dependency.
    if (dep_it->second.thread_data == nullptr)
      continue;

    // If the object was created on the same thread as the current object
    // being processed then an implicit dependency already exists.
    if (iter.plot_line() == dep_it->second.plot_line())
      continue;

    // Determine if there's already a sufficiently recent encoded dependency
    // between these two plot lines.
    auto plot_line_pair =
        PlotLinePair(iter.plot_line(), dep_it->second.plot_line());
    auto waited_it = waited_map->lower_bound(plot_line_pair);
    if (waited_it != waited_map->end() && waited_it->first == plot_line_pair) {
      DCHECK_EQ(dep_it->second.plot_line(), waited_it->second.plot_line());
      if (waited_it->second.index >= dep_it->second.index)
        continue;
    }

    // Arriving here indicates that the dependency must be explicitly encoded.

    // Update the |waited_map| to reflect the dependency.
    if (waited_it != waited_map->end() && waited_it->first == plot_line_pair) {
      waited_it->second = dep_it->second;
    } else {
      waited_map->insert(waited_it,
                         std::make_pair(plot_line_pair, dep_it->second));
    }

    // Make ourselves and the dependency linked events if necessary.
    EnsureLinkedEvent(iter);
    EnsureLinkedEvent(dep_it->second);

    // Finally, wire up the dependency.
    reinterpret_cast<bard::events::LinkedEvent*>(iter.event())
        ->AddDep(dep_it->second.event());
  }

  return true;
}

bool MemReplayGrinder::UpdateObjectMap(const ThreadDataIterator& iter,
                                       ObjectMap* object_map) {
  DCHECK_NE(static_cast<ObjectMap*>(nullptr), object_map);

  bool allow_existing = false;
  void* created = nullptr;
  void* destroyed = nullptr;

  auto evt = iter.inner_event();

  // Determine which objects are created and/or destroyed by the event.
  switch (evt->type()) {
    case EventInterface::kHeapAllocEvent: {
      auto e = reinterpret_cast<const bard::events::HeapAllocEvent*>(evt);
      created = e->trace_alloc();
      break;
    }
    case EventInterface::kHeapCreateEvent: {
      auto e = reinterpret_cast<const bard::events::HeapCreateEvent*>(evt);
      created = e->trace_heap();
      break;
    }
    case EventInterface::kHeapDestroyEvent: {
      auto e = reinterpret_cast<const bard::events::HeapDestroyEvent*>(evt);
      destroyed = e->trace_heap();
      break;
    }
    case EventInterface::kHeapFreeEvent: {
      auto e = reinterpret_cast<const bard::events::HeapFreeEvent*>(evt);
      destroyed = e->trace_alloc();
      break;
    }
    case EventInterface::kHeapReAllocEvent: {
      auto e = reinterpret_cast<const bard::events::HeapReAllocEvent*>(evt);
      destroyed = e->trace_alloc();
      created = e->trace_realloc();
      break;
    }
    default: break;
  }

  if (destroyed) {
    auto it = object_map->find(destroyed);
    if (it == object_map->end()) {
      // TODO(chrisha): Make events be able to represent themselves as
      // strings and log detailed information here?
      LOG(ERROR) << "Failed to destroy object " << destroyed;
      return false;
    }
    object_map->erase(it);
  }

  if (created) {
    auto result = object_map->insert(std::make_pair(created, iter));
    if (!result.second && !allow_existing) {
      LOG(ERROR) << "Failed to create object " << created;
      return false;
    }
  }

  return true;
}

}  // namespace grinders
}  // namespace grinder