// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/grinder/grinders/sample_grinder.h"

#include <algorithm>
#include <limits>
#include <utility>
#include <vector>

#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "syzygy/block_graph/basic_block_decomposer.h"
#include "syzygy/block_graph/block_graph.h"
#include "syzygy/common/align.h"
#include "syzygy/grinder/cache_grind_writer.h"
#include "syzygy/grinder/coverage_data.h"
#include "syzygy/pe/decomposer.h"
#include "syzygy/pe/find.h"
#include "syzygy/pe/pe_transform_policy.h"

namespace grinder {
namespace grinders {

namespace {

using basic_block_util::ModuleInformation;
using trace::parser::AbsoluteAddress64;

typedef block_graph::BlockGraph BlockGraph;
typedef core::AddressRange<core::RelativeAddress, size_t> Range;
typedef SampleGrinder::HeatMap HeatMap;

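// Returns the address of the first sample bucket, expressed as an RVA
// relative to the module base address.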
core::RelativeAddress GetBucketStart(const TraceSampleData* sample_data) {
  DCHECK(sample_data != NULL);
  return core::RelativeAddress(
      reinterpret_cast<uint32>(sample_data->bucket_start) -
      reinterpret_cast<uint32>(sample_data->module_base_addr));
}

// Returns the size of an intersection between a given address range and a
// sample bucket.
size_t IntersectionSize(const Range& range,
                        const core::RelativeAddress& bucket_start,
                        size_t bucket_size) {
  size_t left = std::max(range.start().value(), bucket_start.value());
  size_t right = std::min(range.end().value(),
                          bucket_start.value() + bucket_size);
  if (right <= left)
    return 0;
  return right - left;
}

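// Creates zero-heat entries in @p heat_map for the given code block. If the
// block is safe to basic-block decompose then one range is added per basic
// code block and per non-empty successor instruction; otherwise a single
// range covering the whole block is added.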
bool BuildHeatMapForCodeBlock(const pe::PETransformPolicy& policy,
                              const Range& block_range,
                              const BlockGraph::Block* block,
                              core::StringTable* string_table,
                              HeatMap* heat_map) {
  DCHECK(block != NULL);
  DCHECK(string_table != NULL);
  DCHECK(heat_map != NULL);
  DCHECK_EQ(BlockGraph::CODE_BLOCK, block->type());

  const std::string* compiland = &string_table->InternString(
      block->compiland_name());
  const std::string* function = &string_table->InternString(
      block->name());
  SampleGrinder::BasicBlockData data = { compiland, function, 0.0 };

  // If the code block is basic-block decomposable then decompose it and
  // iterate over its basic blocks.
  bool handled_basic_blocks = false;
  if (policy.BlockIsSafeToBasicBlockDecompose(block)) {
    block_graph::BasicBlockSubGraph bbsg;
    block_graph::BasicBlockDecomposer bbd(block, &bbsg);
    if (bbd.Decompose()) {
      block_graph::BasicBlockSubGraph::BBCollection::const_iterator bb_it =
          bbsg.basic_blocks().begin();
      for (; bb_it != bbsg.basic_blocks().end(); ++bb_it) {
        const block_graph::BasicBlock* bb = *bb_it;
        DCHECK(bb != NULL);

        if (bb->type() != block_graph::BasicBlock::BASIC_CODE_BLOCK)
          continue;
        const block_graph::BasicCodeBlock* bcb =
            block_graph::BasicCodeBlock::Cast(bb);
        DCHECK(bcb != NULL);

        block_graph::BasicBlockSubGraph::Offset offset = bcb->offset();
        DCHECK_NE(block_graph::BasicBlock::kNoOffset, offset);
        core::RelativeAddress rva(block_range.start() + offset);

        // Add a range for the basic block if it has non-zero size.
        if (bcb->GetInstructionSize() != 0) {
          Range range(rva, bcb->GetInstructionSize());
          if (!heat_map->Insert(range, data)) {
            LOG(ERROR) << "Failed to insert basic code block into heat map.";
            return false;
          }
        }

        // Iterate over any successors.
        if (!bcb->successors().empty()) {
          // The instruction that a successor represents immediately follows
          // the basic block's own instructions.
          rva += bcb->GetInstructionSize();

          block_graph::BasicBlock::Successors::const_iterator succ_it =
              bcb->successors().begin();
          for (; succ_it != bcb->successors().end(); ++succ_it) {
            if (succ_it->instruction_size() != 0) {
              Range range(rva, succ_it->instruction_size());
              if (!heat_map->Insert(range, data)) {
                LOG(ERROR) << "Failed to insert successor into heat map.";
                return false;
              }
            }
          }
        }
      }
      handled_basic_blocks = true;
    }
  }

  if (!handled_basic_blocks) {
    // If the block couldn't be basic-block decomposed then simply treat it as
    // a single macro block.
    if (!heat_map->Insert(block_range, data)) {
      LOG(ERROR) << "Failed to insert code block into heat map.";
      return false;
    }
  }

  return true;
}

// Builds an empty heat map for the given module. One range is created per
// basic-block. Non-decomposable code blocks are represented by a single range.
bool BuildEmptyHeatMap(const SampleGrinder::ModuleKey& module_key,
                       const SampleGrinder::ModuleData& module_data,
                       core::StringTable* string_table,
                       HeatMap* heat_map) {
  DCHECK(string_table != NULL);
  DCHECK(heat_map != NULL);

  pe::PEFile image;
  if (!image.Init(module_data.module_path)) {
    LOG(ERROR) << "Failed to read PE file \""
               << module_data.module_path.value() << "\".";
    return false;
  }

  // Decompose the module.
  pe::Decomposer decomposer(image);
  BlockGraph bg;
  pe::ImageLayout image_layout(&bg);
  LOG(INFO) << "Decomposing module \"" << module_data.module_path.value()
            << "\".";
  if (!decomposer.Decompose(&image_layout)) {
    LOG(ERROR) << "Failed to decompose module \""
               << module_data.module_path.value() << "\".";
    return false;
  }

  pe::PETransformPolicy policy;

  // Iterate over all of the code blocks and basic-block decompose them.
  LOG(INFO) << "Creating initial basic-block heat map for module \""
            << module_data.module_path.value() << "\".";
  BlockGraph::AddressSpace::AddressSpaceImpl::const_iterator block_it =
      image_layout.blocks.begin();
  for (; block_it != image_layout.blocks.end(); ++block_it) {
    // We only care about code blocks. We also don't care about gap blocks,
    // which have no meaningful content.
    const BlockGraph::Block* block = block_it->second;
    if (block->type() != BlockGraph::CODE_BLOCK)
      continue;
    if (block->attributes() & BlockGraph::GAP_BLOCK)
      continue;

    if (!BuildHeatMapForCodeBlock(policy, block_it->first, block, string_table,
                                  heat_map)) {
      return false;
    }
  }

  return true;
}

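// Builds an empty heat map for the given image using the line information in
// its PDB. One range is created per source line, keyed by address.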
bool BuildEmptyHeatMap(const base::FilePath& image_path,
                       LineInfo* line_info,
                       HeatMap* heat_map) {
  DCHECK(line_info != NULL);
  DCHECK(heat_map != NULL);

  base::FilePath pdb_path;
  if (!pe::FindPdbForModule(image_path, &pdb_path) ||
      pdb_path.empty()) {
    LOG(ERROR) << "Unable to find PDB for image \"" << image_path.value()
               << "\".";
    return false;
  }
  if (!line_info->Init(pdb_path)) {
    LOG(ERROR) << "Failed to read line info from PDB \""
               << pdb_path.value() << "\".";
    return false;
  }

  for (size_t i = 0; i < line_info->source_lines().size(); ++i) {
    const LineInfo::SourceLine& line = line_info->source_lines()[i];
    SampleGrinder::BasicBlockData data = { NULL, NULL, 0.0 };
    Range range(line.address, line.size);
    // We don't care about collisions, because there are often multiple
    // lines that will map to the same source range.
    heat_map->Insert(range, data);
  }

  return true;
}

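// Rolls basic-block heat up to source lines. Heat values are scaled by the
// minimum non-zero heat observed and converted to integer visit counts, which
// are then accumulated into @p line_info.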
bool RollUpToLines(const HeatMap& heat_map, LineInfo* line_info) {
  DCHECK(line_info != NULL);

  // Determine the minimum non-zero amount of heat in any bucket. We scale
  // heat by this amount when converting to integer values.
  double min_heat = std::numeric_limits<double>::max();
  HeatMap::const_iterator heat_it = heat_map.begin();
  for (; heat_it != heat_map.end(); ++heat_it) {
    double h = heat_it->second.heat;
    if (h > 0 && h < min_heat)
      min_heat = h;
  }

  // Scale the heat values to integers, and update the line info.
  for (heat_it = heat_map.begin(); heat_it != heat_map.end(); ++heat_it) {
    double d = heat_it->second.heat;
    if (d == 0)
      continue;
    d /= min_heat;

    // Use saturation arithmetic, and ensure that no value is zero.
    uint32 ui = 0;
    if (d >= std::numeric_limits<uint32>::max()) {
      ui = std::numeric_limits<uint32>::max();
    } else {
      ui = static_cast<uint32>(d);
      if (ui == 0)
        ui = 1;
    }

    // Increment the weight associated with the BB-range in the line info.
    if (!line_info->Visit(heat_it->first.start(), heat_it->first.size(), ui)) {
      LOG(ERROR) << "LineInfo::Visit failed.";
      return false;
    }
  }

  return true;
}

// Output the given @p heat_map to the given @p file in CSV format.
bool OutputHeatMap(const HeatMap& heat_map, FILE* file) {
  if (::fprintf(file, "RVA, Size, Compiland, Function, Heat\n") <= 0)
    return false;
  HeatMap::const_iterator it = heat_map.begin();
  for (; it != heat_map.end(); ++it) {
    if (::fprintf(file,
                  "0x%08X, %d, %s, %s, %.10e\n",
                  it->first.start().value(),
                  it->first.size(),
                  it->second.compiland->c_str(),
                  it->second.function->c_str(),
                  it->second.heat) <= 0) {
      return false;
    }
  }
  return true;
}

// A type used for converting NameHeatMaps to a sorted vector.
typedef std::pair<double, const std::string*> HeatNamePair;

// Comparator for heat-name pairs, sorting by decreasing heat then increasing
// name.
struct HeatNamePairComparator {
  bool operator()(const HeatNamePair& hnp1, const HeatNamePair& hnp2) const {
    if (hnp1.first > hnp2.first)
      return true;
    if (hnp1.first < hnp2.first)
      return false;
    return *hnp1.second < *hnp2.second;
  }
};

// Output the given @p name_heat_map to the given @p file in CSV format.
// The column header that is output will depend on the @p aggregation_level.
bool OutputNameHeatMap(SampleGrinder::AggregationLevel aggregation_level,
                       const SampleGrinder::NameHeatMap& name_heat_map,
                       FILE* file) {
  DCHECK(aggregation_level == SampleGrinder::kCompiland ||
         aggregation_level == SampleGrinder::kFunction);
  const char* name = "Compiland";
  if (aggregation_level == SampleGrinder::kFunction)
    name = "Function";
  if (::fprintf(file, "%s, Heat\n", name) <= 0)
    return false;

  std::vector<HeatNamePair> heat_name_pairs;
  heat_name_pairs.reserve(name_heat_map.size());

  SampleGrinder::NameHeatMap::const_iterator it = name_heat_map.begin();
  for (; it != name_heat_map.end(); ++it) {
    heat_name_pairs.push_back(HeatNamePair(it->second, it->first));
  }

  std::sort(heat_name_pairs.begin(),
            heat_name_pairs.end(),
            HeatNamePairComparator());

  for (size_t i = 0; i < heat_name_pairs.size(); ++i) {
    const HeatNamePair& hnp = heat_name_pairs[i];
    if (::fprintf(file, "%s, %.10e\n", hnp.second->c_str(), hnp.first) <= 0)
      return false;
  }

  return true;
}

}  // namespace

// NOTE: This must be kept in sync with SampleGrinder::AggregationLevel.
const char* SampleGrinder::kAggregationLevelNames[] = {
    "basic-block", "function", "compiland", "line" };
static_assert(arraysize(SampleGrinder::kAggregationLevelNames) ==
              SampleGrinder::kAggregationLevelMax,
              "Aggregation level names out of sync.");

const char SampleGrinder::kAggregationLevel[] = "aggregation-level";
const char SampleGrinder::kImage[] = "image";

SampleGrinder::SampleGrinder()
    : aggregation_level_(kBasicBlock),
      parser_(NULL),
      event_handler_errored_(false),
      clock_rate_(0.0) {
}

SampleGrinder::~SampleGrinder() {
}

bool SampleGrinder::ParseCommandLine(const base::CommandLine* command_line) {
  DCHECK(command_line != NULL);

  if (command_line->HasSwitch(kAggregationLevel)) {
    std::string s = command_line->GetSwitchValueASCII(kAggregationLevel);
    bool known_level = false;
    for (size_t i = 0; i < arraysize(kAggregationLevelNames); ++i) {
      if (base::LowerCaseEqualsASCII(s, kAggregationLevelNames[i])) {
        known_level = true;
        aggregation_level_ = static_cast<AggregationLevel>(i);
        break;
      }
    }

    if (!known_level) {
      LOG(ERROR) << "Unknown aggregation level: " << s << ".";
      return false;
    }
  }

  // Parse the image parameter, and initialize information about the image of
  // interest.
  image_path_ = command_line->GetSwitchValuePath(kImage);
  if (image_path_.empty()) {
    if (aggregation_level_ == kBasicBlock) {
      LOG(ERROR) << "Must specify --image in basic-block mode.";
      return false;
    }
  } else {
    if (!image_.Init(image_path_)) {
      LOG(ERROR) << "Failed to parse image \"" << image_path_.value() << "\".";
      return false;
    }
    image_.GetSignature(&image_signature_);
  }

  return true;
}

void SampleGrinder::SetParser(Parser* parser) {
  DCHECK(parser != NULL);
  parser_ = parser;
}

bool SampleGrinder::Grind() {
  if (event_handler_errored_) {
    LOG(WARNING) << "Failed to handle all TraceSampleData records, results "
                 << "will be partial.";
  }

  // Bail if no data has been processed.
  if (module_data_.empty()) {
    if (!image_path_.empty()) {
      LOG(ERROR) << "No sample data was found for module \""
                 << image_path_.value() << "\".";
      return false;
    } else {
      LOG(ERROR) << "No sample data encountered.";
      return false;
    }
  }

  // Process each module.
  ModuleDataMap::const_iterator mod_it = module_data_.begin();
  for (; mod_it != module_data_.end(); ++mod_it) {
    LOG(INFO) << "Processing aggregate samples for module \""
              << mod_it->second.module_path.value() << "\".";

    // Build an empty heat map. How exactly we do this depends on the
    // aggregation mode.
    bool empty_heat_map_built = false;
    if (aggregation_level_ == kLine) {
      // In line aggregation mode we simply extract line info from the PDB.
      empty_heat_map_built = BuildEmptyHeatMap(
          mod_it->second.module_path, &line_info_, &heat_map_);
    } else {
      // In basic-block, function and compiland aggregation mode we decompose
      // the image to get compilands, functions and basic blocks.
      // TODO(chrisha): We shouldn't need full decomposition for this.
      empty_heat_map_built = BuildEmptyHeatMap(
          mod_it->first, mod_it->second, &string_table_, &heat_map_);
    }

    if (!empty_heat_map_built) {
      LOG(ERROR) << "Unable to build empty heat map for module \""
                 << mod_it->second.module_path.value() << "\".";
      return false;
    }

    // Populate the heat map by pouring the sample data into it. If any samples
    // did not map to code blocks then output a warning.
    double total = 0.0;
    double orphaned = IncrementHeatMapFromModuleData(
        mod_it->second, &heat_map_, &total);
    if (orphaned > 0) {
      LOG(WARNING) << base::StringPrintf("%.2f%% (%.4f s) ",
                                         100.0 * orphaned / total,
                                         orphaned)
                   << "samples were orphaned for module \""
                   << mod_it->second.module_path.value() << "\".";
    }

    if (aggregation_level_ == kFunction || aggregation_level_ == kCompiland) {
      LOG(INFO) << "Rolling up basic-block heat to \""
                << kAggregationLevelNames[aggregation_level_] << "\" level.";
      RollUpByName(aggregation_level_, heat_map_, &name_heat_map_);
      // We can clear the heat map as it was only needed as an intermediate.
      heat_map_.Clear();
    } else if (aggregation_level_ == kLine) {
      LOG(INFO) << "Rolling up basic-block heat to lines.";
      if (!RollUpToLines(heat_map_, &line_info_)) {
        LOG(ERROR) << "Failed to roll-up heat to lines.";
        return false;
      }
      // We can clear the heat map as it was only needed as an intermediate.
      heat_map_.Clear();
    }
  }

  return true;
}

bool SampleGrinder::OutputData(FILE* file) {
  // If the aggregation level is basic-block, then output the data in the
  // HeatMap.
  bool success = false;
  if (aggregation_level_ == kBasicBlock) {
    success = OutputHeatMap(heat_map_, file);
  } else if (aggregation_level_ == kFunction ||
             aggregation_level_ == kCompiland) {
    // If we've aggregated by function or compiland then output the data in
    // the NameHeatMap.
    success = OutputNameHeatMap(aggregation_level_, name_heat_map_, file);
  } else {
    // Otherwise, we're aggregating to lines and we output CacheGrind-formatted
    // line-info data.
    DCHECK_EQ(kLine, aggregation_level_);
    CoverageData coverage_data;
    coverage_data.Add(line_info_);
    success = WriteCacheGrindCoverageFile(coverage_data, file);
  }

  if (!success) {
    LOG(ERROR) << "Failed to write to file.";
    return false;
  }

  return true;
}

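// Override of the OnProcessStarted callback. Records the clock rate that is
// later used to convert sample counts to time.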
void SampleGrinder::OnProcessStarted(base::Time time,
                                     DWORD process_id,
                                     const TraceSystemInfo* data) {
  DCHECK(data != NULL);
  clock_rate_ = data->clock_info.tsc_info.frequency;
  return;
}

// @name ParseEventHandler implementation.
// @{
// Override of the OnSampleData callback.
void SampleGrinder::OnSampleData(base::Time time,
                                 DWORD process_id,
                                 const TraceSampleData* data) {
  DCHECK(data != NULL);

  if (data->bucket_count == 0) {
    LOG(INFO) << "Skipping empty TraceSampleData record.";
    return;
  }

  const ModuleInformation* module_info = parser_->GetModuleInformation(
      process_id, AbsoluteAddress64(data->module_base_addr));
  if (module_info == NULL) {
    LOG(ERROR) << "Failed to find module information for TraceSampleData "
               << "record.";
    event_handler_errored_ = true;
    return;
  }

  // Filter based on the image of interest, if provided.
  if (!image_path_.empty()) {
    if (image_signature_.module_size != module_info->module_size ||
        image_signature_.module_checksum != module_info->module_checksum ||
        image_signature_.module_time_date_stamp !=
            module_info->module_time_date_stamp) {
      LOG(INFO) << "Skipping sample data for module \""
                << module_info->path << "\".";
      return;
    }
  }

  // Get the summary data associated with this module.
  ModuleData* module_data = GetModuleData(
      base::FilePath(module_info->path), data);

  LOG(INFO) << "Aggregating sample info for module \""
            << module_data->module_path.value() << "\".";

  // Make sure that we have a high enough bucket resolution to be able to
  // represent the data that we're processing. This may involve 'upsampling'
  // previously collected data.
  UpsampleModuleData(data, module_data);

  // Update our running totals.
  IncrementModuleData(clock_rate_, data, module_data);

  return;
}

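// Finds or creates the ModuleData entry that aggregates samples for the
// module described by @p sample_data.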
SampleGrinder::ModuleData* SampleGrinder::GetModuleData(
    const base::FilePath& module_path,
    const TraceSampleData* sample_data) {
  DCHECK(sample_data != NULL);

  // Fill out the key that identifies this module's aggregate data.
  ModuleKey key = { sample_data->module_size,
                    sample_data->module_checksum,
                    sample_data->module_time_date_stamp };

  // Find or insert the module data.
  std::pair<ModuleDataMap::iterator, bool> result = module_data_.insert(
      std::make_pair(key, ModuleData()));

  // If this was a fresh insertion then set the path.
  if (result.second)
    result.first->second.module_path = module_path;

  DCHECK(result.first != module_data_.end());
  return &(result.first->second);
}

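// Ensures that @p module_data has buckets at least as fine as those in
// @p sample_data, subdividing existing buckets and splitting their heat
// evenly if necessary. For example, halving the bucket size turns the
// buckets [4.0, 6.0] into [2.0, 2.0, 3.0, 3.0].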
void SampleGrinder::UpsampleModuleData(
    const TraceSampleData* sample_data,
    SampleGrinder::ModuleData* module_data) {
  DCHECK(sample_data != NULL);
  DCHECK(module_data != NULL);

  // Special case: we're not yet initialized. Simply allocate buckets.
  if (module_data->bucket_size == 0) {
    module_data->bucket_size = sample_data->bucket_size;
    module_data->bucket_start = GetBucketStart(sample_data);
    module_data->buckets.resize(sample_data->bucket_count);
    return;
  }

  // If our buckets are already the same size or finer then there's nothing
  // to do.
  if (module_data->bucket_size <= sample_data->bucket_size)
    return;

  // Grow the buckets in place, and then fill in the scaled values tail first.
  std::vector<double>& buckets = module_data->buckets;
  size_t old_size = buckets.size();
  size_t factor = module_data->bucket_size / sample_data->bucket_size;
  size_t new_size = old_size * factor;
  buckets.resize(new_size);
  for (size_t i = old_size, j = new_size; i > 0; ) {
    --i;
    double new_value = buckets[i] / factor;

    for (size_t k = 0; k < factor; ++k) {
      --j;
      buckets[j] = new_value;
    }
  }

  // Update the bucket size.
  module_data->bucket_size = sample_data->bucket_size;

  return;
}

// Increments the module data with the given sample data. Returns false and
// logs if this is not possible due to invalid data.
bool SampleGrinder::IncrementModuleData(
    double clock_rate,
    const TraceSampleData* sample_data,
    SampleGrinder::ModuleData* module_data) {
  DCHECK_LT(0.0, clock_rate);
  DCHECK(sample_data != NULL);
  DCHECK(module_data != NULL);
  DCHECK(common::IsPowerOfTwo(sample_data->bucket_size));
  DCHECK(common::IsPowerOfTwo(module_data->bucket_size));
  DCHECK_GE(sample_data->bucket_size, module_data->bucket_size);

  // The bucket starts need to be consistent.
  if (GetBucketStart(sample_data) != module_data->bucket_start) {
    LOG(ERROR) << "TraceSampleData has an inconsistent bucket start.";
    return false;
  }

  // Calculate how many aggregate buckets are touched per sample bucket.
  size_t factor = sample_data->bucket_size / module_data->bucket_size;

  // The number of buckets also needs to be consistent. The bucket size in
  // the sample data is at least as large as ours. If we convert to the number
  // of equivalent buckets there should be no more than factor - 1 more of them
  // in order for the module sizes to be consistent.
  size_t equivalent_buckets = sample_data->bucket_size *
      sample_data->bucket_count / module_data->bucket_size;
  DCHECK_GE(equivalent_buckets, module_data->buckets.size());
  if (equivalent_buckets - module_data->buckets.size() >= factor) {
    LOG(ERROR) << "TraceSampleData has inconsistent bucket count.";
    return false;
  }

  // Calculate the scaling factor which will convert a sample into 'seconds'
  // spent in that sample.
  double seconds = static_cast<double>(sample_data->sampling_interval) /
      clock_rate;

  // Walk through the sample buckets.
  const uint32* buckets = sample_data->buckets;
  std::vector<double>& agg_buckets = module_data->buckets;
  for (size_t i = 0, j = 0; i < sample_data->bucket_count; ++i) {
    // Special case: handle empty buckets explicitly, as they often occur.
    if (buckets[i] == 0) {
      j += factor;
      continue;
    }

    // Scale the sample count so that it represents 'seconds' spent in the
    // given block.
    double weight = static_cast<double>(buckets[i]) * seconds / factor;

    // Walk through the touched aggregate buckets and increment them.
    for (size_t k = 0; k < factor; ++k, ++j)
      agg_buckets[j] += weight;
  }

  return true;
}

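// Distributes the aggregated samples in @p module_data across the ranges of
// @p heat_map, proportionally to the size of each range's intersection with a
// bucket. Returns the amount of orphaned heat (samples that overlapped no
// range) and optionally the total heat via @p total_samples.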
double SampleGrinder::IncrementHeatMapFromModuleData(
    const SampleGrinder::ModuleData& module_data,
    HeatMap* heat_map,
    double* total_samples) {
  DCHECK(heat_map != NULL);

  double orphaned_samples = 0.0;
  double temp_total_samples = 0.0;

  // We walk through the sample buckets, and for each one we find the range of
  // heat map entries that intersect with it. We then divide up the heat among
  // these entries in proportion to the size of their intersections.
  core::RelativeAddress rva_bucket(module_data.bucket_start);
  HeatMap::iterator it = heat_map->begin();
  size_t i = 0;
  for (; i < module_data.buckets.size(); ++i) {
    // Advance the current heat map range as long as it's strictly to the left
    // of the current bucket.
    while (it != heat_map->end() && it->first.end() <= rva_bucket)
      ++it;
    if (it == heat_map->end())
      break;

    // If the current heat map range is strictly to the right of the current
    // bucket then those samples have nowhere to be distributed.
    if (rva_bucket + module_data.bucket_size <= it->first.start()) {
      // Tally them up as orphaned samples.
      orphaned_samples += module_data.buckets[i];
    } else {
      // Otherwise we find the heat map ranges that overlap the current bucket.

      // Advance the current heat map range until we're strictly to the right
      // of the current bucket.
      HeatMap::iterator it_end = it;
      ++it_end;
      while (it_end != heat_map->end() &&
             it_end->first.start() < rva_bucket + module_data.bucket_size) {
        ++it_end;
      }

      // Find the total size of the intersections, to be used as a scaling
      // value for distributing the samples. This is done so that *all* of the
      // samples are distributed, as the bucket may span space that is not
      // covered by any heat map ranges.
      size_t total_intersection = 0;
      for (HeatMap::iterator it2 = it; it2 != it_end; ++it2) {
        total_intersection += IntersectionSize(it2->first,
            rva_bucket, module_data.bucket_size);
      }

      // Now distribute the samples to the various ranges.
      for (HeatMap::iterator it2 = it; it2 != it_end; ++it2) {
        size_t intersection = IntersectionSize(it2->first,
            rva_bucket, module_data.bucket_size);
        it2->second.heat += intersection * module_data.buckets[i] /
            total_intersection;
      }
    }

    // Advance past the current bucket.
    temp_total_samples += module_data.buckets[i];
    rva_bucket += module_data.bucket_size;
  }

  // Pick up any trailing orphaned buckets.
  for (; i < module_data.buckets.size(); ++i) {
    orphaned_samples += module_data.buckets[i];
    temp_total_samples += module_data.buckets[i];
  }

  if (total_samples != NULL)
    *total_samples = temp_total_samples;

  return orphaned_samples;
}

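// Aggregates basic-block heat by function or compiland name, summing the heat
// of all ranges that share a name into @p name_heat_map.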
void SampleGrinder::RollUpByName(AggregationLevel aggregation_level,
                                 const HeatMap& heat_map,
                                 NameHeatMap* name_heat_map) {
  DCHECK(aggregation_level == kFunction || aggregation_level == kCompiland);
  DCHECK(name_heat_map != NULL);

  HeatMap::const_iterator it = heat_map.begin();
  for (; it != heat_map.end(); ++it) {
    const std::string* name = it->second.function;
    if (aggregation_level == kCompiland)
      name = it->second.compiland;

    NameHeatMap::iterator nhm_it = name_heat_map->insert(
        std::make_pair(name, 0.0)).first;
    nhm_it->second += it->second.heat;
  }
}

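// Orders module keys lexicographically by size, checksum and then timestamp.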
bool SampleGrinder::ModuleKey::operator<(
    const ModuleKey& rhs) const {
  if (module_size < rhs.module_size)
    return true;
  if (module_size > rhs.module_size)
    return false;
  if (module_checksum < rhs.module_checksum)
    return true;
  if (module_checksum > rhs.module_checksum)
    return false;
  if (module_time_date_stamp < rhs.module_time_date_stamp)
    return true;
  return false;
}

}  // namespace grinders
}  // namespace grinder
|