Coverage for /Syzygy/reorder/linear_order_generator.cc

Coverage   Lines executed / instrumented / missing   Language   Group
78.8%      108 / 137 / 0                              C++        source

Line-by-line coverage:

   1    :  // Copyright 2012 Google Inc.
   2    :  //
   3    :  // Licensed under the Apache License, Version 2.0 (the "License");
   4    :  // you may not use this file except in compliance with the License.
   5    :  // You may obtain a copy of the License at
   6    :  //
   7    :  //     http://www.apache.org/licenses/LICENSE-2.0
   8    :  //
   9    :  // Unless required by applicable law or agreed to in writing, software
  10    :  // distributed under the License is distributed on an "AS IS" BASIS,
  11    :  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12    :  // See the License for the specific language governing permissions and
  13    :  // limitations under the License.
  14    :  
  15    :  #include "syzygy/reorder/linear_order_generator.h"
  16    :  
  17    :  #include <algorithm>
  18    :  
  19    :  namespace reorder {
  20    :  
  21    :  namespace {
  22    :  
  23    :  // This is effectively arbitrarily chosen for now; higher values performed
  24    :  // better in benchmarked reorderings. This value is effectively 'infinite' as
  25    :  // far as the Chrome data goes.
  26    :  const size_t kDataRecursionDepth = 100;
  27    :  
  28    :  typedef LinearOrderGenerator::BlockCall BlockCall;
  29    :  
  30    :  // Comparator for sorting BlockCalls by increasing time.
  31    :  struct BlockCallSortIncrTime {
  32  E :    bool operator()(const BlockCall& bc1, const BlockCall& bc2) {
  33  E :      return bc1.time < bc2.time;
  34  E :    }
  35    :  };
  36    :  
  37    :  // Used for aggregating block call information across multiple runs.
  38    :  struct AverageBlockCall {
  39    :    const block_graph::BlockGraph::Block* block;
  40    :    size_t sum_order;
  41    :    // The number of runs of the instrumented binary in which this block was
  42    :  // seen, NOT the number of times it was called in aggregate.
  43    :    size_t call_count;
  44    :    // This is only meaningful if call_count == 1.
  45    :    uint32_t process_group_id;
  46    :  
  47  E :    double AverageOrder() const {
  48  E :      DCHECK_LT(0U, call_count);
  49  E :      return static_cast<double>(sum_order) / call_count;
  50  E :    }
  51    :  };
  52    :  
  53    :  // Sorts by decreasing call count. Entries with a call count greater than one
  54    :  // use a secondary key of increasing average order. Entries with a call count
  55    :  // of one use a secondary key of process_group_id and a tertiary key of
  56    :  // increasing average order.
  57    :  struct AverageBlockCallSort {
  58  E :    bool operator()(const AverageBlockCall& abc1, const AverageBlockCall& abc2) {
  59  E :      if (abc1.call_count != abc2.call_count)
  60  E :        return abc1.call_count > abc2.call_count;
  61    :  
  62  E :      if (abc1.call_count > 1 || abc1.process_group_id == abc2.process_group_id)
  63  E :        return abc1.AverageOrder() < abc2.AverageOrder();
  64    :  
  65  E :      return abc1.process_group_id < abc2.process_group_id;
  66  E :    }
  67    :  };
  68    :  
  69    :  // Extract the values of a map to a vector.
  70    :  template<typename K, typename V> void MapToVector(const std::map<K, V>& map,
  71  E :                                                    std::vector<V>* vector) {
  72  E :    DCHECK(vector != NULL);
  73    :  
  74  E :    vector->clear();
  75  E :    typename std::map<K, V>::const_iterator it = map.begin();
  76  E :    for (; it != map.end(); ++it)
  77  E :      vector->push_back(it->second);
  78  E :  }
  79    :  
  80    :  }  // namespace
  81    :  
  82    :  LinearOrderGenerator::LinearOrderGenerator()
  83    :      : Reorderer::OrderGenerator("Linear Order Generator"),
  84    :        active_process_count_(0),
  85  E :        next_process_group_id_(0) {
  86  E :  }
  87    :  
  88  E :  LinearOrderGenerator::~LinearOrderGenerator() {
  89  E :  }
  90    :  
  91    :  bool LinearOrderGenerator::OnProcessStarted(uint32 process_id,
  92  E :                                              const UniqueTime& time) {
  93  E :    if (active_process_count_ == 0) {
  94  E :      if (!CloseProcessGroup())
  95  i :        return false;
  96    :    }
  97    :  
  98  E :    ++active_process_count_;
  99    :  
 100  E :    return true;
 101  E :  }
 102    :  
 103    :  bool LinearOrderGenerator::OnProcessEnded(uint32 process_id,
 104  E :                                            const UniqueTime& time) {
 105  E :    DCHECK_LT(0U, active_process_count_);
 106  E :    --active_process_count_;
 107  E :    return true;
 108  E :  }
 109    :  
 110    :  bool LinearOrderGenerator::OnCodeBlockEntry(const BlockGraph::Block* block,
 111    :                                              RelativeAddress address,
 112    :                                              uint32 process_id,
 113    :                                              uint32 thread_id,
 114  E :                                              const UniqueTime& time) {
 115  E :    return TouchBlock(BlockCall(block, process_id, thread_id, time));
 116  E :  }
 117    :  
 118    :  bool LinearOrderGenerator::CalculateReordering(const PEFile& pe_file,
 119    :                                                 const ImageLayout& image,
 120    :                                                 bool reorder_code,
 121    :                                                 bool reorder_data,
 122  E :                                                 Order* order) {
 123  E :    DCHECK(order != NULL);
 124    :  
 125    :    // Ensure the last running process group is closed.
 126  E :    if (!CloseProcessGroup())
 127  i :      return false;
 128    :  
 129  E :    LOG(INFO) << "Encountered " << process_group_calls_.size()
 130    :              << " process groups.";
 131    :  
 132    :    // Aggregate the block calls.
 133  E :    std::map<const BlockGraph::Block*, AverageBlockCall> average_block_call_map;
 134  E :    ProcessGroupBlockCalls::const_iterator it = process_group_calls_.begin();
 135  E :    for (; it != process_group_calls_.end(); ++it) {
 136  E :      for (size_t i = 0; i < it->second.size(); ++i) {
 137  E :        const BlockCall& block_call = it->second[i];
 138    :        AverageBlockCall& average_block_call =
 139  E :            average_block_call_map[block_call.block];
 140  E :        average_block_call.block = block_call.block;
 141  E :        average_block_call.sum_order += i;
 142  E :        ++average_block_call.call_count;
 143  E :        average_block_call.process_group_id = it->first;
 144  E :      }
 145  E :    }
 146    :  
 147    :    // Now create a sorted list.
 148  E :    std::vector<AverageBlockCall> average_block_calls;
 149  E :    MapToVector(average_block_call_map, &average_block_calls);
 150  E :    average_block_call_map.clear();
 151    :    std::sort(average_block_calls.begin(), average_block_calls.end(),
 152  E :              AverageBlockCallSort());
 153    :  
 154    :    // TODO(chrisha): Create an option to evenly distribute the common startup
 155    :    //     blocks (those with call_count == process_group_calls_.size()) among
 156    :    //     the remaining blocks.
 157    :  
 158    :    // Create the ordering from this list.
 159  E :    BlockSet inserted_blocks;
 160  E :    for (size_t i = 0; i < average_block_calls.size(); ++i) {
 161  E :      const BlockGraph::Block* code_block = average_block_calls[i].block;
 162    :  
 163  E :      if (reorder_code) {
 164  E :        order->section_block_lists[code_block->section()].push_back(code_block);
 165  E :        inserted_blocks.insert(code_block);
 166    :      }
 167    :  
 168    :      // Create an analogous data ordering if we were asked to.
 169  E :      if (reorder_data) {
 170    :        if (!InsertDataBlocks(kDataRecursionDepth, code_block, order,
 171  i :                              &inserted_blocks))
 172  i :          return false;
 173    :      }
 174  E :    }
 175    :  
 176    :    // Add the remaining blocks in each section to the order.
 177  E :    for (size_t section_index = 0; ; ++section_index) {
 178    :      const IMAGE_SECTION_HEADER* section =
 179  E :          pe_file.section_header(section_index);
 180  E :      if (section == NULL)
 181  E :        break;
 182    :  
 183  E :      RelativeAddress section_start = RelativeAddress(section->VirtualAddress);
 184    :      AddressSpace::RangeMapConstIterPair section_blocks =
 185    :          image.blocks.GetIntersectingBlocks(
 186  E :              section_start, section->Misc.VirtualSize);
 187  E :      AddressSpace::RangeMapConstIter& section_it = section_blocks.first;
 188  E :      const AddressSpace::RangeMapConstIter& section_end = section_blocks.second;
 189  E :      for (; section_it != section_end; ++section_it) {
 190  E :        BlockGraph::Block* block = section_it->second;
 191  E :        if (inserted_blocks.count(block) > 0)
 192  E :          continue;
 193  E :        order->section_block_lists[section_index].push_back(block);
 194  E :      }
 195  E :    }
 196    :  
 197  E :    return true;
 198  E :  }
 199    :  
 200  E :  bool LinearOrderGenerator::TouchBlock(const BlockCall& block_call) {
 201  E :    DCHECK(block_call.block != NULL);
 202    :    // All code blocks should belong to a defined section.
 203  E :    DCHECK_NE(pe::kInvalidSection, block_call.block->section());
 204    :  
 205    :    // Store the block along with the earliest time it was called.
 206  E :    BlockCallMap::iterator it = block_call_map_.find(block_call.block);
 207  E :    if (it == block_call_map_.end()) {
 208  E :      std::pair<BlockCallMap::iterator, bool> insert_return;
 209    :      insert_return = block_call_map_.insert(
 210  E :          std::make_pair(block_call.block, block_call));
 211  E :      it = insert_return.first;
 212  E :      DCHECK(insert_return.second);
 213  E :      DCHECK(it != block_call_map_.end());
 214  E :    } else {
 215    :      // Keep around the earliest call to this block only.
 216  E :      if (block_call.time < it->second.time)
 217  i :        it->second = block_call;
 218    :    }
 219  E :    return true;
 220  E :  }
 221    :  
 222    :  bool LinearOrderGenerator::InsertDataBlocks(size_t max_recursion_depth,
 223    :                                              const BlockGraph::Block* block,
 224    :                                              Order* order,
 225  i :                                              BlockSet* inserted_blocks) {
 226  i :    DCHECK(block != NULL);
 227  i :    DCHECK(order != NULL);
 228    :  
 229    :    // Stop the recursion.
 230  i :    if (max_recursion_depth == 0)
 231  i :      return true;
 232    :  
 233  i :    block_graph::ConstBlockVector data_blocks;
 234    :  
 235    :  // Iterate over the data blocks referenced by this block and insert them
 236    :  // into the ordering immediately after it. This is a pessimistic
 237    :  // optimization that assumes all data linked to a code block will be
 238    :  // touched by that code block (and all data linked to by that data block,
 239    :  // and so on, up to 'max_recursion_depth').
 240    :    BlockGraph::Block::ReferenceMap::const_iterator ref_it =
 241  i :        block->references().begin();
 242  i :    for (; ref_it != block->references().end(); ++ref_it) {
 243  i :      const BlockGraph::Block* ref = ref_it->second.referenced();
 244  i :      DCHECK(ref != NULL);
 245    :      // We only touch data blocks with a valid section id.
 246    :      if (ref->type() != BlockGraph::DATA_BLOCK ||
 247  i :          ref->section() == pe::kInvalidSection)
 248  i :        continue;
 249    :  
 250    :      // Only insert data blocks that have not yet been seen.
 251  i :      if (!inserted_blocks->insert(ref).second)
 252  i :        continue;
 253    :  
 254    :      // Finally, insert this block to the appropriate section ordering.
 255  i :      order->section_block_lists[ref->section()].push_back(ref);
 256    :  
 257  i :      data_blocks.push_back(ref);
 258  i :    }
 259    :  
 260    :    // Recurse on the data blocks we just added.
 261  i :    if (max_recursion_depth > 1) {
 262  i :      for (size_t i = 0; i < data_blocks.size(); ++i) {
 263    :        if (!InsertDataBlocks(max_recursion_depth - 1, data_blocks[i], order,
 264  i :                              inserted_blocks))
 265  i :          return false;
 266  i :      }
 267    :    }
 268    :  
 269  i :    return true;
 270  i :  }
 271    :  
 272  E :  bool LinearOrderGenerator::CloseProcessGroup() {
 273  E :    if (block_call_map_.size() == 0)
 274  E :      return true;
 275    :  
 276  E :    BlockCalls& block_calls = process_group_calls_[next_process_group_id_];
 277  E :    ++next_process_group_id_;
 278    :  
 279  E :    MapToVector(block_call_map_, &block_calls);
 280  E :    block_call_map_.clear();
 281    :  
 282  E :    std::sort(block_calls.begin(), block_calls.end(), BlockCallSortIncrTime());
 283    :  
 284  E :    return true;
 285  E :  }
 286    :  
 287    :  }  // namespace reorder

Coverage information generated Thu Sep 06 11:30:46 2012.