1 : // Copyright 2012 Google Inc. All Rights Reserved.
2 : //
3 : // Licensed under the Apache License, Version 2.0 (the "License");
4 : // you may not use this file except in compliance with the License.
5 : // You may obtain a copy of the License at
6 : //
7 : // http://www.apache.org/licenses/LICENSE-2.0
8 : //
9 : // Unless required by applicable law or agreed to in writing, software
10 : // distributed under the License is distributed on an "AS IS" BASIS,
11 : // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 : // See the License for the specific language governing permissions and
13 : // limitations under the License.
14 :
15 : #include "syzygy/block_graph/block_graph.h"
16 :
17 : #include <limits>
18 :
19 : #include "base/logging.h"
20 : #include "base/strings/stringprintf.h"
21 :
22 : // Pretty prints a BlockInfo to an ostream. This has to be outside of any
23 : // namespaces so that operator<< is found properly.
24 i : std::ostream& operator<<(std::ostream& os, const block_graph::BlockInfo& bi) {
25 : os << "Block(id=" << bi.block->id() << ", name=\"" << bi.block->name()
26 i : << "\", size=" << bi.block->size();
27 i : if (bi.type != block_graph::BlockInfo::kNoAddress) {
28 i : os << ", address=";
29 i : switch (bi.type) {
30 : case block_graph::BlockInfo::kAbsoluteAddress: {
31 i : os << bi.abs_addr;
32 i : break;
33 : }
34 : case block_graph::BlockInfo::kFileOffsetAddress: {
35 i : os << bi.file_addr;
36 i : break;
37 : }
38 : case block_graph::BlockInfo::kRelativeAddress: {
39 i : os << bi.rel_addr;
40 : break;
41 : }
42 : default: break;
43 : }
44 : }
45 i : os << ")";
46 i : return os;
47 i : }
48 :
49 : namespace block_graph {
50 :
51 : namespace {
52 :
53 : COMPILE_ASSERT(BlockGraph::BLOCK_ATTRIBUTES_MAX_BIT < 32,
54 : too_many_block_attributes);
55 :
56 : // A list of printable names corresponding to image formats. This needs to be
57 : // kept in sync with the BlockGraph::ImageFormat enum!
58 : const char* kImageFormat[] = {
59 : "UNKNOWN_FORMAT", "PE_FORMAT", "COFF_FORMAT",
60 : };
61 : COMPILE_ASSERT(arraysize(kImageFormat) == BlockGraph::IMAGE_FORMAT_MAX,
62 : kImageFormat_not_in_sync);
63 :
64 : // A list of printable names corresponding to block types. This needs to
65 : // be kept in sync with the BlockGraph::BlockType enum!
66 : const char* kBlockType[] = {
67 : "CODE_BLOCK", "DATA_BLOCK",
68 : };
69 : COMPILE_ASSERT(arraysize(kBlockType) == BlockGraph::BLOCK_TYPE_MAX,
70 : kBlockType_not_in_sync);
71 :
72 : // Shift all items in an offset -> item map by @p distance, provided the
73 : // initial item offset was >= @p offset.
74 : template<typename ItemType>
75 : void ShiftOffsetItemMap(BlockGraph::Offset offset,
76 : BlockGraph::Offset distance,
77 E : std::map<BlockGraph::Offset, ItemType>* items) {
78 E : DCHECK_GE(offset, 0);
79 E : DCHECK_NE(distance, 0);
80 E : DCHECK(items != NULL);
81 :
82 : typedef std::map<BlockGraph::Offset, ItemType> ItemMap;
83 :
84 : // Get iterators to all of the items that need changing.
85 E : std::vector<ItemMap::iterator> item_its;
86 E : ItemMap::iterator item_it = items->lower_bound(offset);
87 E : while (item_it != items->end()) {
88 E : item_its.push_back(item_it);
89 E : ++item_it;
90 E : }
91 :
92 : // Get the direction and bounds of the iteration. We need to walk through
93 : // the iterators in a different order depending on whether we're shifting left
94 : // or right. This is to ensure that earlier shifts don't land on the values
95 : // of later unshifted offsets.
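// For example, shifting the offsets {8, 12} right by 4 must move 12 -> 16
// before 8 -> 12; otherwise the insert at 12 would collide with the still
// unshifted entry at 12 and the item originally at offset 8 would be lost.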
96 E : int start = 0;
97 E : int stop = item_its.size();
98 E : int step = 1;
99 E : if (distance > 0) {
100 E : start = stop - 1;
101 E : stop = -1;
102 E : step = -1;
103 : }
104 :
105 E : for (int i = start; i != stop; i += step) {
106 E : item_it = item_its[i];
107 : items->insert(std::make_pair(item_it->first + distance,
108 E : item_it->second));
109 E : items->erase(item_it);
110 E : }
111 E : }
112 :
113 : void ShiftReferences(BlockGraph::Block* block,
114 : BlockGraph::Offset offset,
115 E : BlockGraph::Offset distance) {
116 : // Make a copy of the reference map for simplicity.
117 E : BlockGraph::Block::ReferenceMap references = block->references();
118 :
119 : // Start by removing all references that have moved.
120 : BlockGraph::Block::ReferenceMap::const_iterator it =
121 E : references.lower_bound(offset);
122 E : for (; it != references.end(); ++it) {
123 E : if (it->first >= offset)
124 E : block->RemoveReference(it->first);
125 E : }
126 :
127 : // Then patch up all existing references.
128 E : it = references.begin();
129 E : for (; it != references.end(); ++it) {
130 E : BlockGraph::Reference ref(it->second);
131 E : BlockGraph::Offset new_offset(it->first);
132 :
133 : // If this is self-referential, fix the destination offset.
134 E : if (ref.referenced() == block && ref.offset() >= offset) {
135 : ref = BlockGraph::Reference(ref.type(),
136 : ref.size(),
137 : ref.referenced(),
138 : ref.offset() + distance,
139 E : ref.base() + distance);
140 : }
141 :
142 : // If its offset is past the change point, fix that.
143 E : if (it->first >= offset)
144 E : new_offset += distance;
145 :
146 : // In many cases this'll be a noop.
147 : // TODO(siggi): Optimize this.
148 E : block->SetReference(new_offset, ref);
149 E : }
150 E : }
151 :
152 : // Shift all referrers beyond @p offset by @p distance.
153 : void ShiftReferrers(BlockGraph::Block* self,
154 : BlockGraph::Offset offset,
155 : BlockGraph::Offset distance,
156 E : BlockGraph::Block::ReferrerSet* referrers) {
157 E : DCHECK_GE(offset, 0);
158 E : DCHECK_NE(distance, 0);
159 E : DCHECK(referrers != NULL);
160 :
161 : typedef BlockGraph::Block::ReferrerSet ReferrerSet;
162 : typedef BlockGraph::Reference Reference;
163 :
164 E : ReferrerSet::iterator ref_it = referrers->begin();
165 E : while (ref_it != referrers->end()) {
166 : // We need to keep around the next iterator as 'ref_it' will be invalidated
167 : // if we need to update the reference. (It will be deleted and then
168 : // recreated.)
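// (SetReference() on the referrer erases the (block, offset) entry from the
// referenced block's referrer set, which is the very set being walked here,
// and then re-inserts it.)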
169 E : ReferrerSet::iterator next_ref_it = ref_it;
170 E : ++next_ref_it;
171 :
172 E : BlockGraph::Block* ref_block = ref_it->first;
173 : // Our own references will have been moved already.
174 E : if (ref_block != self) {
175 E : BlockGraph::Offset ref_offset = ref_it->second;
176 :
177 E : Reference ref;
178 E : bool ref_found = ref_block->GetReference(ref_offset, &ref);
179 E : DCHECK(ref_found);
180 :
181 : // Shift the reference if need be.
182 E : if (ref.offset() >= offset) {
183 : Reference new_ref(ref.type(),
184 : ref.size(),
185 : ref.referenced(),
186 : ref.offset() + distance,
187 E : ref.base() + distance);
188 E : bool inserted = ref_block->SetReference(ref_offset, new_ref);
189 E : DCHECK(!inserted);
190 : }
191 : }
192 :
193 E : ref_it = next_ref_it;
194 E : }
195 E : }
196 :
197 i : const char* BlockAttributeToString(BlockGraph::BlockAttributeEnum attr) {
198 i : switch (attr) {
199 : #define DEFINE_CASE(name) case BlockGraph::name: return #name;
200 i : BLOCK_ATTRIBUTE_ENUM(DEFINE_CASE)
201 : #undef DEFINE_CASE
202 : default:
203 i : NOTREACHED();
204 i : return NULL;
205 : }
206 i : }
207 :
208 : } // namespace
209 :
210 : const char* BlockGraph::ImageFormatToString(ImageFormat format) {
211 : DCHECK_LE(BlockGraph::PE_IMAGE, format);
212 : DCHECK_GT(BlockGraph::IMAGE_FORMAT_MAX, format);
213 : return kImageFormat[format];
214 : }
215 :
216 i : std::string BlockGraph::BlockAttributesToString(BlockAttributes attrs) {
217 i : BlockAttributes attr = 1;
218 i : std::string s;
219 i : for (; attr < BLOCK_ATTRIBUTES_MAX; attr <<= 1) {
220 i : if (attr & attrs) {
221 i : if (!s.empty())
222 i : s.append("|");
223 i : s.append(BlockAttributeToString(static_cast<BlockAttributeEnum>(attr)));
224 : }
225 i : }
226 i : return s;
227 i : }
228 :
229 E : const char* BlockGraph::BlockTypeToString(BlockGraph::BlockType type) {
230 E : DCHECK_LE(BlockGraph::CODE_BLOCK, type);
231 E : DCHECK_GT(BlockGraph::BLOCK_TYPE_MAX, type);
232 E : return kBlockType[type];
233 E : }
234 :
235 : std::string BlockGraph::LabelAttributesToString(
236 E : BlockGraph::LabelAttributes label_attributes) {
237 : static const char* kLabelAttributes[] = {
238 : "Code", "DebugStart", "DebugEnd", "ScopeStart", "ScopeEnd",
239 : "CallSite", "JumpTable", "CaseTable", "Data", "PublicSymbol" };
240 : COMPILE_ASSERT((1 << arraysize(kLabelAttributes)) == LABEL_ATTRIBUTES_MAX,
241 : label_attribute_names_not_in_sync_with_enum);
242 :
243 E : std::string s;
244 E : for (size_t i = 0; i < arraysize(kLabelAttributes); ++i) {
245 E : if (label_attributes & (1 << i)) {
246 E : if (!s.empty())
247 E : s.append("|");
248 E : s.append(kLabelAttributes[i]);
249 : }
250 E : }
251 E : return s;
252 E : }
253 :
254 : const BlockGraph::SectionId BlockGraph::kInvalidSectionId = -1;
255 :
256 : BlockGraph::BlockGraph()
257 : : next_section_id_(0),
258 : next_block_id_(0),
259 E : image_format_(UNKNOWN_IMAGE_FORMAT) {
260 E : }
261 :
262 E : BlockGraph::~BlockGraph() {
263 E : }
264 :
265 : BlockGraph::Section* BlockGraph::AddSection(const base::StringPiece& name,
266 E : uint32 characteristics) {
267 E : Section new_section(next_section_id_++, name, characteristics);
268 : std::pair<SectionMap::iterator, bool> result = sections_.insert(
269 E : std::make_pair(new_section.id(), new_section));
270 E : DCHECK(result.second);
271 :
272 E : return &result.first->second;
273 E : }
274 :
275 E : BlockGraph::Section* BlockGraph::FindSection(const base::StringPiece& name) {
276 E : const BlockGraph* self = const_cast<const BlockGraph*>(this);
277 E : const BlockGraph::Section* section = self->FindSection(name);
278 E : return const_cast<BlockGraph::Section*>(section);
279 E : }
280 :
281 : const BlockGraph::Section* BlockGraph::FindSection(
282 E : const base::StringPiece& name) const {
283 : // This is a linear scan, but thankfully images generally do not have many
284 : // sections and we do not create them very often. Fast lookup by index is
285 : // more important. If this ever becomes an issue, we could keep around a
286 : // second index by name.
287 E : SectionMap::const_iterator it = sections_.begin();
288 E : for (; it != sections_.end(); ++it) {
289 E : if (it->second.name() == name)
290 E : return &it->second;
291 E : }
292 :
293 E : return NULL;
294 E : }
295 :
296 : BlockGraph::Section* BlockGraph::FindOrAddSection(const base::StringPiece& name,
297 E : uint32 characteristics) {
298 E : Section* section = FindSection(name);
299 E : if (section) {
300 E : section->set_characteristic(characteristics);
301 E : return section;
302 : }
303 E : return AddSection(name, characteristics);
304 E : }
305 :
306 E : bool BlockGraph::RemoveSection(Section* section) {
307 E : DCHECK(section != NULL);
308 :
309 E : SectionMap::iterator it(sections_.find(section->id()));
310 E : if (it == sections_.end() || &it->second != section)
311 i : return false;
312 :
313 E : sections_.erase(it);
314 E : return true;
315 E : }
316 :
317 E : bool BlockGraph::RemoveSectionById(SectionId id) {
318 E : SectionMap::iterator it(sections_.find(id));
319 E : if (it == sections_.end())
320 E : return false;
321 :
322 E : sections_.erase(it);
323 E : return true;
324 E : }
325 :
326 : BlockGraph::Block* BlockGraph::AddBlock(BlockType type,
327 : Size size,
328 E : const base::StringPiece& name) {
329 E : BlockId id = ++next_block_id_;
330 : BlockMap::iterator it = blocks_.insert(
331 E : std::make_pair(id, Block(id, type, size, name, this))).first;
332 :
333 E : return &it->second;
334 E : }
335 :
336 E : bool BlockGraph::RemoveBlock(Block* block) {
337 E : DCHECK(block != NULL);
338 :
339 E : BlockMap::iterator it(blocks_.find(block->id()));
340 E : if (it == blocks_.end() || &it->second != block)
341 E : return false;
342 :
343 E : return RemoveBlockByIterator(it);
344 E : }
345 :
346 E : bool BlockGraph::RemoveBlockById(BlockId id) {
347 E : BlockMap::iterator it(blocks_.find(id));
348 E : if (it == blocks_.end())
349 E : return false;
350 :
351 E : return RemoveBlockByIterator(it);
352 E : }
353 :
354 E : BlockGraph::Section* BlockGraph::GetSectionById(SectionId id) {
355 E : SectionMap::iterator it(sections_.find(id));
356 :
357 E : if (it == sections_.end())
358 E : return NULL;
359 :
360 E : return &it->second;
361 E : }
362 :
363 E : const BlockGraph::Section* BlockGraph::GetSectionById(SectionId id) const {
364 E : SectionMap::const_iterator it(sections_.find(id));
365 :
366 E : if (it == sections_.end())
367 E : return NULL;
368 :
369 E : return &it->second;
370 E : }
371 :
372 E : BlockGraph::Block* BlockGraph::GetBlockById(BlockId id) {
373 E : BlockMap::iterator it(blocks_.find(id));
374 :
375 E : if (it == blocks_.end())
376 E : return NULL;
377 :
378 E : return &it->second;
379 E : }
380 :
381 : const BlockGraph::Block* BlockGraph::GetBlockById(BlockId id) const {
382 : BlockMap::const_iterator it(blocks_.find(id));
383 :
384 : if (it == blocks_.end())
385 : return NULL;
386 :
387 : return &it->second;
388 : }
389 :
390 E : bool BlockGraph::RemoveBlockByIterator(BlockMap::iterator it) {
391 E : DCHECK(it != blocks_.end());
392 :
393 : // Verify this block is fully disconnected.
394 E : if (it->second.referrers().size() > 0 || it->second.references().size() > 0)
395 E : return false;
396 :
397 E : blocks_.erase(it);
398 :
399 E : return true;
400 E : }
401 :
402 : BlockGraph::AddressSpace::AddressSpace(BlockGraph* graph)
403 E : : graph_(graph) {
404 E : DCHECK(graph != NULL);
405 E : }
406 :
407 : BlockGraph::Block* BlockGraph::AddressSpace::AddBlock(
408 : BlockType type, RelativeAddress addr, Size size,
409 E : const base::StringPiece& name) {
410 :
411 E : if (size > 0) {
412 : // First check to see that the range is clear.
413 E : AddressSpaceImpl::Range range(addr, size);
414 : AddressSpaceImpl::RangeMap::iterator it =
415 E : address_space_.FindFirstIntersection(range);
416 E : if (it != address_space_.ranges().end())
417 E : return NULL;
418 : }
419 :
420 E : BlockGraph::Block* block = graph_->AddBlock(type, size, name);
421 E : DCHECK(block != NULL);
422 E : bool inserted = InsertImpl(addr, block);
423 E : DCHECK(inserted);
424 :
425 E : return block;
426 E : }
427 :
428 E : bool BlockGraph::AddressSpace::ResizeBlock(Block* block, size_t size) {
429 E : DCHECK_NE(reinterpret_cast<Block*>(NULL), block);
430 :
431 : // Determine if the block is in the address space. If so, this also finds its
432 : // address.
433 E : BlockAddressMap::iterator block_it = block_addresses_.find(block);
434 E : if (block_it == block_addresses_.end())
435 E : return false;
436 :
437 : // If the size isn't changing then we can return right away.
438 E : if (block->size() == size)
439 E : return true;
440 :
441 : // If the original size is non-zero we expect it to be in the address-space.
442 E : AddressSpaceImpl::RangeMap::iterator range_it;
443 E : if (block->size() > 0) {
444 : // Find the iterator associated with this block's range.
445 E : AddressSpaceImpl::Range range(block_it->second, 1);
446 E : range_it = address_space_.FindContaining(range);
447 E : CHECK(range_it != address_space_.end());
448 :
449 : // We expect the sizes to agree.
450 E : CHECK_EQ(block->size(), range_it->first.size());
451 :
452 : // If the size is growing make sure we can grow it.
453 E : if (range_it->first.size() < size) {
454 : // Get the next block.
455 E : AddressSpaceImpl::RangeMap::iterator range_next = range_it;
456 E : ++range_next;
457 :
458 : // If the current block, extended to the new size, would conflict with the
459 : // next block then we can't do the resize.
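// For example, a block of size 8 at address 100 can grow to at most size 16
// when the next block in the address space starts at address 116.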
460 : if (range_next != address_space_.end() &&
461 E : range_it->first.start() + size > range_next->first.start()) {
462 E : return false;
463 : }
464 : }
465 :
466 : // Remove the existing block from the address-space.
467 E : address_space_.Remove(range_it);
468 : }
469 :
470 : // Reinsert the block with its new range if it's not empty. If the block is
471 : // empty it won't be added back to the address space.
472 E : if (size > 0)
473 E : address_space_.Insert(Range(block_it->second, size), block, &range_it);
474 :
475 : // Update the block with its new size.
476 E : block->set_size(size);
477 :
478 E : return true;
479 E : }
480 :
481 E : bool BlockGraph::AddressSpace::InsertBlock(RelativeAddress addr, Block* block) {
482 E : return InsertImpl(addr, block);
483 E : }
484 :
485 : BlockGraph::Block* BlockGraph::AddressSpace::GetBlockByAddress(
486 E : RelativeAddress addr) const {
487 E : return GetContainingBlock(addr, 1);
488 E : }
489 :
490 : BlockGraph::Block* BlockGraph::AddressSpace::GetContainingBlock(
491 E : RelativeAddress addr, Size size) const {
492 E : AddressSpaceImpl::Range range(addr, size);
493 : AddressSpaceImpl::RangeMap::const_iterator it =
494 E : address_space_.FindContaining(range);
495 E : if (it == address_space_.ranges().end())
496 E : return NULL;
497 :
498 E : return it->second;
499 E : }
500 :
501 : BlockGraph::Block* BlockGraph::AddressSpace::GetFirstIntersectingBlock(
502 E : RelativeAddress addr, Size size) {
503 E : AddressSpaceImpl::Range range(addr, size);
504 : AddressSpaceImpl::RangeMap::iterator it =
505 E : address_space_.FindFirstIntersection(range);
506 E : if (it == address_space_.ranges().end())
507 E : return NULL;
508 :
509 E : return it->second;
510 E : }
511 :
512 : BlockGraph::AddressSpace::RangeMapConstIterPair
513 : BlockGraph::AddressSpace::GetIntersectingBlocks(RelativeAddress address,
514 E : Size size) const {
515 E : return address_space_.FindIntersecting(Range(address, size));
516 E : }
517 :
518 : BlockGraph::AddressSpace::RangeMapIterPair
519 : BlockGraph::AddressSpace::GetIntersectingBlocks(RelativeAddress address,
520 E : Size size) {
521 E : return address_space_.FindIntersecting(Range(address, size));
522 E : }
523 :
524 : bool BlockGraph::AddressSpace::GetAddressOf(const Block* block,
525 E : RelativeAddress* addr) const {
526 E : DCHECK(block != NULL);
527 E : DCHECK(addr != NULL);
528 :
529 E : BlockAddressMap::const_iterator it(block_addresses_.find(block));
530 E : if (it == block_addresses_.end())
531 E : return false;
532 :
533 E : *addr = it->second;
534 E : return true;
535 E : }
536 :
537 E : bool BlockGraph::AddressSpace::InsertImpl(RelativeAddress addr, Block* block) {
538 : // Only insert the block in the address-space if it has non-zero size.
539 E : if (block->size() > 0) {
540 E : Range range(addr, block->size());
541 E : bool inserted = address_space_.Insert(range, block);
542 E : if (!inserted)
543 E : return false;
544 : }
545 :
546 E : bool inserted = block_addresses_.insert(std::make_pair(block, addr)).second;
547 E : DCHECK(inserted);
548 : // Update the address stored in the block.
549 E : block->set_addr(addr);
550 :
551 E : return true;
552 E : }
553 :
554 E : bool BlockGraph::AddressSpace::ContainsBlock(const Block* block) {
555 E : DCHECK(block != NULL);
556 E : return block_addresses_.count(block) != 0;
557 E : }
558 :
559 : BlockGraph::Block* BlockGraph::AddressSpace::MergeIntersectingBlocks(
560 E : const Range& range) {
561 : typedef std::vector<std::pair<RelativeAddress, BlockGraph::Block*>>
562 : BlockAddressVector;
563 :
564 : // Find all the blocks that intersect the range, keep them and their
565 : // addresses. Start by finding the first intersection, then iterate
566 : // from there until we find a block that doesn't intersect with range.
567 : AddressSpaceImpl::RangeMap::iterator address_start =
568 E : address_space_.FindFirstIntersection(range);
569 E : AddressSpaceImpl::RangeMap::iterator address_it(address_start);
570 :
571 E : BlockAddressVector intersecting;
572 : for (; address_it != address_space_.ranges().end() &&
573 E : address_it->first.Intersects(range); ++address_it) {
574 : intersecting.push_back(std::make_pair(address_it->first.start(),
575 E : address_it->second));
576 E : }
577 :
578 : // Bail if there are no intersecting blocks.
579 E : if (intersecting.empty())
580 i : return NULL;
581 :
582 : // In case of single-block intersection, we're done.
583 E : if (intersecting.size() == 1)
584 i : return intersecting[0].second;
585 :
586 E : DCHECK(!intersecting.empty());
587 :
588 : // Calculate the start and end addresses of the new block.
589 E : BlockGraph::Block* first_block = intersecting[0].second;
590 E : BlockGraph::Block* last_block = intersecting[intersecting.size() - 1].second;
591 E : DCHECK(first_block != NULL && last_block != NULL);
592 :
593 E : RelativeAddress begin = std::min(range.start(), intersecting[0].first);
594 : RelativeAddress end = std::max(range.start() + range.size(),
595 E : intersecting[intersecting.size() - 1].first + last_block->size());
596 :
597 E : DCHECK(begin <= range.start());
598 E : DCHECK(end >= range.start() + range.size());
599 :
600 E : base::StringPiece block_name = first_block->name();
601 E : BlockType block_type = first_block->type();
602 E : size_t section_id = first_block->section();
603 E : size_t alignment = first_block->alignment();
604 E : Offset alignment_offset = first_block->alignment_offset();
605 E : BlockAttributes attributes = 0;
606 :
607 : // Some attributes are only propagated if they are present on *all* blocks
608 : // in the range.
609 : static const BlockAttributes kUniformAttributes =
610 : GAP_BLOCK | PADDING_BLOCK | BUILT_BY_SYZYGY;
611 E : BlockAttributes uniform_attributes = kUniformAttributes;
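// For example, the merged block will be marked PADDING_BLOCK only if every
// intersecting block carries PADDING_BLOCK, whereas any attribute outside
// kUniformAttributes is propagated if any single block carries it.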
612 :
613 E : BlockGraph::Block::SourceRanges source_ranges;
614 :
615 : // Remove the found blocks from the address space, and make sure they're all
616 : // of the same type and from the same section as the first block. Merge the
617 : // data from all the blocks as we go along, as well as the attributes and
618 : // source ranges.
619 E : std::vector<uint8> merged_data(end - begin);
620 E : bool have_data = false;
621 E : for (size_t i = 0; i < intersecting.size(); ++i) {
622 E : RelativeAddress addr = intersecting[i].first;
623 E : BlockGraph::Block* block = intersecting[i].second;
624 E : DCHECK_EQ(block_type, block->type());
625 E : DCHECK_EQ(section_id, block->section());
626 E : DCHECK_EQ(0U, block->padding_before());
627 :
628 E : if (block->data() != NULL) {
629 i : have_data = true;
630 i : memcpy(&merged_data.at(addr - begin), block->data(), block->data_size());
631 : }
632 :
633 : // Add any non-uniform attributes to the block, and keep track of the
634 : // uniform attributes.
635 E : attributes |= block->attributes() & ~kUniformAttributes;
636 E : uniform_attributes &= block->attributes();
637 :
638 : // Merge in the source ranges from each block.
639 E : BlockGraph::Offset block_offset = addr - begin;
640 : BlockGraph::Block::SourceRanges::RangePairs::const_iterator src_it =
641 E : block->source_ranges().range_pairs().begin();
642 E : for (; src_it != block->source_ranges().range_pairs().end(); ++src_it) {
643 : // The data range is relative to the containing block, so we have to translate
644 : // each individual block's offset to an offset in the merged block.
645 E : BlockGraph::Offset merged_offset = block_offset + src_it->first.start();
646 : bool pushed = source_ranges.Push(
647 : BlockGraph::Block::DataRange(merged_offset, src_it->first.size()),
648 E : src_it->second);
649 E : DCHECK(pushed);
650 E : }
651 :
652 E : bool removed = address_space_.Remove(Range(addr, block->size()));
653 E : DCHECK(removed);
654 E : size_t num_removed = block_addresses_.erase(intersecting[i].second);
655 E : DCHECK_EQ(1U, num_removed);
656 E : }
657 :
658 : // Create the new block.
659 : BlockGraph::Block* new_block = AddBlock(block_type,
660 : begin, end - begin,
661 E : block_name);
662 E : DCHECK(new_block != NULL);
663 :
664 : // Set the rest of the properties for the new block.
665 E : new_block->source_ranges() = source_ranges;
666 E : new_block->set_section(section_id);
667 E : new_block->set_alignment(alignment);
668 E : new_block->set_alignment_offset(alignment_offset);
669 E : new_block->set_attributes(attributes | uniform_attributes);
670 E : if (have_data) {
671 i : uint8* data = new_block->CopyData(merged_data.size(), &merged_data.at(0));
672 i : if (data == NULL) {
673 i : LOG(ERROR) << "Unable to copy merged data";
674 i : return NULL;
675 : }
676 : }
677 :
678 : // Now move all labels and references to the new block.
679 E : for (size_t i = 0; i < intersecting.size(); ++i) {
680 E : RelativeAddress addr = intersecting[i].first;
681 E : BlockGraph::Block* block = intersecting[i].second;
682 E : BlockGraph::Offset start_offset = addr - begin;
683 :
684 : // If the destination block is not a code block, preserve the old block
685 : // names as labels for debugging. We also need to make sure the label is
686 : // not empty, as that is verboten.
687 E : if (block_type != BlockGraph::CODE_BLOCK && !block->name().empty()) {
688 : new_block->SetLabel(start_offset,
689 : block->name(),
690 i : BlockGraph::DATA_LABEL);
691 : }
692 :
693 : // Move labels.
694 : BlockGraph::Block::LabelMap::const_iterator
695 E : label_it(block->labels().begin());
696 E : for (; label_it != block->labels().end(); ++label_it) {
697 : new_block->SetLabel(start_offset + label_it->first,
698 E : label_it->second);
699 E : }
700 :
701 : // Copy the reference map since we mutate the original.
702 E : BlockGraph::Block::ReferenceMap refs(block->references());
703 E : BlockGraph::Block::ReferenceMap::const_iterator ref_it(refs.begin());
704 E : for (; ref_it != refs.end(); ++ref_it) {
705 E : block->RemoveReference(ref_it->first);
706 E : new_block->SetReference(start_offset + ref_it->first, ref_it->second);
707 E : }
708 :
709 : // Redirect all referrers to the new block.
710 : block->TransferReferrers(start_offset,
711 : new_block,
712 E : BlockGraph::Block::kTransferInternalReferences);
713 :
714 : // Check that we've removed all references and
715 : // referrers from the original block.
716 E : DCHECK(block->references().empty());
717 E : DCHECK(block->referrers().empty());
718 :
719 : // Remove the original block.
720 E : bool removed = graph_->RemoveBlock(block);
721 E : DCHECK(removed);
722 E : }
723 :
724 E : return new_block;
725 E : }
726 :
727 E : bool BlockGraph::Section::set_name(const base::StringPiece& name) {
728 E : if (name == NULL)
729 i : return false;
730 :
731 E : if (name.empty())
732 i : return false;
733 :
734 E : name.CopyToString(&name_);
735 E : return true;
736 E : }
737 :
738 E : bool BlockGraph::Section::Save(core::OutArchive* out_archive) const {
739 E : DCHECK(out_archive != NULL);
740 : return out_archive->Save(id_) && out_archive->Save(name_) &&
741 E : out_archive->Save(characteristics_);
742 E : }
743 :
744 E : bool BlockGraph::Section::Load(core::InArchive* in_archive) {
745 E : DCHECK(in_archive != NULL);
746 : return in_archive->Load(&id_) && in_archive->Load(&name_) &&
747 E : in_archive->Load(&characteristics_);
748 E : }
749 :
750 E : std::string BlockGraph::Label::ToString() const {
751 : return base::StringPrintf("%s (%s)",
752 : name_.c_str(),
753 E : LabelAttributesToString(attributes_).c_str());
754 E : }
755 :
756 E : bool BlockGraph::Label::IsValid() const {
757 E : return AreValidAttributes(attributes_);
758 E : }
759 :
760 E : bool BlockGraph::Label::AreValidAttributes(LabelAttributes attributes) {
761 : // A label needs to have at least one attribute.
762 E : if (attributes == 0)
763 E : return false;
764 :
765 : // TODO(chrisha): Once we make the switch to VS2010 determine where call
766 : // site labels may land. Are they at the beginning of the call
767 : // instruction (in which case they may coincide with *_START_LABEL,
768 : // *_END_LABEL and CODE_LABEL), or do they point at the address of the
769 : // call (in which case they must be completely on their own)? For now, we
770 : // simply ignore them entirely from consideration.
771 E : attributes &= ~CALL_SITE_LABEL;
772 :
773 : // Public symbols can coincide with anything, so we can basically ignore
774 : // them.
775 E : attributes &= ~PUBLIC_SYMBOL_LABEL;
776 :
777 : // A code label can coincide with debug and scope labels. (It can coincide
778 : // with *_END_LABEL labels because of 1-byte instructions, like RET or INT.)
779 : const LabelAttributes kCodeDebugScopeLabels =
780 : CODE_LABEL | DEBUG_START_LABEL | DEBUG_END_LABEL | SCOPE_START_LABEL |
781 E : SCOPE_END_LABEL;
782 : if ((attributes & CODE_LABEL) != 0 &&
783 E : (attributes & ~kCodeDebugScopeLabels) != 0) {
784 E : return false;
785 : }
786 :
787 : // A jump table must be paired with a data label. It may also be paired
788 : // with a debug-end label if tail-call optimization has been applied by
789 : // the compiler/linker.
790 : const LabelAttributes kJumpDataLabelAttributes =
791 E : JUMP_TABLE_LABEL | DATA_LABEL;
792 E : if (attributes & JUMP_TABLE_LABEL) {
793 E : if ((attributes & kJumpDataLabelAttributes) != kJumpDataLabelAttributes)
794 E : return false;
795 : // Filter out the debug-end label if present and check that nothing else
796 : // is set.
797 E : attributes &= ~DEBUG_END_LABEL;
798 E : if ((attributes & ~kJumpDataLabelAttributes) != 0)
799 i : return false;
800 E : return true;
801 : }
802 :
803 : // A case table must be paired with a data label and nothing else.
804 : const LabelAttributes kCaseDataLabelAttributes =
805 E : CASE_TABLE_LABEL | DATA_LABEL;
806 E : if (attributes & CASE_TABLE_LABEL) {
807 E : if ((attributes & kCaseDataLabelAttributes) != kCaseDataLabelAttributes)
808 E : return false;
809 E : if ((attributes & ~kCaseDataLabelAttributes) != 0)
810 i : return false;
811 E : return true;
812 : }
813 :
814 : // If there is no case or jump label, then a data label must be on its own.
815 E : if ((attributes & DATA_LABEL) != 0 && (attributes & ~DATA_LABEL) != 0)
816 i : return false;
817 :
818 E : return true;
819 E : }
820 :
821 : BlockGraph::Block::Block(BlockGraph* block_graph)
822 : : id_(0U),
823 : type_(BlockGraph::CODE_BLOCK),
824 : size_(0U),
825 : alignment_(1U),
826 : alignment_offset_(0),
827 : padding_before_(0U),
828 : name_(NULL),
829 : compiland_name_(NULL),
830 : addr_(RelativeAddress::kInvalidAddress),
831 : block_graph_(block_graph),
832 : section_(kInvalidSectionId),
833 : attributes_(0U),
834 : owns_data_(false),
835 : data_(NULL),
836 E : data_size_(0U) {
837 E : DCHECK(block_graph != NULL);
838 E : }
839 :
840 : BlockGraph::Block::Block(BlockId id,
841 : BlockType type,
842 : Size size,
843 : const base::StringPiece& name,
844 : BlockGraph* block_graph)
845 : : id_(id),
846 : type_(type),
847 : size_(size),
848 : alignment_(1U),
849 : alignment_offset_(0),
850 : padding_before_(0U),
851 : name_(NULL),
852 : compiland_name_(NULL),
853 : addr_(RelativeAddress::kInvalidAddress),
854 : block_graph_(block_graph),
855 : section_(kInvalidSectionId),
856 : attributes_(0U),
857 : owns_data_(false),
858 : data_(NULL),
859 E : data_size_(0U) {
860 E : DCHECK(block_graph != NULL);
861 E : set_name(name);
862 E : }
863 :
864 E : BlockGraph::Block::~Block() {
865 E : DCHECK(block_graph_ != NULL);
866 E : if (owns_data_)
867 E : delete [] data_;
868 E : }
869 :
870 E : void BlockGraph::Block::set_name(const base::StringPiece& name) {
871 E : DCHECK(block_graph_ != NULL);
872 : const std::string& interned_name =
873 E : block_graph_->string_table().InternString(name);
874 E : name_ = &interned_name;
875 E : }
876 :
877 E : const std::string& BlockGraph::Block::compiland_name() const {
878 E : DCHECK(block_graph_ != NULL);
879 E : if (compiland_name_ == NULL)
880 E : return block_graph_->string_table().InternString("");
881 E : return *compiland_name_;
882 E : }
883 :
884 E : void BlockGraph::Block::set_compiland_name(const base::StringPiece& name) {
885 E : DCHECK(block_graph_ != NULL);
886 : const std::string& interned_name =
887 E : block_graph_->string_table().InternString(name);
888 E : compiland_name_ = &interned_name;
889 E : }
890 :
891 E : uint8* BlockGraph::Block::AllocateRawData(size_t data_size) {
892 E : DCHECK_GT(data_size, 0u);
893 E : DCHECK_LE(data_size, size_);
894 :
895 E : uint8* new_data = new uint8[data_size];
896 E : if (!new_data)
897 i : return NULL;
898 :
899 E : if (owns_data()) {
900 i : DCHECK(data_ != NULL);
901 i : delete [] data_;
902 : }
903 :
904 E : data_ = new_data;
905 E : data_size_ = data_size;
906 E : owns_data_ = true;
907 :
908 E : return new_data;
909 E : }
910 :
911 : void BlockGraph::Block::InsertData(Offset offset,
912 : Size size,
913 E : bool always_allocate_data) {
914 E : DCHECK_GE(offset, 0);
915 E : DCHECK_LE(offset, static_cast<Offset>(size_));
916 :
917 E : if (size > 0) {
918 : // Patch up the block.
919 E : size_ += size;
920 E : ShiftOffsetItemMap(offset, size, &labels_);
921 E : ShiftReferences(this, offset, size);
922 E : ShiftReferrers(this, offset, size, &referrers_);
923 E : source_ranges_.InsertUnmappedRange(DataRange(offset, size));
924 :
925 : // Does this affect already allocated data?
926 E : if (static_cast<Size>(offset) < data_size_) {
927 : // Reallocate, shift the old data to the end, and zero out the new data.
928 E : size_t bytes_to_shift = data_size_ - offset;
929 E : ResizeData(data_size_ + size);
930 E : uint8* new_data = GetMutableData();
931 E : memmove(new_data + offset + size, new_data + offset, bytes_to_shift);
932 E : memset(new_data + offset, 0, size);
933 : }
934 : }
935 :
936 : // If we've been asked to, at least make sure that the data is allocated.
937 E : if (always_allocate_data && data_size_ < offset + size)
938 E : ResizeData(offset + size);
939 :
940 : return;
941 E : }
942 :
943 E : bool BlockGraph::Block::RemoveData(Offset offset, Size size) {
944 E : DCHECK_GE(offset, 0);
945 E : DCHECK_LE(offset, static_cast<Offset>(size_));
946 :
947 E : if (size == 0)
948 i : return true;
949 :
950 : // Ensure there are no labels in this range.
951 E : if (labels_.lower_bound(offset) != labels_.lower_bound(offset + size))
952 E : return false;
953 :
954 : // Ensure that there are no references intersecting this range.
955 E : ReferenceMap::const_iterator refc_it = references_.begin();
956 E : for (; refc_it != references_.end(); ++refc_it) {
957 E : if (refc_it->first >= static_cast<Offset>(offset + size))
958 E : break;
959 E : if (static_cast<Offset>(refc_it->first + refc_it->second.size()) > offset)
960 E : return false;
961 E : }
962 :
963 : // Ensure there are no referrers pointing to the data we want to remove.
964 E : ReferrerSet::const_iterator refr_it = referrers_.begin();
965 E : for (; refr_it != referrers_.end(); ++refr_it) {
966 E : Reference ref;
967 E : if (!refr_it->first->GetReference(refr_it->second, &ref)) {
968 i : LOG(ERROR) << "Unable to get reference from referrer.";
969 i : return false;
970 : }
971 : if (ref.offset() < static_cast<Offset>(offset + size) &&
972 E : static_cast<Offset>(ref.offset() + ref.size()) > offset) {
973 E : return false;
974 : }
975 E : }
976 :
977 : // Patch up the block.
978 E : size_ -= size;
979 E : ShiftOffsetItemMap(offset + size, -static_cast<int>(size), &labels_);
980 E : ShiftReferences(this, offset + size, -static_cast<int>(size));
981 E : ShiftReferrers(this, offset + size, -static_cast<int>(size), &referrers_);
982 E : source_ranges_.RemoveMappedRange(DataRange(offset, size));
983 :
984 : // Does this affect already allocated data?
985 E : if (static_cast<Size>(offset) < data_size_) {
986 E : size_t new_data_size = data_size_ - size;
987 : // Is there data beyond the region being deleted?
988 E : if (static_cast<Size>(offset + size) < data_size_) {
989 : // Shift tail data to left.
990 E : uint8* data = GetMutableData();
991 E : size_t bytes_to_shift = data_size_ - offset - size;
992 E : size_t old_data_size = data_size_;
993 : memmove(data + new_data_size - bytes_to_shift,
994 : data + old_data_size - bytes_to_shift,
995 E : bytes_to_shift);
996 E : } else {
997 E : new_data_size = offset;
998 : }
999 E : ResizeData(new_data_size);
1000 : }
1001 :
1002 E : return true;
1003 E : }
1004 :
1005 : bool BlockGraph::Block::InsertOrRemoveData(Offset offset,
1006 : Size current_size,
1007 : Size new_size,
1008 E : bool always_allocate_data) {
1009 E : DCHECK_GE(offset, 0);
1010 E : DCHECK_LE(offset, static_cast<Offset>(size_));
1011 :
1012 : // If we're growing use InsertData.
1013 E : if (new_size > current_size) {
1014 E : Offset insert_offset = offset + current_size;
1015 E : Size insert_size = new_size - current_size;
1016 E : InsertData(insert_offset, insert_size, always_allocate_data);
1017 E : return true;
1018 : }
1019 :
1020 : // If we're shrinking we'll need to use RemoveData.
1021 E : if (new_size < current_size) {
1022 E : Offset remove_offset = offset + new_size;
1023 E : Size remove_size = current_size - new_size;
1024 E : if (!RemoveData(remove_offset, remove_size))
1025 i : return false;
1026 : // We fall through so that 'always_allocate_data' can be respected.
1027 : }
1028 :
1029 : // If we've been asked to, at least make sure that the data is allocated.
1030 E : if (always_allocate_data && data_size_ < offset + new_size)
1031 E : ResizeData(offset + new_size);
1032 :
1033 E : return true;
1034 E : }
1035 :
1036 E : void BlockGraph::Block::SetData(const uint8* data, size_t data_size) {
1037 : DCHECK((data_size == 0 && data == NULL) ||
1038 E : (data_size != 0 && data != NULL));
1039 E : DCHECK(data_size <= size_);
1040 :
1041 E : if (owns_data_)
1042 E : delete [] data_;
1043 :
1044 E : owns_data_ = false;
1045 E : data_ = data;
1046 E : data_size_ = data_size;
1047 E : }
1048 :
1049 E : uint8* BlockGraph::Block::AllocateData(size_t size) {
1050 E : uint8* new_data = AllocateRawData(size);
1051 E : if (new_data == NULL)
1052 i : return NULL;
1053 :
1054 E : ::memset(new_data, 0, size);
1055 E : return new_data;
1056 E : }
1057 :
1058 E : uint8* BlockGraph::Block::CopyData(size_t size, const void* data) {
1059 E : uint8* new_data = AllocateRawData(size);
1060 E : if (new_data == NULL)
1061 i : return NULL;
1062 :
1063 E : memcpy(new_data, data, size);
1064 E : return new_data;
1065 E : }
1066 :
1067 E : const uint8* BlockGraph::Block::ResizeData(size_t new_size) {
1068 E : if (new_size == data_size_)
1069 E : return data_;
1070 :
1071 E : if (!owns_data() && new_size < data_size_) {
1072 : // Not in our ownership and shrinking. We only need to adjust our length.
1073 E : data_size_ = new_size;
1074 E : } else {
1075 : // Either our own data, or it's growing (or both).
1076 E : uint8* new_data = NULL;
1077 :
1078 : // If the new size is non-zero we need to reallocate.
1079 E : if (new_size > 0) {
1080 E : new_data = new uint8[new_size];
1081 E : CHECK(new_data);
1082 :
1083 : // Copy the (head of the) old data.
1084 E : memcpy(new_data, data_, std::min(data_size_, new_size));
1085 E : if (new_size > data_size_) {
1086 : // Zero the tail.
1087 E : memset(new_data + data_size_, 0, new_size - data_size_);
1088 : }
1089 : }
1090 :
1091 E : if (owns_data())
1092 E : delete [] data_;
1093 :
1094 E : owns_data_ = true;
1095 E : data_ = new_data;
1096 E : data_size_ = new_size;
1097 : }
1098 :
1099 E : return data_;
1100 E : }
1101 :
1102 E : uint8* BlockGraph::Block::GetMutableData() {
1103 E : DCHECK_NE(0U, data_size_);
1104 E : DCHECK(data_ != NULL);
1105 :
1106 : // Make a copy if we don't already own the data.
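// This copy-on-write step lets callers modify the returned buffer without
// touching the externally owned bytes that data_ previously pointed at.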
1107 E : if (!owns_data()) {
1108 E : uint8* new_data = new uint8[data_size_];
1109 E : if (new_data == NULL)
1110 i : return NULL;
1111 E : memcpy(new_data, data_, data_size_);
1112 E : data_ = new_data;
1113 E : owns_data_ = true;
1114 : }
1115 E : DCHECK(owns_data_);
1116 :
1117 E : return const_cast<uint8*>(data_);
1118 E : }
1119 :
1120 E : bool BlockGraph::Block::HasExternalReferrers() const {
1121 E : ReferrerSet::const_iterator it = referrers().begin();
1122 E : for (; it != referrers().end(); ++it) {
1123 E : if (it->first != this)
1124 E : return true;
1125 E : }
1126 E : return false;
1127 E : }
1128 :
1129 E : bool BlockGraph::Block::SetReference(Offset offset, const Reference& ref) {
1130 E : DCHECK(ref.referenced() != NULL);
1131 :
1132 : // Non-code blocks can be referred to by pointers that lie outside of their
1133 : // extent (due to loop induction, arrays indexed with an implicit offset,
1134 : // etc). Code blocks cannot be referred to in this manner, because references
1135 : // into code blocks must point to places where the flow of execution lands.
1136 E : if (ref.referenced()->type() == CODE_BLOCK) {
1137 : DCHECK(ref.offset() >= 0 &&
1138 E : static_cast<size_t>(ref.offset()) <= ref.referenced()->size());
1139 E : DCHECK(offset + ref.size() <= size());
1140 : }
1141 :
1142 : #if defined(DEBUG) || !defined(NDEBUG)
1143 : {
1144 : // NOTE: It might be worthwhile making SetReference return true on success,
1145 : // and false on failure as it is possible for references to conflict.
1146 : // For now we simply check for conflicts in debug builds and die an
1147 : // unglorious death if we find any.
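// For example, an existing 4-byte reference at offset 10 occupies bytes
// [10, 14), so inserting a new 4-byte reference at offset 12 would conflict
// and trip the NOTREACHED below.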
1148 :
1149 E : if (!ref.IsValid())
1150 i : NOTREACHED() << "Trying to insert invalid reference.";
1151 :
1152 : // Examine references before us that could possibly conflict with us.
1153 E : Offset offset_begin = offset - Reference::kMaximumSize + 1;
1154 : ReferenceMap::const_iterator it =
1155 E : references_.lower_bound(offset_begin);
1156 E : for (; it != references_.end() && it->first < offset; ++it) {
1157 E : if (static_cast<Offset>(it->first + it->second.size()) > offset)
1158 i : NOTREACHED() << "Trying to insert conflicting reference.";
1159 E : }
1160 :
1161 : // Skip the reference at the same offset if there is one. This reference
1162 : // will be replaced by the new one.
1163 E : if (it != references_.end() && it->first == offset)
1164 E : ++it;
1165 :
1166 : // This is the first reference after our offset. Check to see if it lands
1167 : // within the range we want to occupy.
1168 : if (it != references_.end() &&
1169 E : it->first < static_cast<Offset>(offset + ref.size())) {
1170 i : NOTREACHED() << "Trying to insert conflicting reference.";
1171 : }
1172 : }
1173 : #endif
1174 :
1175 : // Did we have an earlier reference at this location?
1176 E : ReferenceMap::iterator it(references_.find(offset));
1177 E : bool inserted = false;
1178 E : if (it != references_.end()) {
1179 : // Erase the back reference.
1180 E : BlockGraph::Block* referenced = it->second.referenced();
1181 E : Referrer referrer(this, offset);
1182 E : size_t removed = referenced->referrers_.erase(referrer);
1183 E : DCHECK_EQ(1U, removed);
1184 :
1185 : // Lastly switch the reference.
1186 E : it->second = ref;
1187 E : } else {
1188 : // It's a new reference, insert it.
1189 E : inserted = references_.insert(std::make_pair(offset, ref)).second;
1190 E : DCHECK(inserted);
1191 : }
1192 :
1193 : // Record the back-reference.
1194 E : ref.referenced()->referrers_.insert(std::make_pair(this, offset));
1195 :
1196 E : return inserted;
1197 E : }
1198 :
1199 : bool BlockGraph::Block::GetReference(Offset offset,
1200 E : Reference* reference) const {
1201 E : DCHECK(reference != NULL);
1202 E : ReferenceMap::const_iterator it(references_.find(offset));
1203 E : if (it == references_.end())
1204 E : return false;
1205 :
1206 E : *reference = it->second;
1207 E : return true;
1208 E : }
1209 :
1210 E : bool BlockGraph::Block::RemoveReference(Offset offset) {
1211 : // Do we have a reference at this location?
1212 E : ReferenceMap::iterator it(references_.find(offset));
1213 E : if (it == references_.end())
1214 i : return false;
1215 :
1216 E : BlockGraph::Block* referenced = it->second.referenced();
1217 E : Referrer referrer(this, offset);
1218 E : size_t removed = referenced->referrers_.erase(referrer);
1219 E : DCHECK_EQ(1U, removed);
1220 E : references_.erase(it);
1221 :
1222 E : return true;
1223 E : }
1224 :
1225 E : bool BlockGraph::Block::RemoveAllReferences() {
1226 E : ReferenceMap::iterator it = references_.begin();
1227 E : while (it != references_.end()) {
1228 E : ReferenceMap::iterator to_remove = it;
1229 E : ++it;
1230 :
1231 : // TODO(rogerm): As an optimization, we don't need to drop intra-block
1232 : // references when disconnecting from the block_graph. Consider having
1233 : // BlockGraph::RemoveBlockByIterator() check that the block has no
1234 : // external referrers before calling this function and erasing the
1235 : // block.
1236 :
1237 : // Unregister this reference from the referred block then erase it.
1238 E : BlockGraph::Block* referenced = to_remove->second.referenced();
1239 E : Referrer referrer(this, to_remove->first);
1240 E : size_t removed = referenced->referrers_.erase(referrer);
1241 E : DCHECK_EQ(1U, removed);
1242 E : references_.erase(to_remove);
1243 E : }
1244 :
1245 E : return true;
1246 E : }
1247 :
1248 E : bool BlockGraph::Block::SetLabel(Offset offset, const Label& label) {
1249 E : DCHECK_LE(0, offset);
1250 E : DCHECK_LE(static_cast<size_t>(offset), size_);
1251 :
1252 E : VLOG(2) << name() << ": adding "
1253 : << LabelAttributesToString(label.attributes()) << " label '"
1254 : << label.name() << "' at offset " << offset << ".";
1255 :
1256 : // Try inserting the label into the label map.
1257 : std::pair<LabelMap::iterator, bool> result(
1258 E : labels_.insert(std::make_pair(offset, label)));
1259 :
1260 : // If it was freshly inserted then we're done.
1261 E : if (result.second)
1262 E : return true;
1263 :
1264 E : return false;
1265 E : }
1266 :
1267 E : bool BlockGraph::Block::GetLabel(Offset offset, Label* label) const {
1268 E : DCHECK(offset >= 0 && static_cast<size_t>(offset) <= size_);
1269 E : DCHECK(label != NULL);
1270 :
1271 E : LabelMap::const_iterator it = labels_.find(offset);
1272 E : if (it == labels_.end())
1273 E : return false;
1274 :
1275 E : *label = it->second;
1276 E : return true;
1277 E : }
1278 :
1279 E : bool BlockGraph::Block::RemoveLabel(Offset offset) {
1280 E : DCHECK(offset >= 0 && static_cast<size_t>(offset) <= size_);
1281 :
1282 E : return labels_.erase(offset) == 1;
1283 E : }
1284 :
1285 E : bool BlockGraph::Block::HasLabel(Offset offset) const {
1286 E : DCHECK(offset >= 0 && static_cast<size_t>(offset) <= size_);
1287 :
1288 E : return labels_.find(offset) != labels_.end();
1289 E : }
1290 :
1291 : bool BlockGraph::Block::TransferReferrers(Offset offset,
1292 E : Block* new_block, TransferReferrersFlags flags) {
1293 : // Redirect all referrers to the new block. We copy the referrer set
1294 : // because it is otherwise mutated during iteration.
1295 E : BlockGraph::Block::ReferrerSet referrers = referrers_;
1296 E : BlockGraph::Block::ReferrerSet::const_iterator referrer_it(referrers.begin());
1297 :
1298 E : for (; referrer_it != referrers.end(); ++referrer_it) {
1299 : // Get the original reference.
1300 E : BlockGraph::Block::Referrer referrer = *referrer_it;
1301 : BlockGraph::Block::ReferenceMap::const_iterator found_ref(
1302 E : referrer.first->references().find(referrer.second));
1303 E : DCHECK(found_ref != referrer.first->references().end());
1304 E : BlockGraph::Reference ref(found_ref->second);
1305 :
1306 E : if ((flags & kSkipInternalReferences) != 0 && referrer.first == this)
1307 i : continue;
1308 :
1309 E : Offset new_offset = ref.offset() + offset;
1310 E : Offset new_base = ref.base() + offset;
1311 :
1312 : // As in SetReference, references to non-code blocks may lie
1313 : // outside the extent of the block.
1314 E : if (type_ == CODE_BLOCK) {
1315 : if (new_offset < 0 ||
1316 E : static_cast<size_t>(new_offset) > new_block->size()) {
1317 E : LOG(ERROR) << "Transferred reference lies outside of code block.";
1318 E : return false;
1319 : }
1320 : }
1321 :
1322 : // Redirect the reference to the new block with the adjusted offset.
1323 : BlockGraph::Reference new_ref(ref.type(),
1324 : ref.size(),
1325 : new_block,
1326 : new_offset,
1327 E : new_base);
1328 E : referrer.first->SetReference(referrer.second, new_ref);
1329 E : }
1330 :
1331 E : return true;
1332 E : }
1333 :
1334 : // Returns true if this block contains the given range of bytes.
1335 i : bool BlockGraph::Block::Contains(RelativeAddress address, size_t size) const {
1336 i : return (address >= addr_ && address + size <= addr_ + size_);
1337 i : }
1338 :
1339 E : bool BlockGraph::Reference::IsValid() const {
1340 : // We can't reference a NULL block.
1341 E : if (referenced_ == NULL)
1342 E : return false;
1343 :
1344 : // First see if the base address is valid for the referenced block. Base
1345 : // addresses must track an existing position in the block, between zero
1346 : // (beginning of the block), and one past the last byte (end of the
1347 : // block). These are the same offsets that are valid for InsertData(),
1348 : // such that inserting data at that position or before would shift the
1349 : // reference along. Hence, an end reference (one past the size of the
1350 : // block) would always be shifted regardless of the point of insertion;
1351 : // conversely, a reference to address zero would never move.
1352 E : if (base_ < 0 || static_cast<size_t>(base_) > referenced_->size())
1353 i : return false;
1354 :
1355 E : if (!IsValidTypeSize(type_, size_))
1356 i : return false;
1357 :
1358 E : return true;
1359 E : }
1360 :
1361 E : bool BlockGraph::Reference::IsValidTypeSize(ReferenceType type, Size size) {
1362 E : switch (type & ~RELOC_REF_BIT) {
1363 : // We see 8- and 32-bit relative JMPs.
1364 : case PC_RELATIVE_REF:
1365 E : return size == 1 || size == 4;
1366 :
1367 : // These guys are all pointer sized.
1368 : case ABSOLUTE_REF:
1369 : case RELATIVE_REF:
1370 : case FILE_OFFSET_REF:
1371 E : return size == 4;
1372 :
1373 : case SECTION_REF:
1374 E : return size == 2;
1375 : case SECTION_OFFSET_REF:
1376 E : return size == 1 || size == 4;
1377 :
1378 : default:
1379 i : NOTREACHED() << "Unknown ReferenceType.";
1380 : }
1381 :
1382 i : return false;
1383 E : }
1384 :
1385 : } // namespace block_graph
|