// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/block_graph/block_util.h"

#include <algorithm>
#include <vector>

#include "mnemonics.h"  // NOLINT

namespace block_graph {

bool GetBasicBlockSourceRange(const BasicCodeBlock& bb,
                              BlockGraph::Block::SourceRange* source_range) {
  DCHECK(source_range != NULL);

  typedef BlockGraph::Block::SourceRange SourceRange;
  std::vector<SourceRange> ranges;

  // Collect all the instruction and successor source ranges.
  BasicBlock::Instructions::const_iterator inst_it(bb.instructions().begin());
  for (; inst_it != bb.instructions().end(); ++inst_it) {
    const SourceRange& range = inst_it->source_range();
    if (range.size() > 0)
      ranges.push_back(range);
  }
  BasicBlock::Successors::const_iterator succ_it(bb.successors().begin());
  for (; succ_it != bb.successors().end(); ++succ_it) {
    const SourceRange& range = succ_it->source_range();
    if (range.size() > 0)
      ranges.push_back(range);
  }

  if (ranges.empty())
    return false;

  // Sort the ranges.
  std::sort(ranges.begin(), ranges.end());

  // Test that they're all contiguous, while computing their total length.
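  // For example, sorted ranges [10, 14) and [14, 20) are contiguous and
  // collapse into the single range [10, 20); any gap between them means the
  // basic block has no well-defined source range and we bail out.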
  SourceRange::Size size = ranges[0].size();
  for (size_t i = 0; i < ranges.size() - 1; ++i) {
    size += ranges[i + 1].size();
    if (ranges[i].start() + ranges[i].size() != ranges[i + 1].start())
      return false;
  }
  *source_range = SourceRange(ranges[0].start(), size);

  return true;
}

bool IsUnsafeReference(const BlockGraph::Block* referrer,
                       const BlockGraph::Reference& ref) {
  // References with a non-zero offset point into the middle of the target
  // block, so we can't assume call semantics; treat them as unsafe outright.
  if (ref.offset() != 0)
    return true;

  BlockGraph::BlockAttributes kUnsafeAttribs =
      BlockGraph::HAS_INLINE_ASSEMBLY |
      BlockGraph::BUILT_BY_UNSUPPORTED_COMPILER;

  bool unsafe_referrer = false;
  if (referrer->type() == BlockGraph::CODE_BLOCK &&
      (referrer->attributes() & kUnsafeAttribs) != 0) {
    unsafe_referrer = true;
  }

  DCHECK_EQ(BlockGraph::CODE_BLOCK, ref.referenced()->type());
  bool unsafe_block = (ref.referenced()->attributes() & kUnsafeAttribs) != 0;

  // If both the referrer and the referenced block are unsafe, we can't
  // safely assume that this reference has call semantics, e.g. that a
  // return address is at the top of the stack on entry.
  // Ideally we'd decide this on the basis of a full stack analysis, but
  // beggars can't be choosers, plus for hand-coded assembly that's
  // the halting problem :).
  // For instrumentation that uses return address swizzling, instrumenting
  // an unsafe reference leads to crashes, so it's better to back off and get
  // slightly less coverage.
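  // For example, a zero-offset reference between two compiler-generated code
  // blocks without either attribute is reported as safe; only when both the
  // referrer and the referenced block are flagged do we report it as unsafe.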
  return unsafe_referrer && unsafe_block;
}

bool HasUnexpectedStackFrameManipulation(
    block_graph::BasicBlockSubGraph* subgraph) {
  DCHECK(subgraph != NULL);

  BasicBlockSubGraph::BBCollection::iterator it =
      subgraph->basic_blocks().begin();

  // Process each basic block to find an invalid stack manipulation.
  for (; it != subgraph->basic_blocks().end(); ++it) {
    BasicCodeBlock* bb = BasicCodeBlock::Cast(*it);
    if (bb == NULL)
      continue;

    // Process each instruction.
    BasicBlock::Instructions::iterator iter_inst = bb->instructions().begin();
    for (; iter_inst != bb->instructions().end(); ++iter_inst) {
      const _DInst& repr = iter_inst->representation();

      // Consider only instructions whose first operand is EBP (read/write).
      if (repr.ops[0].type == O_REG && repr.ops[0].index == R_EBP) {
        // PUSH/POP EBP is valid.
        if (repr.opcode == I_POP || repr.opcode == I_PUSH)
          continue;

        // MOV EBP, ESP is valid.
        if (repr.opcode == I_MOV &&
            repr.ops[1].type == O_REG && repr.ops[1].index == R_ESP)
          continue;

        // We found a non-conventional write to EBP (the frame pointer).
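        // Examples that land here: MOV EBP, EAX, ADD EBP, 4, or
        // LEA EBP, [ESP + 8].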
        return true;
      }
    }
  }

  // There is no unconventional/unexpected stack frame manipulation.
  return false;
}

bool GetJumpTableSize(const BlockGraph::Block* block,
    BlockGraph::Block::LabelMap::const_iterator jump_table_label,
    size_t* table_size) {
  DCHECK(block != NULL);
  DCHECK(table_size != NULL);
  DCHECK(block->HasLabel(jump_table_label->first) &&
      block->labels().at(jump_table_label->first) == jump_table_label->second);

  // Ensure that this label has the jump table attribute.
  if (!jump_table_label->second.has_attributes(BlockGraph::JUMP_TABLE_LABEL)) {
    LOG(ERROR) << "This label doesn't have the jump table attribute.";
    return false;
  }

  BlockGraph::Offset beginning_offset = jump_table_label->first;
  BlockGraph::Offset end_offset = beginning_offset;

  // The maximum end offset for this jump table is either the offset of the
  // next label or the end of this block.
  BlockGraph::Offset max_end_offset = 0;
  BlockGraph::Block::LabelMap::const_iterator next_label = ++(jump_table_label);
  if (next_label != block->labels().end())
    max_end_offset = next_label->first;
  else
    max_end_offset = block->size();

  DCHECK_NE(0, max_end_offset);

  BlockGraph::Block::ReferenceMap::const_iterator iter_ref =
      block->references().find(beginning_offset);
  DCHECK(iter_ref != block->references().end());
  DCHECK(iter_ref->first == beginning_offset);
  DCHECK(iter_ref->second.type() == BlockGraph::ABSOLUTE_REF ||
         iter_ref->second.type() == BlockGraph::RELATIVE_REF);
  DCHECK_EQ(BlockGraph::Reference::kMaximumSize, iter_ref->second.size());

  BlockGraph::Size reference_size = iter_ref->second.size();

  // Iterate over the references to calculate the size of this jump table. We
  // stop once we reach the maximum end offset or as soon as we find a
  // reference that is not contiguous with the previous one.
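  // For example, assuming 4-byte references (kMaximumSize == 4), a table of
  // three contiguous entries ends 12 bytes past the label and yields a
  // table_size of 3.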
  while (end_offset < max_end_offset) {
    end_offset += reference_size;
    ++iter_ref;
    if (iter_ref == block->references().end() || iter_ref->first != end_offset)
      break;
    DCHECK_EQ(end_offset, iter_ref->first);
    DCHECK(iter_ref->second.type() == BlockGraph::ABSOLUTE_REF ||
           iter_ref->second.type() == BlockGraph::RELATIVE_REF);
    DCHECK_EQ(BlockGraph::Reference::kMaximumSize, iter_ref->second.size());
  }

  *table_size = (end_offset - beginning_offset) /
      BlockGraph::Reference::kMaximumSize;

  return true;
}

}  // namespace block_graph
|