// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/memory_interceptors_patcher.h"

#include <map>

#include "base/win/pe_image.h"
#include "syzygy/agent/asan/memory_interceptors.h"
#include "syzygy/agent/asan/scoped_page_protections.h"
#include "syzygy/agent/asan/shadow.h"
#include "syzygy/common/align.h"

// The linker satisfies this symbol. This gets us a pointer to our own module
// when we're loaded.
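// (An HMODULE is simply the image's base address, so casting &__ImageBase to
// HMODULE, as is done below, yields a handle to the module containing this
// code.)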
extern "C" IMAGE_DOS_HEADER __ImageBase;

namespace agent {
namespace asan {

namespace {

static const char kProbesSectionName[] = ".probes";
static const char kReadOnlySectionName[] = ".rdata";

// Gets the extents of the given section, writing them to |begin| and |end|.
// Returns true on success, false otherwise. Logs verbosely on failure.
bool GetSectionExtents(const base::win::PEImage& image,
                       const char* section_name,
                       uint8_t** begin,
                       uint8_t** end) {
  DCHECK_NE(static_cast<char*>(nullptr), section_name);
  DCHECK_NE(static_cast<uint8_t**>(nullptr), begin);
  DCHECK_NE(static_cast<uint8_t**>(nullptr), end);

  const IMAGE_SECTION_HEADER* section_header =
      image.GetImageSectionHeaderByName(section_name);
  if (section_header == nullptr) {
    LOG(ERROR) << "Image does not contain a " << section_name << " section.";
    return false;
  }
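  // GetDosHeader() returns the image's load address, so adding the section's
  // relative virtual address yields the section's extent in memory.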
  *begin = reinterpret_cast<uint8_t*>(image.GetDosHeader()) +
           section_header->VirtualAddress;
  *end = *begin + section_header->Misc.VirtualSize;

  return true;
}

// An exception filter that triggers only for access violations.
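// (Returning EXCEPTION_EXECUTE_HANDLER transfers control to the matching
// __except block; EXCEPTION_CONTINUE_SEARCH lets any other exception type
// propagate to outer handlers.)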
DWORD AccessViolationFilter(EXCEPTION_POINTERS* e) {
  if (e->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
    return EXCEPTION_EXECUTE_HANDLER;
  return EXCEPTION_CONTINUE_SEARCH;
}

// Return status used by WritePointerImpl.
enum WritePointerStatus {
  kWritePointerSuccess,
  kWritePointerUnexpectedPreviousValue,
  kWritePointerAccessViolation,
};

// Safely writes the given value to the given address under an exception
// handler. If this fails with an access violation then it silently swallows
// the exception. Returns a detailed status.
WritePointerStatus WritePointerImpl(const void* expected_old_value,
                                    const void* value,
                                    volatile void** address) {
  DCHECK_NE(static_cast<void*>(nullptr), address);

  // The value to be written is not necessarily pointer aligned and may require
  // two writes. Determine the bounds of pointer aligned data to be written.
  volatile uint8_t* dst = reinterpret_cast<volatile uint8_t*>(address);
  volatile uint8_t* dst_begin = ::common::AlignDown(dst, sizeof(uintptr_t));
  volatile uint8_t* dst_end =
      ::common::AlignUp(dst + sizeof(uintptr_t), sizeof(uintptr_t));
  size_t offset = reinterpret_cast<uintptr_t>(address) % sizeof(uintptr_t);
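  // For example, on a 32-bit build (sizeof(uintptr_t) == 4) an |address| of
  // 0x1003 yields dst_begin == 0x1000, dst_end == 0x1008 and offset == 3: the
  // pointer straddles two aligned words and both must be rewritten.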

  // Copy the original range of bytes. This will serve as a template for
  // reading and writing.
  uintptr_t old_values[2] = {};
  uintptr_t new_values[2] = {};
  ::memcpy(old_values, const_cast<uint8_t*>(dst_begin), dst_end - dst_begin);
  ::memcpy(new_values, old_values, sizeof(old_values));

  // The data we copied should have the expected original pointer, otherwise
  // somebody has been tinkering at the same time as us.
  void* copied_old_value = *reinterpret_cast<void**>(
      reinterpret_cast<uint8_t*>(old_values) + offset);
  if (copied_old_value != expected_old_value)
    return kWritePointerUnexpectedPreviousValue;

  // Stamp the new value into the template.
  *reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(new_values) + offset) =
      const_cast<void*>(value);

  // Up until now everything has been 'safe' reads. Stamp in the new data,
  // but use interlocked operations to be extra careful.
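  // A compare-and-swap only commits a word if it still holds exactly the
  // bytes read above, so a concurrent change to neighboring data within the
  // same word is detected rather than silently overwritten.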
  __try {
    uintptr_t old_value = ::InterlockedCompareExchange(
        reinterpret_cast<volatile uintptr_t*>(dst_begin),
        new_values[0], old_values[0]);
    if (old_value != old_values[0])
      return kWritePointerUnexpectedPreviousValue;

    // If no second write is required (the actual pointer value being written
    // was aligned) then the write is complete.
    if (dst_end - dst_begin == sizeof(uintptr_t))
      return kWritePointerSuccess;

    // Otherwise try to write the second half of the pointer.
    old_value = ::InterlockedCompareExchange(
        reinterpret_cast<volatile uintptr_t*>(dst_begin) + 1,
        new_values[1], old_values[1]);
    if (old_value != old_values[1])
      return kWritePointerUnexpectedPreviousValue;
  } __except (AccessViolationFilter(GetExceptionInformation())) {  // NOLINT
    return kWritePointerAccessViolation;
  }

  return kWritePointerSuccess;
}

// Safely writes the given value to the given address under an exception
// handler. Returns true on success, false on failure. Logs verbosely on
// failure.
bool WritePointer(const void* expected_old_value,
                  const void* value,
                  volatile void** address) {
  DCHECK_NE(static_cast<void*>(nullptr), address);

  WritePointerStatus status =
      WritePointerImpl(expected_old_value, value, address);
  switch (status) {
    case kWritePointerSuccess:
      return true;
    case kWritePointerUnexpectedPreviousValue: {
      LOG(ERROR) << "Unexpected previous value. Racy write to this location?";
      return false;
    }
    case kWritePointerAccessViolation: {
      LOG(ERROR) << "Access violation during write. Racy protection changes?";
      return false;
    }
  }

  NOTREACHED();
  return false;
}

bool PatchMemoryInterceptorShadowReferencesInternalImpl(
    HMODULE module,
    const uint8_t* current_shadow_memory,
    const void** shadow_memory_references,
    const uint8_t* new_shadow_memory,
    ScopedPageProtections* scoped_page_protections) {
  DCHECK_NE(static_cast<HMODULE>(nullptr), module);
  DCHECK_NE(static_cast<uint8_t*>(nullptr), current_shadow_memory);
  DCHECK_NE(static_cast<void**>(nullptr), shadow_memory_references);
  DCHECK_NE(static_cast<uint8_t*>(nullptr), new_shadow_memory);
  DCHECK_NE(static_cast<ScopedPageProtections*>(nullptr),
            scoped_page_protections);

  base::win::PEImage image(module);
  if (!image.VerifyMagic()) {
    LOG(ERROR) << "Does not appear to be a valid image handle.";
    return false;
  }

  uint8_t* probes_begin = nullptr;
  uint8_t* probes_end = nullptr;
  if (!GetSectionExtents(image, kProbesSectionName, &probes_begin,
                         &probes_end)) {
    return false;
  }

  uint8_t* rdata_begin = nullptr;
  uint8_t* rdata_end = nullptr;
  if (!GetSectionExtents(image, kReadOnlySectionName, &rdata_begin,
                         &rdata_end)) {
    return false;
  }

  // Iterate over the shadow memory references and patch them.
  uint8_t** cursor = reinterpret_cast<uint8_t**>(
      const_cast<void**>(shadow_memory_references));
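  // The table is assumed to be a null-terminated array of pointers living in
  // the read-only data section; each non-null entry points at the
  // pointer-sized shadow-base operand embedded in one of the probes.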
  for (; *cursor != nullptr; ++cursor) {
    // Ensure the table entry itself is within the .rdata section.
    if (reinterpret_cast<uint8_t*>(cursor) < rdata_begin ||
        reinterpret_cast<uint8_t*>(cursor) + sizeof(uintptr_t) >= rdata_end) {
      LOG(ERROR) << "Shadow reference table entry is outside of "
                 << kReadOnlySectionName << " section";
      return false;
    }

    // Ensure the reference is within the probes section.
    if (*cursor < probes_begin || *cursor + sizeof(uintptr_t) >= probes_end) {
      LOG(ERROR) << "Shadow reference is outside of " << kProbesSectionName
                 << " section";
      return false;
    }

    // The shadow reference must be a direct pointer to the current shadow.
    volatile uint8_t** shadow_ref =
        reinterpret_cast<volatile uint8_t**>(*cursor);
    if (*shadow_ref != current_shadow_memory) {
      // In general a probe could reference the shadow at an arbitrary offset
      // from its base, but the probes as currently generated always reference
      // the shadow base directly, so any other value is invalid.
      LOG(ERROR) << "Invalid shadow memory reference.";
      return false;
    }

    // Update the shadow memory reference to point to the new shadow memory.
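    // The reference lives in a section that is mapped read-only, so its
    // containing page(s) must be made writable before patching;
    // |scoped_page_protections| restores the original protections afterwards.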
    if (!scoped_page_protections->EnsureContainingPagesWritable(
            shadow_ref, sizeof(*shadow_ref))) {
      LOG(ERROR) << "Failed to make page writable.";
      return false;
    }
    if (!WritePointer(current_shadow_memory, new_shadow_memory,
                      reinterpret_cast<volatile void**>(shadow_ref))) {
      return false;
    }
  }

  return true;
}

bool PatchMemoryInterceptorShadowReferencesImpl(
    HMODULE module,
    const uint8_t* current_shadow_memory,
    const void** shadow_memory_references,
    const uint8_t* new_shadow_memory) {
  DCHECK_NE(static_cast<HMODULE>(nullptr), module);
  DCHECK_NE(static_cast<uint8_t*>(nullptr), current_shadow_memory);
  DCHECK_NE(static_cast<void**>(nullptr), shadow_memory_references);
  DCHECK_NE(static_cast<uint8_t*>(nullptr), new_shadow_memory);

  bool did_succeed = true;
  ScopedPageProtections scoped_page_protections;
  if (!PatchMemoryInterceptorShadowReferencesInternalImpl(
          module, current_shadow_memory, shadow_memory_references,
          new_shadow_memory, &scoped_page_protections)) {
    did_succeed = false;
  }

  // Try hard to restore the page protections.
  bool protections_restored = false;
  for (size_t i = 0; i < 3; ++i) {
    if (scoped_page_protections.RestorePageProtections()) {
      protections_restored = true;
      break;
    }
  }
  if (!protections_restored)
    did_succeed = false;

  return did_succeed;
}

}  // namespace

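// A minimal usage sketch (hypothetical caller, not part of this file): after
// mapping a replacement shadow, e.g.
//   uint8_t* new_shadow = static_cast<uint8_t*>(::VirtualAlloc(
//       nullptr, shadow_size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
//   if (!PatchMemoryInterceptorShadowReferences(old_shadow, new_shadow))
//     LOG(ERROR) << "Failed to patch memory interceptors.";
// every probe in this module is redirected to read from |new_shadow|.
// |shadow_size| and |old_shadow| are placeholders for the caller's values.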
bool PatchMemoryInterceptorShadowReferences(const uint8_t* old_shadow_memory,
                                            const uint8_t* new_shadow_memory) {
  DCHECK_NE(static_cast<uint8_t*>(nullptr), new_shadow_memory);
  if (!PatchMemoryInterceptorShadowReferencesImpl(
          reinterpret_cast<HMODULE>(&__ImageBase), old_shadow_memory,
          asan_shadow_references, new_shadow_memory)) {
    return false;
  }
  return true;
}

}  // namespace asan
}  // namespace agent