// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Implementation details for PageAllocator. This is not meant to be
// included directly.
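//
// A minimal usage sketch (illustrative only: MyObject stands in for any
// type at least sizeof(uintptr_t) in size, and the class declarations live
// in page_allocator.h):
//
//   // Hands out groups of 1 to 4 MyObject-sized objects, carved out of
//   // 16 KB pages, with statistics tracking enabled.
//   TypedPageAllocator<MyObject, 4, 16 * 1024, true> allocator;
//   MyObject* objects = allocator.Allocate(3);
//   // ... use objects[0] through objects[2] ...
//   allocator.Free(objects, 3);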

#ifndef SYZYGY_AGENT_ASAN_PAGE_ALLOCATOR_IMPL_H_
#define SYZYGY_AGENT_ASAN_PAGE_ALLOCATOR_IMPL_H_

#include <windows.h>

#include <algorithm>

#include "base/logging.h"
#include "syzygy/agent/asan/constants.h"
#include "syzygy/common/align.h"

namespace agent {
namespace asan {

// Empty statistics helper, used when kKeepStats is false.
template<> struct PageAllocatorStatisticsHelper<false> {
  void Lock() { }
  void Unlock() { }
  template<size_t PageAllocatorStatistics::*stat> void Increment(size_t) { }
  template<size_t PageAllocatorStatistics::*stat> void Decrement(size_t) { }
  void GetStatistics(PageAllocatorStatistics* stats) const {
    DCHECK_NE(static_cast<PageAllocatorStatistics*>(NULL), stats);
    ::memset(stats, 0, sizeof(*stats));
  }
};

// Actual statistics helper, used when kKeepStats is true.
template<> struct PageAllocatorStatisticsHelper<true> {
  PageAllocatorStatisticsHelper() {
    ::memset(&stats, 0, sizeof(stats));
  }

  void Lock() { lock.Acquire(); }
  void Unlock() { lock.Release(); }

  template<size_t PageAllocatorStatistics::*member>
  void Increment(size_t amount) {
    lock.AssertAcquired();
    stats.*member += amount;
  }

  template<size_t PageAllocatorStatistics::*member>
  void Decrement(size_t amount) {
    lock.AssertAcquired();
    stats.*member -= amount;
  }

  void GetStatistics(PageAllocatorStatistics* stats) const {
    lock.AssertAcquired();
    DCHECK_NE(static_cast<PageAllocatorStatistics*>(NULL), stats);
    *stats = this->stats;
  }

  base::Lock lock;
  PageAllocatorStatistics stats;
};

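// The allocator embeds one of the two helpers above, selected at compile
// time by its kKeepStats template parameter (a sketch; the actual member
// declaration lives in page_allocator.h):
//
//   PageAllocatorStatisticsHelper<kKeepStats> stats_;
//
// With kKeepStats == false every call on stats_ is an empty inline function,
// so the statistics bookkeeping compiles away entirely.
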
template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    PageAllocator()
    : current_page_(NULL), current_object_(NULL), end_object_(NULL) {
  COMPILE_ASSERT(kObjectSize >= sizeof(uintptr_t), object_size_too_small);

  // There needs to be room for at least one object per page, plus extra
  // bytes for a linked-list pointer to the previous page.
  page_size_ = std::max<size_t>(kPageSize, kObjectSize + sizeof(void*));
  // Round this up to a multiple of the OS page size.
  page_size_ = ::common::AlignUp(page_size_, agent::asan::GetPageSize());

  objects_per_page_ = (page_size_ - sizeof(void*)) / kObjectSize;

  // Clear the free lists.
  ::memset(free_, 0, sizeof(free_));
}
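
// The page layout that results is sketched below (a sketch only; the exact
// offsets depend on the template parameters and the OS page size):
//
//   +----------+----------+-----+--------------+--------+------------+
//   | object 0 | object 1 | ... | object N - 1 | unused | prev page* |
//   +----------+----------+-----+--------------+--------+------------+
//   ^ page                                              ^ page + page_size_
//                                                           - sizeof(void*)
//
// Here N is objects_per_page_, and the trailing pointer threads the pages
// into a singly linked list rooted at current_page_.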

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    ~PageAllocator() {
  // Return all pages to the OS.
  uint8* page = current_page_;
  while (page) {
    uint8* prev = page + page_size_ - sizeof(void*);
    uint8* next_page = *reinterpret_cast<uint8**>(prev);
    CHECK_EQ(TRUE, ::VirtualFree(page, 0, MEM_RELEASE));
    page = next_page;
  }
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void* PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count) {
  size_t received = 0;
  void* alloc = Allocate(count, &received);

  // If the allocation came back with more objects than requested then split
  // it and add the leftover objects to the appropriate free list.
  if (count < received) {
    size_t n = received - count;
    uint8* remaining = reinterpret_cast<uint8*>(alloc) + kObjectSize * count;
    // These are objects from an active allocation that are being returned.
    // Thus we don't decrement the number of allocated groups, but we do
    // decrement the number of allocated objects.
    FreePush(remaining, n, false, true);
  }

  return alloc;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void* PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count, size_t* received) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  DCHECK_NE(static_cast<size_t*>(NULL), received);

  uint8* object = NULL;

  // Look to the lists of freed objects and try to use one of those. Use the
  // first size class that's big enough; the single-argument Allocate splits
  // off and frees any leftover objects.
  for (size_t n = count; n <= kMaxObjectCount; ++n) {
    // This check is racy and can end up lying to us; however, it's faster to
    // first check outside of the lock. FreePop redoes the check properly.
    if (free_[n - 1] == NULL)
      continue;

    // Unlink the objects from the free list of size n.
    object = FreePop(n);
    if (object == NULL)
      continue;

    // Update statistics.
    stats_.Lock();
    stats_.Increment<&PageAllocatorStatistics::allocated_groups>(1);
    stats_.Increment<&PageAllocatorStatistics::allocated_objects>(n);
    stats_.Unlock();

    *received = n;
    return object;
  }

  // Get the object from a page. Try the active page first and allocate a new
  // one if need be.
  {
    base::AutoLock lock(lock_);

    // If the current page is not big enough for the requested allocation then
    // get a new page.
    DCHECK_LE(current_object_, end_object_);
    if (static_cast<size_t>(end_object_ - current_object_) <
        kObjectSize * count) {
      if (!AllocatePageLocked())
        return NULL;
    }

    DCHECK_NE(static_cast<uint8*>(NULL), current_page_);
    DCHECK_NE(static_cast<uint8*>(NULL), current_object_);
    DCHECK_LE(current_object_ + kObjectSize * count, end_object_);

    // Grab a copy of the cursor and advance it.
    object = current_object_;
    current_object_ += kObjectSize * count;
  }

  // Update statistics.
  stats_.Lock();
  stats_.Increment<&PageAllocatorStatistics::allocated_groups>(1);
  stats_.Increment<&PageAllocatorStatistics::allocated_objects>(count);
  stats_.Unlock();

  *received = count;
  return object;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Free(void* object, size_t count) {
  DCHECK_NE(static_cast<void*>(NULL), object);
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);

#ifndef NDEBUG
  // These checks are expensive, so they only run in debug builds.
  // Ensure that the object was actually allocated by this allocator.
  DCHECK(Allocated(object, count));
  // Ensure that it has not already been freed.
  DCHECK(!Freed(object, count));
#endif

  // Add this object to the list of freed objects for this size class.
  // This is a simple allocation that is being returned, so both allocated
  // groups and objects are decremented.
  FreePush(object, count, true, true);
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
bool PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocated(const void* object, size_t count) {
  if (object == NULL || count == 0)
    return false;

  base::AutoLock lock(lock_);

  // Look for a page owning this object.
  const uint8* alloc = reinterpret_cast<const uint8*>(object);
  const uint8* alloc_end = alloc + count * kObjectSize;
  uint8* page = current_page_;
  while (page) {
    // Skip to the next page if it doesn't own this allocation. The link to
    // the next page is stored in the last sizeof(void*) bytes of the page,
    // as laid out by AllocatePageLocked.
    uint8* page_end = page + objects_per_page_ * kObjectSize;
    if (alloc < page || alloc_end > page_end) {
      page = *reinterpret_cast<uint8**>(page + page_size_ - sizeof(void*));
      continue;
    }

    // If the allocation hasn't yet been handed out then this page does not
    // own it.
    if (page == current_page_ && alloc_end > current_object_)
      return false;

    // Determine if it's aligned as expected.
    if (((alloc - page) % kObjectSize) != 0)
      return false;

    // This allocation must have been previously handed out at some point.
    // Note that this does not allow the detection of double frees. Nor does
    // it allow us to determine if the object was the address returned by an
    // allocation, or simply lies somewhere within an allocated chunk.
    return true;
  }

  // The pages have been exhausted and no match was found.
  return false;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
bool PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Freed(const void* object, size_t count) {
  if (object == NULL)
    return false;

  DCHECK_NE(static_cast<void*>(NULL), object);

  // Determine the range of size classes to investigate. A count of zero
  // means all size classes are searched.
  size_t n_min = 1;
  size_t n_max = kMaxObjectCount;
  if (count != 0) {
    n_min = count;
    n_max = count;
  }

  // Iterate over the applicable size classes.
  for (size_t n = n_min; n <= n_max; ++n) {
    base::AutoLock lock(free_lock_[n - 1]);

    // Walk the free list for this size class.
    uint8* free = free_[n - 1];
    while (free) {
      if (object == free)
        return true;

      // Jump to the next freed object in this size class.
      free = *reinterpret_cast<uint8**>(free);
    }
  }

  // The freed objects have been exhausted and no match was found.
  return false;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    GetStatistics(PageAllocatorStatistics* stats) {
  DCHECK_NE(static_cast<PageAllocatorStatistics*>(NULL), stats);

  stats_.Lock();
  stats_.GetStatistics(stats);
  stats_.Unlock();
}
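
// The free lists are intrusive singly linked lists: a freed group of n
// objects is threaded onto free_[n - 1] by storing a next pointer in the
// group's first sizeof(uintptr_t) bytes (hence the COMPILE_ASSERT in the
// constructor that kObjectSize >= sizeof(uintptr_t)). For example, after
// two single-object frees of A and then B, the list looks like:
//
//   free_[0] --> B --> A --> NULL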

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
uint8* PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    FreePop(size_t count) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);

  uint8* object = NULL;
  {
    base::AutoLock lock(free_lock_[count - 1]);
    object = free_[count - 1];
    if (object)
      free_[count - 1] = *reinterpret_cast<uint8**>(object);
  }

  // Update statistics, but only if an object was actually popped. The pop
  // can fail if another thread raced us and emptied this free list.
  if (object != NULL) {
    stats_.Lock();
    stats_.Decrement<&PageAllocatorStatistics::freed_groups>(1);
    stats_.Decrement<&PageAllocatorStatistics::freed_objects>(count);
    stats_.Unlock();
  }

  return object;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    FreePush(void* object, size_t count,
             bool decr_alloc_groups, bool decr_alloc_objects) {
  DCHECK_NE(static_cast<void*>(NULL), object);
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);

  {
    base::AutoLock lock(free_lock_[count - 1]);
    *reinterpret_cast<uint8**>(object) = free_[count - 1];
    free_[count - 1] = reinterpret_cast<uint8*>(object);
  }

  // Update statistics.
  stats_.Lock();
  if (decr_alloc_groups)
    stats_.Decrement<&PageAllocatorStatistics::allocated_groups>(1);
  if (decr_alloc_objects)
    stats_.Decrement<&PageAllocatorStatistics::allocated_objects>(count);
  stats_.Increment<&PageAllocatorStatistics::freed_groups>(1);
  stats_.Increment<&PageAllocatorStatistics::freed_objects>(count);
  stats_.Unlock();
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
bool PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    AllocatePageLocked() {
  DCHECK_LT(0u, page_size_);
  lock_.AssertAcquired();

  // If there are remaining objects on the current page, stuff them into the
  // appropriately sized free list.
  // NOTE: If this is a point of contention it could be moved outside the
  //     scope of lock_.
  if (current_object_ < end_object_) {
    size_t n = static_cast<size_t>(end_object_ - current_object_) /
        kObjectSize;
    DCHECK_GE(kMaxObjectCount, n);
    // These are objects that have never been allocated, so they don't affect
    // the number of allocated groups or objects.
    if (n != 0)
      FreePush(current_object_, n, false, false);
  }

  uint8* page = reinterpret_cast<uint8*>(
      ::VirtualAlloc(NULL, page_size_, MEM_COMMIT, PAGE_READWRITE));
  if (page == NULL)
    return false;

  // The end of the object range is computed relative to the start of the
  // page, as the page itself need not be kObjectSize-aligned.
  end_object_ = page + objects_per_page_ * kObjectSize;

  // Keep a pointer to the previous page, and set up the next object pointer.
  uint8* prev = page + page_size_ - sizeof(void*);
  *reinterpret_cast<uint8**>(prev) = current_page_;
  current_page_ = page;
  current_object_ = page;

  // Update statistics.
  // NOTE: This can also be moved out from under lock_.
  stats_.Lock();
  stats_.Increment<&PageAllocatorStatistics::page_count>(1);
  stats_.Unlock();

  return true;
}

template<typename ObjectType, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
ObjectType*
TypedPageAllocator<ObjectType, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  void* object = Super::Allocate(count);
  return reinterpret_cast<ObjectType*>(object);
}

template<typename ObjectType, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
ObjectType*
TypedPageAllocator<ObjectType, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count, size_t* received) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  DCHECK_NE(static_cast<size_t*>(NULL), received);
  void* object = Super::Allocate(count, received);
  return reinterpret_cast<ObjectType*>(object);
}

template<typename ObjectType, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void TypedPageAllocator<ObjectType, kMaxObjectCount, kPageSize, kKeepStats>::
    Free(ObjectType* object, size_t count) {
  DCHECK_NE(static_cast<ObjectType*>(NULL), object);
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  Super::Free(object, count);
}

}  // namespace asan
}  // namespace agent

#endif  // SYZYGY_AGENT_ASAN_PAGE_ALLOCATOR_IMPL_H_