// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Implementation details for PageAllocator. This is not meant to be
// included directly.

#ifndef SYZYGY_AGENT_ASAN_PAGE_ALLOCATOR_IMPL_H_
#define SYZYGY_AGENT_ASAN_PAGE_ALLOCATOR_IMPL_H_

#include <windows.h>

#include <algorithm>

#include "base/logging.h"
#include "syzygy/agent/asan/constants.h"
#include "syzygy/common/align.h"

namespace agent {
namespace asan {

namespace detail {

// Empty statistics helper.
template<> struct PageAllocatorStatisticsHelper<false> {
  void Lock() { }
  void Unlock() { }
  template<size_t PageAllocatorStatistics::*stat> void Increment(size_t) { }
  template<size_t PageAllocatorStatistics::*stat> void Decrement(size_t) { }
  void GetStatistics(PageAllocatorStatistics* stats) const {
    DCHECK_NE(static_cast<PageAllocatorStatistics*>(nullptr), stats);
    ::memset(stats, 0, sizeof(*stats));
  }
};

// Actual statistics helper.
template<> struct PageAllocatorStatisticsHelper<true> {
  PageAllocatorStatisticsHelper() {
    ::memset(&stats, 0, sizeof(stats));
  }

  void Lock() { lock.Acquire(); }
  void Unlock() { lock.Release(); }

  template<size_t PageAllocatorStatistics::*member>
  void Increment(size_t amount) {
    lock.AssertAcquired();
    stats.*member += amount;
  }

  template<size_t PageAllocatorStatistics::*member>
  void Decrement(size_t amount) {
    lock.AssertAcquired();
    stats.*member -= amount;
  }

  void GetStatistics(PageAllocatorStatistics* stats) const {
    lock.AssertAcquired();
    DCHECK_NE(static_cast<PageAllocatorStatistics*>(nullptr), stats);
    *stats = this->stats;
  }

  base::Lock lock;
  PageAllocatorStatistics stats;
};
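
// Illustrative note (not part of the original source): call sites below always
// bracket counter updates with Lock()/Unlock(), e.g.
//   stats_.Lock();
//   stats_.Increment<&PageAllocatorStatistics::page_count>(1);
//   stats_.Unlock();
// With kKeepStats == false the <false> specialization above turns these calls
// into no-ops, so the same call sites compile away when statistics are
// disabled.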

// This is the internal object type used by the page allocator. This allows
// easy free list chaining.
template<size_t kObjectSize>
struct PageAllocatorObject {
  typedef PageAllocatorObject<kObjectSize> Self;

  union {
    uint8 object_data[kObjectSize];
    Self* next_free;
  };
};

template<size_t kMinPageSize>
struct PageAllocatorPageSize {
  // The kPageSize calculation below presumes a 64KB allocation
  // granularity. If this changes for whatever reason the logic needs
  // to be updated.
  static_assert(64 * 1024 == kUsualAllocationGranularity,
                "Logic out of sync with allocation granularity.");

  // Round up to the nearest multiple of the allocation granularity.
  static const size_t kSlabSize =
      (kMinPageSize + kUsualAllocationGranularity - 1) &
      ~(kUsualAllocationGranularity - 1);

  // Calculate a page size that divides the allocation granularity, or that
  // is a multiple of it.
  static const size_t kPageSize =
      (kMinPageSize <= (1<<12) ? (1<<12) :  // 4KB.
      (kMinPageSize <= (1<<13) ? (1<<13) :  // 8KB.
      (kMinPageSize <= (1<<14) ? (1<<14) :  // 16KB.
      (kMinPageSize <= (1<<15) ? (1<<15) :  // 32KB.
          kSlabSize))));
};
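
// Illustrative example (not in the original source): for kMinPageSize = 9000,
// kSlabSize rounds up to 64KB and kPageSize selects the 16KB bucket, so each
// slab holds four pages. For kMinPageSize = 100000 none of the buckets fit and
// kPageSize falls through to kSlabSize = 128KB, a multiple of the allocation
// granularity.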

// The internal page type used by the page allocator. Allows easy page list
// chaining.
template<size_t kObjectSize, size_t kMinPageSize>
struct PageAllocatorPage {
  typedef PageAllocatorPage<kObjectSize, kMinPageSize> Self;
  typedef PageAllocatorObject<kObjectSize> Object;
  typedef PageAllocatorPageSize<kMinPageSize> PageSize;

  // The OS reserves virtual memory in chunks of 64KB, and backs them with real
  // memory in 4KB pages. We want to use a page size that is a multiple of 4KB,
  // and a divisor of 64KB, or a multiple of 64KB.
  static const size_t kPageSize = PageSize::kPageSize;
  static const size_t kSlabSize = PageSize::kSlabSize;
  static const size_t kPagesPerSlab = kSlabSize / kPageSize;

  static const size_t kObjectsPerPage =
      (kPageSize - sizeof(void*)) / sizeof(Object);

  const Object* end() const {
    return objects + kObjectsPerPage;
  }

  union {
    struct {
      Object objects[kObjectsPerPage];
      Self* prev_page;
    };
    uint8 unused[kPageSize];
  };
};
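
// Illustrative example (not in the original source): with kObjectSize = 8 and
// a 4KB page on a 32-bit build, sizeof(Object) == 8 and kObjectsPerPage ==
// (4096 - 4) / 8 == 511. The trailing prev_page pointer and any slack bytes
// are absorbed by the 'unused' arm of the union, so sizeof(Page) equals
// kPageSize here.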

}  // namespace detail

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    PageAllocator()
    : page_count_(0), slab_(nullptr), slab_cursor_(nullptr), page_(nullptr),
      object_(nullptr) {
  static_assert(kPageSize > kObjectSize,
                "Page size should be bigger than the object size.");
  static_assert(kObjectSize >= sizeof(uintptr_t), "Object size is too small.");
  static_assert(kObjectSize <= sizeof(Object), "Object is too small.");
  static_assert(sizeof(Object) < kObjectSize + 4, "Object is too large.");
  static_assert(kPageSize <= sizeof(Page), "Page is too small.");
  static_assert(sizeof(Page) % kUsualPageSize == 0, "Invalid page size.");

  // Clear the freelists.
  ::memset(free_, 0, sizeof(free_));
}
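
// Illustrative usage (hypothetical parameters, not from the original source):
//   typedef PageAllocator<16, 4, 16 * 1024, true> MyAllocator;
// creates an allocator that hands out groups of 1 to 4 sixteen-byte objects
// from 16KB pages and keeps statistics.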

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    ~PageAllocator() {
  // Iterate over the pages and take note of the slab addresses. These will
  // be pages whose root address is a multiple of the allocation granularity.
  Page* page = page_;
  size_t page_count = 0;
  size_t slab_count = 0;
  while (page) {
    // Pages are chained in reverse order, and allocated moving forward through
    // a slab. Thus it is safe for us to remove the entire slab when we
    // encounter the first page within it, as we'll already have iterated
    // through the other pages in the slab.
    ++page_count;
    Page* prev_page = page->prev_page;
    if (::common::IsAligned(page, kUsualAllocationGranularity)) {
      ++slab_count;
      CHECK_EQ(TRUE, ::VirtualFree(page, 0, MEM_RELEASE));
    }
    page = prev_page;
  }
  DCHECK_EQ(page_count_, page_count);

  // Determine how many slabs we expected to see and confirm that we saw that
  // many.
  size_t expected_slab_count = (page_count + Page::kPagesPerSlab - 1) /
      Page::kPagesPerSlab;
  DCHECK_EQ(expected_slab_count, slab_count);
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void* PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count) {
  size_t received = 0;
  void* alloc = Allocate(count, &received);

  // If there were leftover objects in the allocation then shard it and
  // add them to the appropriate free list.
  if (count < received) {
    size_t n = received - count;
    Object* remaining = reinterpret_cast<Object*>(alloc) + count;
    // These objects are part of an active allocation and are being returned.
    // Thus we don't decrement the number of allocated groups, but we do
    // decrement the number of allocated objects.
    FreePush(remaining, n, false, true);
  }

  return alloc;
}
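
// Illustrative example (not in the original source): with kMaxObjectCount = 4,
// a request for 2 objects may be satisfied from the free list of 3-object
// groups; the single leftover object is then pushed onto free_[0] by the
// sharding above.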

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void* PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count, size_t* received) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  DCHECK_NE(static_cast<size_t*>(nullptr), received);

  void* object = nullptr;

  // Look to the lists of freed objects and try to use one of those. Use the
  // first one that's big enough, and stuff the leftover objects into another
  // freed list.
  for (size_t n = count; n <= kMaxObjectCount; ++n) {
    // This is racy and can end up lying to us. However, it's faster to first
    // check this outside of the lock. We do this properly afterwards.
    if (free_[n - 1] == nullptr)
      continue;

    // Unlink the objects from the free list of size n. This actually acquires
    // the appropriate free-list lock.
    object = FreePop(n);
    if (object == nullptr)
      continue;

    // Update statistics.
    stats_.Lock();
    stats_.Increment<&PageAllocatorStatistics::allocated_groups>(1);
    stats_.Increment<&PageAllocatorStatistics::allocated_objects>(n);
    stats_.Unlock();

    *received = n;
    return object;
  }

  // Get the object from a page. Try the active page first and allocate a new
  // one if need be.
  {
    base::AutoLock lock(lock_);

    // If the current page is not big enough for the requested allocation then
    // get a new page.
    if (page_ == nullptr || page_->end() - object_ < count) {
      if (!AllocatePageLocked())
        return nullptr;
    }

    DCHECK_NE(static_cast<Page*>(nullptr), page_);
    DCHECK_LT(object_, page_->end());

    // Grab a copy of the cursor and advance it.
    object = object_;
    object_ += count;
  }

  // Update statistics.
  stats_.Lock();
  stats_.Increment<&PageAllocatorStatistics::allocated_groups>(1);
  stats_.Increment<&PageAllocatorStatistics::allocated_objects>(count);
  stats_.Unlock();

  *received = count;
  return object;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    Free(void* object, size_t count) {
  DCHECK_NE(static_cast<void*>(nullptr), object);
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);

#ifndef NDEBUG
  // These checks are expensive so only run in debug builds.
  // Ensure the block is currently allocated by the allocator.
  DCHECK_EQ(1, AllocationStatus(object, count));
#endif

  // Add this object to the list of freed objects for this size class.
  // This is a simple allocation that is being returned so both allocated
  // groups and objects are decremented.
  FreePush(reinterpret_cast<Object*>(object), count, true, true);
}
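
// Illustrative usage of the raw interface (hypothetical type, not from the
// original source):
//   PageAllocator<sizeof(Node), 1, 4096, false> allocator;
//   Node* node = reinterpret_cast<Node*>(allocator.Allocate(1));
//   // ... use node ...
//   allocator.Free(node, 1);
// Each group is returned with the same count it was handed out with; the
// allocator returns raw memory and never runs constructors or destructors.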

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    GetStatistics(PageAllocatorStatistics* stats) {
  DCHECK_NE(static_cast<PageAllocatorStatistics*>(nullptr), stats);

  stats_.Lock();
  stats_.GetStatistics(stats);
  stats_.Unlock();
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
int PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    AllocationStatus(const void* object, size_t count) {
  // If the memory was never allocated then it's not under this allocator's
  // management.
  if (!WasOnceAllocated(object, count))
    return -1;
  // The memory has been allocated, but it may since have been freed.
  if (IsInFreeList(object, count))
    return 0;
  // It's been allocated and it's not in the freed list. Must still be
  // a valid allocation!
  return 1;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
bool PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    WasOnceAllocated(const void* object, size_t count) {
  if (object == nullptr || count == 0)
    return false;

  base::AutoLock lock(lock_);

  // Look for a page owning this object.
  const Object* object_begin = reinterpret_cast<const Object*>(object);
  const Object* object_end = object_begin + count;
  Page* page = page_;
  while (page) {
    // If this page does not contain the objects entirely, then skip to the
    // next page.
    if (object_begin < page->objects || object_end > page->end()) {
      page = page->prev_page;
      continue;
    }

    // If the allocation hasn't yet been handed out then this page does not
    // own it.
    if (page == page_ && object_end > object_)
      return false;

    // Determine if it's aligned as expected.
    size_t offset =
        reinterpret_cast<const uint8*>(object) -
        reinterpret_cast<const uint8*>(page);
    if ((offset % sizeof(Object)) != 0)
      return false;

    // This allocation must have been previously handed out at some point.
    // Note that this does not allow the detection of double frees. Nor does
    // it allow us to determine if the object was the return address of an
    // allocation, or simply lies somewhere within an allocated chunk.
    return true;
  }

  // The pages have been exhausted and no match was found.
  return false;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
bool PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    IsInFreeList(const void* object, size_t count) {
  if (object == nullptr)
    return false;

  DCHECK_NE(static_cast<void*>(nullptr), object);

  // Determine the range of size classes to investigate.
  size_t n_min = 1;
  size_t n_max = kMaxObjectCount;
  if (count != 0) {
    n_min = count;
    n_max = count;
  }

  // Iterate over the applicable size classes.
  for (size_t n = n_min; n <= n_max; ++n) {
    base::AutoLock lock(free_lock_[n - 1]);

    // Walk the list for this size class.
    Object* free = free_[n - 1];
    while (free) {
      if (free == object)
        return true;

      // Jump to the next freed object in this size class.
      free = free->next_free;
    }
  }

  // The freed objects have been exhausted and no match was found.
  return false;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
typename
PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::Object*
PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    FreePop(size_t count) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);

  Object* object = nullptr;
  {
    base::AutoLock lock(free_lock_[count - 1]);
    object = free_[count - 1];
    if (object)
      free_[count - 1] = object->next_free;
  }

  // The racy check in Allocate() means this list may have been emptied before
  // the lock was taken. In that case there is nothing to pop and no statistics
  // to update.
  if (object == nullptr)
    return nullptr;
  object->next_free = nullptr;

  // Update statistics.
  stats_.Lock();
  stats_.Decrement<&PageAllocatorStatistics::freed_groups>(1);
  stats_.Decrement<&PageAllocatorStatistics::freed_objects>(count);
  stats_.Unlock();

  return object;
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    FreePush(Object* object, size_t count,
             bool decr_alloc_groups, bool decr_alloc_objects) {
  DCHECK_NE(static_cast<void*>(nullptr), object);
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);

  {
    base::AutoLock lock(free_lock_[count - 1]);
    object->next_free = free_[count - 1];
    free_[count - 1] = object;
  }

  // Update statistics.
  stats_.Lock();
  if (decr_alloc_groups)
    stats_.Decrement<&PageAllocatorStatistics::allocated_groups>(1);
  if (decr_alloc_objects)
    stats_.Decrement<&PageAllocatorStatistics::allocated_objects>(count);
  stats_.Increment<&PageAllocatorStatistics::freed_groups>(1);
  stats_.Increment<&PageAllocatorStatistics::freed_objects>(count);
  stats_.Unlock();
}

template<size_t kObjectSize, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
bool PageAllocator<kObjectSize, kMaxObjectCount, kPageSize, kKeepStats>::
    AllocatePageLocked() {
  lock_.AssertAcquired();

  // If there are remaining objects stuff them into the appropriately sized
  // free list.
  // NOTE: If this is a point of contention it could be moved to be outside
  //     the scope of lock_.
  if (page_ && object_ < page_->end()) {
    size_t n = page_->end() - object_;
    DCHECK_LT(0u, n);
    DCHECK_GE(kMaxObjectCount, n);

    // These are objects that have never been allocated, so don't affect the
    // number of allocated groups or objects.
    FreePush(object_, n, false, false);
  }

  Page* slab_end = slab_ + Page::kPagesPerSlab;

  // Grab a new slab if needed.
  if (slab_ == nullptr || slab_cursor_ >= slab_end) {
    void* slab = ::VirtualAlloc(
        nullptr, Page::kSlabSize, MEM_RESERVE, PAGE_NOACCESS);
    if (slab == nullptr)
      return false;

    // Update the slab and next page cursor.
    slab_ = reinterpret_cast<Page*>(slab);
    slab_cursor_ = slab_;
  }

  // Commit the next page. If committing fails then bail.
  Page* page = reinterpret_cast<Page*>(::VirtualAlloc(
      slab_cursor_, sizeof(Page), MEM_COMMIT, PAGE_READWRITE));
  if (page == nullptr)
    return false;
  DCHECK_EQ(page, slab_cursor_);

  // Update the slab cursor.
  ++slab_cursor_;

  // Keep a pointer to the previous page, and set up the next object pointer.
  page->prev_page = page_;
  page_ = page;
  object_ = page->objects;
  ++page_count_;

  // Update statistics.
  // NOTE: This can also be moved out from under lock_.
  stats_.Lock();
  stats_.Increment<&PageAllocatorStatistics::page_count>(1);
  stats_.Unlock();

  return true;
}
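
// Illustrative note (not in the original source): with a 16KB page and the
// usual 64KB allocation granularity, each 64KB slab is reserved once with
// VirtualAlloc(MEM_RESERVE) and its four 16KB pages are then committed one at
// a time by successive calls to AllocatePageLocked() before a new slab is
// reserved. The destructor relies on this layout: only pages aligned to the
// allocation granularity are passed to VirtualFree(MEM_RELEASE), which
// releases the entire slab they start.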

template<typename ObjectType, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
ObjectType*
TypedPageAllocator<ObjectType, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  void* object = Super::Allocate(count);
  return reinterpret_cast<ObjectType*>(object);
}

template<typename ObjectType, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
ObjectType*
TypedPageAllocator<ObjectType, kMaxObjectCount, kPageSize, kKeepStats>::
    Allocate(size_t count, size_t* received) {
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  DCHECK_NE(static_cast<size_t*>(nullptr), received);
  void* object = Super::Allocate(count, received);
  return reinterpret_cast<ObjectType*>(object);
}

template<typename ObjectType, size_t kMaxObjectCount, size_t kPageSize,
         bool kKeepStats>
void TypedPageAllocator<ObjectType, kMaxObjectCount, kPageSize, kKeepStats>::
    Free(ObjectType* object, size_t count) {
  DCHECK_NE(static_cast<ObjectType*>(nullptr), object);
  DCHECK_LT(0u, count);
  DCHECK_GE(kMaxObjectCount, count);
  Super::Free(object, count);
}
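
// Illustrative usage of the typed wrapper (hypothetical object type, not from
// the original source):
//   struct BlockInfo { uint32 size; uint32 flags; };
//   TypedPageAllocator<BlockInfo, 1, 4096, false> infos;
//   BlockInfo* info = infos.Allocate(1);
//   infos.Free(info, 1);
// The wrapper simply forwards to the underlying PageAllocator and adds the
// casts, so the caller is still responsible for any initialization of the
// returned memory.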

}  // namespace asan
}  // namespace agent

#endif  // SYZYGY_AGENT_ASAN_PAGE_ALLOCATOR_IMPL_H_