// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/heaps/zebra_block_heap.h"

#include <algorithm>

#include "syzygy/common/align.h"
#include "syzygy/common/asan_parameters.h"

namespace agent {
namespace asan {
namespace heaps {

const size_t ZebraBlockHeap::kSlabSize = 2 * GetPageSize();

const size_t ZebraBlockHeap::kMaximumAllocationSize = GetPageSize();

const size_t ZebraBlockHeap::kMaximumBlockAllocationSize =
    GetPageSize() - sizeof(BlockHeader);
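
// Slabs are the unit of allocation: each one spans two pages, an "even"
// page that receives the allocation at its end and an "odd" page that
// serves as the block's right redzone. For example, with a typical 4KB
// page size a slab occupies 8192 bytes and no allocation may exceed
// 4096 bytes.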

ZebraBlockHeap::ZebraBlockHeap(size_t heap_size,
                               MemoryNotifierInterface* memory_notifier,
                               HeapInterface* internal_heap)
    : heap_address_(NULL),
      // Round heap_size up to a multiple of kSlabSize to avoid an incomplete
      // slab at the end of the reserved memory.
      heap_size_(::common::AlignUp(heap_size, kSlabSize)),
      slab_count_(heap_size_ / kSlabSize),
      slab_info_(HeapAllocator<SlabInfo>(internal_heap)),
      quarantine_ratio_(::common::kDefaultZebraBlockHeapQuarantineRatio),
      free_slabs_(slab_count_,
                  HeapAllocator<size_t>(internal_heap)),
      quarantine_(slab_count_,
                  HeapAllocator<size_t>(internal_heap)),
      memory_notifier_(memory_notifier) {
  DCHECK_NE(reinterpret_cast<MemoryNotifierInterface*>(NULL), memory_notifier);

  // Allocate the chunk of memory directly from the OS.
  heap_address_ = reinterpret_cast<uint8*>(
      ::VirtualAlloc(NULL,
                     heap_size_,
                     MEM_RESERVE | MEM_COMMIT,
                     PAGE_READWRITE));
  CHECK_NE(reinterpret_cast<uint8*>(NULL), heap_address_);
  DCHECK(::common::IsAligned(heap_address_, GetPageSize()));
  memory_notifier_->NotifyFutureHeapUse(heap_address_, heap_size_);

  // Initialize the metadata describing the state of our heap.
  slab_info_.resize(slab_count_);
  for (size_t i = 0; i < slab_count_; ++i) {
    slab_info_[i].state = kFreeSlab;
    ::memset(&slab_info_[i].info, 0, sizeof(slab_info_[i].info));
    free_slabs_.push(i);
  }
}

ZebraBlockHeap::~ZebraBlockHeap() {
  DCHECK_NE(reinterpret_cast<uint8*>(NULL), heap_address_);
  CHECK_NE(FALSE, ::VirtualFree(heap_address_, 0, MEM_RELEASE));
  memory_notifier_->NotifyReturnedToOS(heap_address_, heap_size_);
  heap_address_ = NULL;
}

HeapType ZebraBlockHeap::GetHeapType() const {
  return kZebraBlockHeap;
}

uint32 ZebraBlockHeap::GetHeapFeatures() const {
  return kHeapSupportsIsAllocated | kHeapReportsReservations |
         kHeapSupportsGetAllocationSize;
}

void* ZebraBlockHeap::Allocate(size_t bytes) {
  SlabInfo* slab_info = AllocateImpl(bytes);
  if (slab_info == NULL)
    return NULL;
  return slab_info->info.header;
}

bool ZebraBlockHeap::Free(void* alloc) {
  if (alloc == NULL)
    return true;
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(alloc);
  if (slab_index == kInvalidSlabIndex)
    return false;
  if (slab_info_[slab_index].info.header != alloc)
    return false;

  // Memory must be released from the quarantine before calling Free.
  DCHECK_NE(kQuarantinedSlab, slab_info_[slab_index].state);

  if (slab_info_[slab_index].state == kFreeSlab)
    return false;

  // Make the slab available for allocations.
  slab_info_[slab_index].state = kFreeSlab;
  ::memset(&slab_info_[slab_index].info, 0,
           sizeof(slab_info_[slab_index].info));
  free_slabs_.push(slab_index);
  return true;
}

bool ZebraBlockHeap::IsAllocated(const void* alloc) {
  if (alloc == NULL)
    return false;
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(alloc);
  if (slab_index == kInvalidSlabIndex)
    return false;
  if (slab_info_[slab_index].state == kFreeSlab)
    return false;
  if (slab_info_[slab_index].info.header != alloc)
    return false;
  return true;
}

size_t ZebraBlockHeap::GetAllocationSize(const void* alloc) {
  if (alloc == NULL)
    return kUnknownSize;
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(alloc);
  if (slab_index == kInvalidSlabIndex)
    return kUnknownSize;
  if (slab_info_[slab_index].state == kFreeSlab)
    return kUnknownSize;
  if (slab_info_[slab_index].info.header != alloc)
    return kUnknownSize;
  return slab_info_[slab_index].info.block_size;
}

void ZebraBlockHeap::Lock() {
  lock_.Acquire();
}

void ZebraBlockHeap::Unlock() {
  lock_.Release();
}

bool ZebraBlockHeap::TryLock() {
  return lock_.Try();
}

void* ZebraBlockHeap::AllocateBlock(size_t size,
                                    size_t min_left_redzone_size,
                                    size_t min_right_redzone_size,
                                    BlockLayout* layout) {
  DCHECK_NE(static_cast<BlockLayout*>(nullptr), layout);
  // Abort if the body and the minimum left redzone do not fit in a page, or
  // if the minimum right redzone exceeds a page. Even if such an allocation
  // were possible, it would lead to a non-standard block layout.
  if (min_left_redzone_size + size > GetPageSize())
    return NULL;
  if (min_right_redzone_size > GetPageSize())
    return NULL;

  // Plan the block layout.
  if (!BlockPlanLayout(GetPageSize(),
                       kShadowRatio,
                       size,
                       min_left_redzone_size,
                       std::max(GetPageSize(), min_right_redzone_size),
                       layout)) {
    return nullptr;
  }

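  // A zebra block must span exactly one slab: the body and left redzone in
  // the "even" page, and a right redzone covering the entire "odd" page.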
  if (layout->block_size != kSlabSize)
    return nullptr;
  size_t right_redzone_size = layout->trailer_size +
      layout->trailer_padding_size;
  // If the right redzone is smaller than a page then part of the body lies
  // inside the "odd" page.
  if (right_redzone_size < GetPageSize())
    return nullptr;
  // There should be fewer than kShadowRatio bytes between the end of the
  // body and the "odd" page.
  if (right_redzone_size - GetPageSize() >= kShadowRatio)
    return nullptr;

  // Allocate space for the block, and update the slab info to reflect the
  // right redzone.
  void* alloc = nullptr;
  SlabInfo* slab_info = AllocateImpl(GetPageSize());
  if (slab_info != nullptr) {
    slab_info->info.block_size = layout->block_size;
    slab_info->info.header_size = layout->header_size +
        layout->header_padding_size;
    slab_info->info.trailer_size = layout->trailer_size +
        layout->trailer_padding_size;
    slab_info->info.is_nested = false;
    alloc = slab_info->info.header;
  }

  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(alloc) % kShadowRatio);
  return alloc;
}

bool ZebraBlockHeap::FreeBlock(const BlockInfo& block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), block_info.header);
  if (!Free(block_info.header))
    return false;
  return true;
}

bool ZebraBlockHeap::Push(const CompactBlockInfo& info) {
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(info.header);
  if (slab_index == kInvalidSlabIndex)
    return false;
  if (slab_info_[slab_index].state != kAllocatedSlab)
    return false;
  if (::memcmp(&slab_info_[slab_index].info, &info,
               sizeof(info)) != 0) {
    return false;
  }

  quarantine_.push(slab_index);
  slab_info_[slab_index].state = kQuarantinedSlab;
  return true;
}

bool ZebraBlockHeap::Pop(CompactBlockInfo* info) {
  ::common::AutoRecursiveLock lock(lock_);

  if (QuarantineInvariantIsSatisfied())
    return false;

  size_t slab_index = quarantine_.front();
  DCHECK_NE(kInvalidSlabIndex, slab_index);
  quarantine_.pop();

  DCHECK_EQ(kQuarantinedSlab, slab_info_[slab_index].state);
  slab_info_[slab_index].state = kAllocatedSlab;
  *info = slab_info_[slab_index].info;

  return true;
}

void ZebraBlockHeap::Empty(ObjectVector* infos) {
  ::common::AutoRecursiveLock lock(lock_);
  while (!quarantine_.empty()) {
    size_t slab_index = quarantine_.front();
    DCHECK_NE(kInvalidSlabIndex, slab_index);
    quarantine_.pop();

    // Do not free the slab, only release it from the quarantine.
    slab_info_[slab_index].state = kAllocatedSlab;
    infos->push_back(slab_info_[slab_index].info);
  }
}

size_t ZebraBlockHeap::GetCount() {
  ::common::AutoRecursiveLock lock(lock_);
  return quarantine_.size();
}

void ZebraBlockHeap::set_quarantine_ratio(float quarantine_ratio) {
  DCHECK_LE(0, quarantine_ratio);
  DCHECK_GE(1, quarantine_ratio);
  ::common::AutoRecursiveLock lock(lock_);
  quarantine_ratio_ = quarantine_ratio;
}

ZebraBlockHeap::SlabInfo* ZebraBlockHeap::AllocateImpl(size_t bytes) {
  if (bytes == 0 || bytes > GetPageSize())
    return NULL;
  ::common::AutoRecursiveLock lock(lock_);

  if (free_slabs_.empty())
    return NULL;

  size_t slab_index = free_slabs_.front();
  DCHECK_NE(kInvalidSlabIndex, slab_index);
  free_slabs_.pop();
  uint8* slab_address = GetSlabAddress(slab_index);
  DCHECK_NE(reinterpret_cast<uint8*>(NULL), slab_address);

  // Push the allocation to the end of the even page.
  uint8* alloc = slab_address + GetPageSize() - bytes;
  alloc = ::common::AlignDown(alloc, kShadowRatio);
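  // For example, assuming a 4KB page and kShadowRatio == 8, a 100-byte
  // request yields slab_address + 4096 - 100 == slab_address + 3996, which
  // is aligned down to slab_address + 3992.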

  // Update the slab info.
  SlabInfo* slab_info = &slab_info_[slab_index];
  slab_info->state = kAllocatedSlab;
  slab_info->info.header = reinterpret_cast<BlockHeader*>(alloc);
  slab_info->info.block_size = bytes;
  slab_info->info.header_size = 0;
  slab_info->info.trailer_size = 0;
  slab_info->info.is_nested = false;

  return slab_info;
}

bool ZebraBlockHeap::QuarantineInvariantIsSatisfied() {
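  // The invariant holds while the quarantined fraction of all slabs is at
  // most quarantine_ratio_; e.g., with slab_count_ == 1024 and a ratio of
  // 0.25f, up to 256 slabs may be quarantined at once.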
  return quarantine_.empty() ||
      (quarantine_.size() / static_cast<float>(slab_count_) <=
          quarantine_ratio_);
}

uint8* ZebraBlockHeap::GetSlabAddress(size_t index) {
  if (index >= slab_count_)
    return NULL;
  return heap_address_ + index * kSlabSize;
}

size_t ZebraBlockHeap::GetSlabIndex(const void* address) {
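  // Any address within a slab maps to that slab's index; e.g., with a slab
  // size of 8192 bytes, an address 20000 bytes past heap_address_ resolves
  // to slab index 2.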
  if (address < heap_address_ || address >= heap_address_ + heap_size_)
    return kInvalidSlabIndex;
  return (reinterpret_cast<const uint8*>(address) - heap_address_) / kSlabSize;
}

}  // namespace heaps
}  // namespace asan
}  // namespace agent