// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/heaps/zebra_block_heap.h"

#include <algorithm>

#include "syzygy/common/align.h"
#include "syzygy/common/asan_parameters.h"

namespace agent {
namespace asan {
namespace heaps {

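// The zebra heap divides its reservation into two-page slabs. Allocations
// are placed at the end of the first ("even") page of a slab, so that an
// overflow runs directly into the second ("odd") page, which is reserved
// for the right redzone.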
const size_t ZebraBlockHeap::kSlabSize = 2 * GetPageSize();

const size_t ZebraBlockHeap::kMaximumAllocationSize = GetPageSize();

const size_t ZebraBlockHeap::kMaximumBlockAllocationSize =
    GetPageSize() - sizeof(BlockHeader);

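// The constructor reserves and commits the entire heap up front and seeds
// the free-slab list. A minimal usage sketch (assuming |notifier| and
// |internal_heap| are valid MemoryNotifierInterface and HeapInterface
// implementations; in production the Asan runtime supplies these):
//
//   ZebraBlockHeap heap(16 * 1024 * 1024, &notifier, &internal_heap);
//   void* alloc = heap.Allocate(128);
//   if (alloc != nullptr)
//     CHECK(heap.Free(alloc));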
ZebraBlockHeap::ZebraBlockHeap(size_t heap_size,
                               MemoryNotifierInterface* memory_notifier,
                               HeapInterface* internal_heap)
    : heap_address_(nullptr),
      // Round heap_size up to a multiple of kSlabSize to avoid an incomplete
      // slab at the end of the reserved memory.
      heap_size_(::common::AlignUp(heap_size, kSlabSize)),
      slab_count_(heap_size_ / kSlabSize),
      slab_info_(HeapAllocator<SlabInfo>(internal_heap)),
      quarantine_ratio_(::common::kDefaultZebraBlockHeapQuarantineRatio),
      free_slabs_(slab_count_, HeapAllocator<size_t>(internal_heap)),
      quarantine_(slab_count_, HeapAllocator<size_t>(internal_heap)),
      memory_notifier_(memory_notifier) {
  DCHECK_NE(static_cast<MemoryNotifierInterface*>(nullptr), memory_notifier);

  // Allocate the chunk of memory directly from the OS.
  heap_address_ = static_cast<uint8_t*>(::VirtualAlloc(
      nullptr, heap_size_, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE));
  CHECK_NE(static_cast<uint8_t*>(nullptr), heap_address_);
  DCHECK(::common::IsAligned(heap_address_, GetPageSize()));
  memory_notifier_->NotifyFutureHeapUse(heap_address_, heap_size_);

  // Initialize the metadata describing the state of our heap.
  slab_info_.resize(slab_count_);
  for (size_t i = 0; i < slab_count_; ++i) {
    slab_info_[i].state = kFreeSlab;
    ::memset(&slab_info_[i].info, 0, sizeof(slab_info_[i].info));
    free_slabs_.push(i);
  }
}

ZebraBlockHeap::~ZebraBlockHeap() {
  DCHECK_NE(static_cast<uint8_t*>(nullptr), heap_address_);
  CHECK_NE(FALSE, ::VirtualFree(heap_address_, 0, MEM_RELEASE));
  memory_notifier_->NotifyReturnedToOS(heap_address_, heap_size_);
  heap_address_ = nullptr;
}

HeapType ZebraBlockHeap::GetHeapType() const {
  return kZebraBlockHeap;
}

uint32_t ZebraBlockHeap::GetHeapFeatures() const {
  return kHeapSupportsIsAllocated | kHeapReportsReservations |
         kHeapSupportsGetAllocationSize;
}

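// Carves a |bytes|-sized allocation out of a free slab. Returns nullptr if
// the request does not fit in a single page or no free slab is available.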
void* ZebraBlockHeap::Allocate(uint32_t bytes) {
  SlabInfo* slab_info = AllocateImpl(bytes);
  if (slab_info == nullptr)
    return nullptr;
  return slab_info->info.header;
}

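// Returns a slab to the free list. Fails (returns false) if |alloc| does
// not match a live allocation in this heap; freeing a quarantined
// allocation is a contract violation (see the DCHECK below).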
bool ZebraBlockHeap::Free(void* alloc) {
  if (alloc == nullptr)
    return true;
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(alloc);
  if (slab_index == kInvalidSlabIndex)
    return false;
  if (slab_info_[slab_index].info.header != alloc)
    return false;

  // Memory must be released from the quarantine before calling Free.
  DCHECK_NE(kQuarantinedSlab, slab_info_[slab_index].state);

  if (slab_info_[slab_index].state == kFreeSlab)
    return false;

  // Make the slab available for allocations.
  slab_info_[slab_index].state = kFreeSlab;
  ::memset(&slab_info_[slab_index].info, 0,
           sizeof(slab_info_[slab_index].info));
  free_slabs_.push(slab_index);
  return true;
}

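// Note that quarantined allocations are still reported as allocated; only
// slabs in the kFreeSlab state fail this check.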
bool ZebraBlockHeap::IsAllocated(const void* alloc) {
  if (alloc == nullptr)
    return false;
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(alloc);
  if (slab_index == kInvalidSlabIndex)
    return false;
  if (slab_info_[slab_index].state == kFreeSlab)
    return false;
  if (slab_info_[slab_index].info.header != alloc)
    return false;
  return true;
}

uint32_t ZebraBlockHeap::GetAllocationSize(const void* alloc) {
  if (alloc == nullptr)
    return kUnknownSize;
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(alloc);
  if (slab_index == kInvalidSlabIndex)
    return kUnknownSize;
  if (slab_info_[slab_index].state == kFreeSlab)
    return kUnknownSize;
  if (slab_info_[slab_index].info.header != alloc)
    return kUnknownSize;
  return slab_info_[slab_index].info.block_size;
}

void ZebraBlockHeap::Lock() {
  lock_.Acquire();
}

void ZebraBlockHeap::Unlock() {
  lock_.Release();
}

bool ZebraBlockHeap::TryLock() {
  return lock_.Try();
}

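// Allocates a full block (header + body + trailer) spanning exactly one
// slab: the header and body live in the even page, and the right redzone
// (trailer plus trailer padding) covers the entire odd page. Requesting a
// right redzone of at least one page forces BlockPlanLayout to produce this
// shape; any other layout is rejected below.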
void* ZebraBlockHeap::AllocateBlock(uint32_t size,
                                    uint32_t min_left_redzone_size,
                                    uint32_t min_right_redzone_size,
                                    BlockLayout* layout) {
  DCHECK_NE(static_cast<BlockLayout*>(nullptr), layout);
  // Abort if the redzones do not fit in a page. Even if such an allocation
  // were possible, it would lead to a non-standard block layout.
  if (min_left_redzone_size + size > GetPageSize())
    return nullptr;
  if (min_right_redzone_size > GetPageSize())
    return nullptr;

  // Plan the block layout.
  if (!BlockPlanLayout(static_cast<uint32_t>(GetPageSize()),
                       kShadowRatio,
                       size,
                       min_left_redzone_size,
                       std::max(static_cast<uint32_t>(GetPageSize()),
                                min_right_redzone_size),
                       layout)) {
    return nullptr;
  }

  if (layout->block_size != kSlabSize)
    return nullptr;
  uint32_t right_redzone_size = layout->trailer_size +
      layout->trailer_padding_size;
  // Reject layouts where part of the body would lie inside the "odd" page.
  if (right_redzone_size < GetPageSize())
    return nullptr;
  // There should be fewer than kShadowRatio bytes between the end of the
  // body and the "odd" page.
  if (right_redzone_size - GetPageSize() >= kShadowRatio)
    return nullptr;

  // Allocate space for the block, and update the slab info to reflect the
  // right redzone.
  void* alloc = nullptr;
  SlabInfo* slab_info = AllocateImpl(static_cast<uint32_t>(GetPageSize()));
  if (slab_info != nullptr) {
    slab_info->info.block_size = layout->block_size;
    slab_info->info.header_size = layout->header_size +
        layout->header_padding_size;
    slab_info->info.trailer_size = layout->trailer_size +
        layout->trailer_padding_size;
    slab_info->info.is_nested = false;
    alloc = slab_info->info.header;
  }

  DCHECK_EQ(0u, reinterpret_cast<uintptr_t>(alloc) % kShadowRatio);
  return alloc;
}

bool ZebraBlockHeap::FreeBlock(const BlockInfo& block_info) {
  DCHECK_NE(static_cast<BlockHeader*>(nullptr), block_info.header);
  if (!Free(block_info.header))
    return false;
  return true;
}

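// Places an allocated slab in the quarantine. The |info| passed in must
// exactly match the slab's stored metadata, otherwise the push is refused.
// A successful push always requests a synchronous trim.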
PushResult ZebraBlockHeap::Push(const CompactBlockInfo& info) {
  ::common::AutoRecursiveLock lock(lock_);
  size_t slab_index = GetSlabIndex(info.header);
  PushResult result = {false, 0};
  if (slab_index == kInvalidSlabIndex)
    return result;
  if (slab_info_[slab_index].state != kAllocatedSlab)
    return result;
  if (::memcmp(&slab_info_[slab_index].info, &info, sizeof(info)) != 0)
    return result;

  quarantine_.push(slab_index);
  slab_info_[slab_index].state = kQuarantinedSlab;
  result.push_successful = true;
  result.trim_status |= TrimStatusBits::SYNC_TRIM_REQUIRED;
  return result;
}

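// Pops the oldest quarantined slab, but only while the quarantine holds a
// larger fraction of the slabs than quarantine_ratio_ allows (see
// QuarantineInvariantIsSatisfied).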
PopResult ZebraBlockHeap::Pop(CompactBlockInfo* info) {
  ::common::AutoRecursiveLock lock(lock_);
  PopResult result = {false, TrimColor::GREEN};
  if (QuarantineInvariantIsSatisfied())
    return result;

  size_t slab_index = quarantine_.front();
  DCHECK_NE(kInvalidSlabIndex, slab_index);
  quarantine_.pop();

  DCHECK_EQ(kQuarantinedSlab, slab_info_[slab_index].state);
  slab_info_[slab_index].state = kAllocatedSlab;
  *info = slab_info_[slab_index].info;

  result.pop_successful = true;
  return result;
}

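// Drains the quarantine without freeing the slabs; ownership of the drained
// blocks is handed back to the caller via |infos|.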
void ZebraBlockHeap::Empty(ObjectVector* infos) {
  ::common::AutoRecursiveLock lock(lock_);
  while (!quarantine_.empty()) {
    size_t slab_index = quarantine_.front();
    DCHECK_NE(kInvalidSlabIndex, slab_index);
    quarantine_.pop();

    // Do not free the slab, only release it from the quarantine.
    slab_info_[slab_index].state = kAllocatedSlab;
    infos->push_back(slab_info_[slab_index].info);
  }
}

size_t ZebraBlockHeap::GetCountForTesting() {
  ::common::AutoRecursiveLock lock(lock_);
  return quarantine_.size();
}

void ZebraBlockHeap::set_quarantine_ratio(float quarantine_ratio) {
  DCHECK_LE(0, quarantine_ratio);
  DCHECK_GE(1, quarantine_ratio);
  ::common::AutoRecursiveLock lock(lock_);
  quarantine_ratio_ = quarantine_ratio;
}

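// Reserves a free slab and places the allocation at the end of its even
// page, aligned down to kShadowRatio. The resulting slab layout is:
//
//   +----------------- even page -----------------+------ odd page ------+
//   | unused                         | allocation |    right redzone     |
//   +----------------------------------------------+----------------------+
//                                    ^-- returned address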
ZebraBlockHeap::SlabInfo* ZebraBlockHeap::AllocateImpl(uint32_t bytes) {
  CHECK_LE(bytes, (1u << 30));
  if (bytes == 0 || bytes > GetPageSize())
    return nullptr;
  ::common::AutoRecursiveLock lock(lock_);

  if (free_slabs_.empty())
    return nullptr;

  size_t slab_index = free_slabs_.front();
  DCHECK_NE(kInvalidSlabIndex, slab_index);
  free_slabs_.pop();
  uint8_t* slab_address = GetSlabAddress(slab_index);
  DCHECK_NE(static_cast<uint8_t*>(nullptr), slab_address);

  // Push the allocation to the end of the even page.
  uint8_t* alloc = slab_address + GetPageSize() - bytes;
  alloc = ::common::AlignDown(alloc, kShadowRatio);

  // Update the slab info.
  SlabInfo* slab_info = &slab_info_[slab_index];
  slab_info->state = kAllocatedSlab;
  slab_info->info.header = reinterpret_cast<BlockHeader*>(alloc);
  slab_info->info.block_size = static_cast<uint32_t>(bytes);
  slab_info->info.header_size = 0;
  slab_info->info.trailer_size = 0;
  slab_info->info.is_nested = false;

  return slab_info;
}

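// The invariant holds while the quarantined fraction of all slabs stays at
// or below quarantine_ratio_. An empty quarantine trivially satisfies it.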
bool ZebraBlockHeap::QuarantineInvariantIsSatisfied() {
  return quarantine_.empty() ||
         (quarantine_.size() / static_cast<float>(slab_count_) <=
          quarantine_ratio_);
}

uint8_t* ZebraBlockHeap::GetSlabAddress(size_t index) {
  if (index >= slab_count_)
    return nullptr;
  return heap_address_ + index * kSlabSize;
}

size_t ZebraBlockHeap::GetSlabIndex(const void* address) {
  if (address < heap_address_ || address >= heap_address_ + heap_size_)
    return kInvalidSlabIndex;
  return (static_cast<const uint8_t*>(address) - heap_address_) / kSlabSize;
}

}  // namespace heaps
}  // namespace asan
}  // namespace agent