// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "syzygy/agent/asan/stack_capture_cache.h"

#include <algorithm>

#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "syzygy/agent/asan/asan_logger.h"
#include "syzygy/agent/asan/shadow.h"
#include "syzygy/agent/common/stack_capture.h"
#include "syzygy/common/align.h"

namespace agent {
namespace asan {

namespace {

// Gives us access to the first frame of a stack capture as a linked-list
// pointer.
common::StackCapture** GetFirstFrameAsLink(
    common::StackCapture* stack_capture) {
  DCHECK(stack_capture != NULL);
  common::StackCapture** link = reinterpret_cast<common::StackCapture**>(
      const_cast<void**>(stack_capture->frames()));
  DCHECK(link != NULL);
  return link;
}

}  // namespace

size_t StackCaptureCache::compression_reporting_period_ =
    ::common::kDefaultReportingPeriod;

StackCaptureCache::CachePage::~CachePage() {
  // It's our parent StackCaptureCache's responsibility to clean up the linked
  // list of cache pages. We balk if we're being deleted and haven't been
  // properly unlinked from the linked list.
  DCHECK(next_page_ == NULL);
  Shadow::Unpoison(this, sizeof(CachePage));
}

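// Reserves space for a capture of up to |max_num_frames| frames within this
// page's data area, constructing it via placement new. Returns NULL if the
// page doesn't have enough room left.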
common::StackCapture* StackCaptureCache::CachePage::GetNextStackCapture(
    size_t max_num_frames) {
  size_t size = common::StackCapture::GetSize(max_num_frames);
  if (bytes_used_ + size > kDataSize)
    return NULL;

  // Use placement new.
  common::StackCapture* stack =
      new(data_ + bytes_used_) common::StackCapture(max_num_frames);
  bytes_used_ += size;

  return stack;
}

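// Returns a stack capture to this page. This only succeeds if
// |stack_capture| was the most recent capture handed out by
// GetNextStackCapture, in which case its bytes are made available again.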
bool StackCaptureCache::CachePage::ReturnStackCapture(
    common::StackCapture* stack_capture) {
  DCHECK(stack_capture != NULL);

  uint8* stack = reinterpret_cast<uint8*>(stack_capture);
  size_t size = stack_capture->Size();

  // If this was the last stack capture provided by this page then the end of
  // it must align with our current data pointer.
  if (data_ + bytes_used_ != stack + size)
    return false;

  bytes_used_ -= size;
  return true;
}

StackCaptureCache::StackCaptureCache(AsanLogger* logger)
    : logger_(logger),
      max_num_frames_(common::StackCapture::kMaxNumFrames),
      current_page_(new CachePage(NULL)) {
  CHECK(current_page_ != NULL);
  DCHECK(logger_ != NULL);
  ::memset(&statistics_, 0, sizeof(statistics_));
  ::memset(reclaimed_, 0, sizeof(reclaimed_));
  statistics_.size = sizeof(CachePage);
}

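// This version of the constructor explicitly bounds stack depth;
// |max_num_frames| is clamped to common::StackCapture::kMaxNumFrames.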
StackCaptureCache::StackCaptureCache(AsanLogger* logger, size_t max_num_frames)
    : logger_(logger),
      max_num_frames_(0),
      current_page_(new CachePage(NULL)) {
  CHECK(current_page_ != NULL);
  DCHECK(logger_ != NULL);
  DCHECK_LT(0u, max_num_frames);
  max_num_frames_ = static_cast<uint8>(
      std::min(max_num_frames, common::StackCapture::kMaxNumFrames));
  ::memset(&statistics_, 0, sizeof(statistics_));
  ::memset(reclaimed_, 0, sizeof(reclaimed_));
  statistics_.size = sizeof(CachePage);
}

StackCaptureCache::~StackCaptureCache() {
  // Clean up the linked list of cache pages.
  while (current_page_ != NULL) {
    CachePage* page = current_page_;
    current_page_ = page->next_page_;
    page->next_page_ = NULL;
    delete page;
  }
}

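// Resets the static compression reporting period to its default value.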
void StackCaptureCache::Init() {
  compression_reporting_period_ = ::common::kDefaultReportingPeriod;
}

const common::StackCapture* StackCaptureCache::SaveStackTrace(
    StackId stack_id, const void* const* frames, size_t num_frames) {
  DCHECK(frames != NULL);
  DCHECK_NE(num_frames, 0U);
  DCHECK(current_page_ != NULL);

  bool already_cached = false;
  common::StackCapture* stack_trace = NULL;
  bool saturated = false;

  {
    size_t known_stack_shard = stack_id % kKnownStacksSharding;
    // Get or insert the current stack trace while under the lock for this
    // bucket.
    base::AutoLock auto_lock(known_stacks_locks_[known_stack_shard]);

    // Check if the stack capture is already in the cache map.
    common::StackCapture capture;
    capture.set_stack_id(stack_id);
    StackSet::iterator result = known_stacks_[known_stack_shard].find(&capture);

    // If this capture has not already been cached then we have to initialize
    // the data.
    if (result == known_stacks_[known_stack_shard].end()) {
      stack_trace = GetStackCapture(num_frames);
      DCHECK(stack_trace != NULL);
      stack_trace->InitFromBuffer(stack_id, frames, num_frames);
      std::pair<StackSet::iterator, bool> it =
          known_stacks_[known_stack_shard].insert(stack_trace);
      DCHECK(it.second);
      DCHECK(stack_trace->HasNoRefs());
    } else {
      already_cached = true;
      stack_trace = *result;
    }
    // Increment the reference count for this stack trace.
    if (!stack_trace->RefCountIsSaturated()) {
      stack_trace->AddRef();
    } else {
      saturated = true;
    }
  }
  DCHECK(stack_trace != NULL);

  bool must_log = false;
  Statistics statistics = {};
  // Update the statistics.
  if (compression_reporting_period_ != 0) {
    base::AutoLock stats_lock(stats_lock_);
    if (already_cached) {
      // If the existing stack capture was previously unreferenced and is
      // becoming referenced again, then decrement the unreferenced counter.
      if (stack_trace->HasNoRefs()) {
        DCHECK_LT(0u, statistics_.unreferenced);
        --statistics_.unreferenced;
      }
    } else {
      ++statistics_.cached;
      statistics_.frames_alive += num_frames;
      ++statistics_.allocated;
    }
    if (!saturated && stack_trace->RefCountIsSaturated()) {
      saturated = true;
      ++statistics_.saturated;
    }
    ++statistics_.requested;
    ++statistics_.references;
    statistics_.frames_stored += num_frames;
    if (statistics_.requested % compression_reporting_period_ == 0) {
      must_log = true;
      GetStatisticsUnlocked(&statistics);
    }
  }

  if (must_log)
    LogStatisticsImpl(statistics);

  // Return the stack trace pointer that is now in the cache.
  return stack_trace;
}

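// Convenience overload: forwards the ID, frames and frame count of an
// existing capture to the three-argument version above.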
const common::StackCapture* StackCaptureCache::SaveStackTrace(
    const common::StackCapture& stack_capture) {
  return SaveStackTrace(stack_capture.stack_id(),
                        stack_capture.frames(),
                        stack_capture.num_frames());
}

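// Releases a single reference to |stack_capture|. When the reference count
// drops to zero the capture is removed from the known-stacks set and pushed
// onto the reclaimed list for reuse.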
void StackCaptureCache::ReleaseStackTrace(
    const common::StackCapture* stack_capture) {
  DCHECK(stack_capture != NULL);

  size_t known_stack_shard = stack_capture->stack_id() % kKnownStacksSharding;
  bool add_to_reclaimed_list = false;
  common::StackCapture* stack = NULL;
  {
    base::AutoLock auto_lock(known_stacks_locks_[known_stack_shard]);

    // We own the stack so it's fine to remove the const. We double-check that
    // this is the case in debug builds with the DCHECK below.
    stack = const_cast<common::StackCapture*>(stack_capture);
    DCHECK(known_stacks_[known_stack_shard].find(stack) !=
               known_stacks_[known_stack_shard].end());

    stack->RemoveRef();

    if (stack->HasNoRefs()) {
      add_to_reclaimed_list = true;
      // Remove this from the known stacks as we're going to reclaim it and
      // overwrite part of its data as we insert into the reclaimed_ list.
      size_t num_erased = known_stacks_[known_stack_shard].erase(stack);
      DCHECK_EQ(num_erased, 1u);
    }
  }

  // Update the statistics.
  if (compression_reporting_period_ != 0) {
    base::AutoLock stats_lock(stats_lock_);
    DCHECK_LT(0u, statistics_.references);
    --statistics_.references;
    statistics_.frames_stored -= stack->num_frames();
    if (add_to_reclaimed_list) {
      --statistics_.cached;
      ++statistics_.unreferenced;
      // The frames in this stack capture are no longer alive.
      statistics_.frames_alive -= stack->num_frames();
    }
  }

  // Link this stack capture into the list of reclaimed stacks. This must
  // come after the statistics updating, as we modify the |num_frames_|
  // member in place.
  if (add_to_reclaimed_list)
    AddStackCaptureToReclaimedList(stack);
}

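// Sanity-checks that |stack_capture| points into one of this cache's pages
// and that its header fields look internally consistent.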
bool StackCaptureCache::StackCapturePointerIsValid(
    const common::StackCapture* stack_capture) {
  // All stack captures must have pointer alignment at least.
  if (!::common::IsAligned(stack_capture, sizeof(uintptr_t)))
    return false;

  const uint8* stack_capture_addr =
      reinterpret_cast<const uint8*>(stack_capture);

  // Walk over the allocated pages and see if it lands within any of them.
  base::AutoLock lock(current_page_lock_);
  CachePage* page = current_page_;
  while (page != NULL) {
    const uint8* page_end = page->data() + page->bytes_used();

    // If the proposed stack capture lands within a page we then check to
    // ensure that it is also internally consistent. This can still fail
    // but is somewhat unlikely.
    static const size_t kMinSize = common::StackCapture::GetSize(1);
    if (stack_capture_addr >= page->data() &&
        stack_capture_addr + kMinSize <= page_end &&
        stack_capture_addr + stack_capture->Size() <= page_end &&
        stack_capture->num_frames() <= stack_capture->max_num_frames() &&
        stack_capture->max_num_frames() <=
            common::StackCapture::kMaxNumFrames) {
      return true;
    }
    page = page->next_page_;
  }
  return false;
}

void StackCaptureCache::LogStatistics() {
  Statistics statistics = {};

  {
    base::AutoLock auto_lock(stats_lock_);
    GetStatisticsUnlocked(&statistics);
  }

  LogStatisticsImpl(statistics);
}

void StackCaptureCache::GetStatisticsUnlocked(Statistics* statistics) const {
#ifndef NDEBUG
  stats_lock_.AssertAcquired();
#endif

  DCHECK(statistics != NULL);
  *statistics = statistics_;
}

void StackCaptureCache::LogStatisticsImpl(const Statistics& statistics) const {
  // The cache has 3 categories of storage.
  // alive frames: these are actively participating in storing a stack trace.
  // dead frames: these are unreferenced stack traces that are eligible for
  //     reuse, but are currently dormant.
  // overhead: frames in a stack capture that aren't used, padding at the end
  //     of cache pages, cache page metadata, stack capture metadata, etc.

  // These are all in bytes. Each frame is a 4-byte pointer.
  double cache_size = statistics.size;
  double alive_size = statistics.frames_alive * 4;
  double dead_size = statistics.frames_dead * 4;
  double stored_size = statistics.frames_stored * 4;

  // The |cache_size| is the actual size of storage taken, while |stored_size|
  // is the conceptual amount of frame data that is stored in the cache.
  double compression = 100.0 * (1.0 - (cache_size / stored_size));
  double alive = 100.0 * alive_size / cache_size;
  double dead = 100.0 * dead_size / cache_size;
  double overhead = 100.0 - alive - dead;

  logger_->Write(base::StringPrintf(
      "PID=%d; Stack cache size=%.2f MB; Compression=%.2f%%; "
      "Alive=%.2f%%; Dead=%.2f%%; Overhead=%.2f%%; Saturated=%d; Entries=%d",
      ::GetCurrentProcessId(),
      cache_size / 1024.0 / 1024.0,
      compression,
      alive,
      dead,
      overhead,
      statistics.saturated,
      statistics.cached));
}

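// Returns a capture able to hold at least |num_frames| frames, preferring to
// recycle one from the reclaimed lists before carving a new one out of the
// current cache page.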
common::StackCapture* StackCaptureCache::GetStackCapture(size_t num_frames) {
  common::StackCapture* stack_capture = NULL;

  // First look to the reclaimed stacks and try to use one of those. We'll use
  // the first one that's big enough.
  for (size_t n = num_frames; n <= max_num_frames_; ++n) {
    base::AutoLock lock(reclaimed_locks_[n]);
    if (reclaimed_[n] != NULL) {
      common::StackCapture* reclaimed_stack_capture = reclaimed_[n];
      common::StackCapture** link =
          GetFirstFrameAsLink(reclaimed_stack_capture);
      reclaimed_[n] = *link;
      stack_capture = reclaimed_stack_capture;
      break;
    }
  }

  if (stack_capture != NULL) {
    if (compression_reporting_period_ != 0) {
      base::AutoLock stats_lock(stats_lock_);
      // These frames are no longer dead, but in limbo. If the stack capture
      // is used they'll be added to frames_alive and frames_stored.
      statistics_.frames_dead -= stack_capture->max_num_frames();
    }
    return stack_capture;
  }

  common::StackCapture* unused_stack_capture = NULL;
  {
    base::AutoLock current_page_lock(current_page_lock_);

    // We didn't find a reusable stack capture. Go to the cache page.
    stack_capture = current_page_->GetNextStackCapture(num_frames);

    if (stack_capture != NULL)
      return stack_capture;

    // If the allocation failed we don't have enough room on the current page.

    // Use the remaining bytes to create one more maximally sized stack
    // capture. We will stuff this into the reclaimed_ structure for later
    // use.
    size_t bytes_left = current_page_->bytes_left();
    size_t max_num_frames = common::StackCapture::GetMaxNumFrames(bytes_left);
    if (max_num_frames > 0) {
      DCHECK_LT(max_num_frames, num_frames);
      DCHECK_LE(common::StackCapture::GetSize(max_num_frames), bytes_left);
      unused_stack_capture =
          current_page_->GetNextStackCapture(max_num_frames);
      DCHECK(unused_stack_capture != NULL);
    }

    // Allocate a new page (that links to the current page) and use it to
    // allocate a new stack capture.
    current_page_ = new CachePage(current_page_);
    CHECK(current_page_ != NULL);
    statistics_.size += sizeof(CachePage);
    stack_capture = current_page_->GetNextStackCapture(num_frames);
  }

  if (unused_stack_capture != NULL) {
    // We're creating an unreferenced stack capture.
    AddStackCaptureToReclaimedList(unused_stack_capture);
  }

  // Update the statistics.
  if (compression_reporting_period_ != 0) {
    base::AutoLock stats_lock(stats_lock_);
    ++statistics_.unreferenced;
  }

  DCHECK(stack_capture != NULL);
  return stack_capture;
}

namespace {

class PrivateStackCapture : public common::StackCapture {
 public:
  // Expose the actual number of frames. We use this to make reclaimed
  // stack captures look invalid when they're in a free list.
  using common::StackCapture::num_frames_;
};

}  // namespace

void StackCaptureCache::AddStackCaptureToReclaimedList(
    common::StackCapture* stack_capture) {
  DCHECK(stack_capture != NULL);

  // Make the stack capture internally inconsistent so that it can't be
  // interpreted as being valid. This is rewritten upon reuse, so it's not
  // dangerous.
  reinterpret_cast<PrivateStackCapture*>(stack_capture)->num_frames_ = -1;

  {
    base::AutoLock lock(reclaimed_locks_[stack_capture->max_num_frames()]);

    // Push the capture onto the front of the reclaimed list for its size
    // class, reusing its first frame slot as the 'next' link.
    common::StackCapture** link = GetFirstFrameAsLink(stack_capture);
    size_t num_frames = stack_capture->max_num_frames();
    *link = reclaimed_[num_frames];
    reclaimed_[num_frames] = stack_capture;
  }

  // Update the statistics.
  if (compression_reporting_period_ != 0) {
    base::AutoLock stats_lock(stats_lock_);
    statistics_.frames_dead += stack_capture->max_num_frames();
  }
}

}  // namespace asan
}  // namespace agent