summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorKostya Serebryany <kcc@google.com>2012-12-26 06:30:02 +0000
committerKostya Serebryany <kcc@google.com>2012-12-26 06:30:02 +0000
commit9e3bd38388a7c182db57f6e3fc0943e6d12f012e (patch)
tree5c14af17f552d06189b6982d82f858bbb158f802 /lib
parent111a0716d714aa2597e333d160cf1f271695bab7 (diff)
[asan] asan_allocator2: by default use the StackDepot to store the stack traces instead of storing them in the redzones
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@171099 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r--lib/asan/asan_allocator2.cc44
-rw-r--r--lib/asan/asan_flags.h2
-rw-r--r--lib/asan/asan_rtl.cc2
-rw-r--r--lib/asan/asan_stats.cc4
-rw-r--r--lib/sanitizer_common/sanitizer_stackdepot.cc10
-rw-r--r--lib/sanitizer_common/sanitizer_stackdepot.h7
6 files changed, 59 insertions, 10 deletions
diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc
index e584f7ed0..6d85af67e 100644
--- a/lib/asan/asan_allocator2.cc
+++ b/lib/asan/asan_allocator2.cc
@@ -26,6 +26,7 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -162,9 +163,8 @@ struct AsanChunk: ChunkBase {
}
void *AllocBeg() {
if (from_memalign)
- return reinterpret_cast<uptr>(
- allocator.GetBlockBegin(reinterpret_cast<void *>(this)));
- return Beg() - ComputeRZSize(0);
+ return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
+ return reinterpret_cast<void*>(Beg() - ComputeRZSize(0));
}
// We store the alloc/free stack traces in the chunk itself.
u32 *AllocStackBeg() {
@@ -189,14 +189,29 @@ uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+static void GetStackTraceFromId(u32 id, StackTrace *stack) {
+ CHECK(id);
+ uptr size = 0;
+ const uptr *trace = StackDepotGet(id, &size);
+ CHECK_LT(size, kStackTraceMax);
+ internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
+ stack->size = size;
+}
+
void AsanChunkView::GetAllocStack(StackTrace *stack) {
- StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
- chunk_->AllocStackSize());
+ if (flags()->use_stack_depot)
+ GetStackTraceFromId(chunk_->alloc_context_id, stack);
+ else
+ StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
+ chunk_->AllocStackSize());
}
void AsanChunkView::GetFreeStack(StackTrace *stack) {
- StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
- chunk_->FreeStackSize());
+ if (flags()->use_stack_depot)
+ GetStackTraceFromId(chunk_->free_context_id, stack);
+ else
+ StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
+ chunk_->FreeStackSize());
}
class Quarantine: public AsanChunkFifoList {
@@ -341,7 +356,13 @@ static void *Allocate(uptr size, uptr alignment, StackTrace *stack,
m->user_requested_size = SizeClassMap::kMaxSize;
*reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)) = size;
}
- StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
+
+ if (flags()->use_stack_depot) {
+ m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
+ } else {
+ m->alloc_context_id = 0;
+ StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
+ }
uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
// Unpoison the bulk of the memory region.
@@ -391,7 +412,12 @@ static void Deallocate(void *ptr, StackTrace *stack, AllocType alloc_type) {
CHECK_EQ(m->free_tid, kInvalidTid);
AsanThread *t = asanThreadRegistry().GetCurrent();
m->free_tid = t ? t->tid() : 0;
- StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
+ if (flags()->use_stack_depot) {
+ m->free_context_id = StackDepotPut(stack->trace, stack->size);
+ } else {
+ m->free_context_id = 0;
+ StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
+ }
CHECK(m->chunk_state == CHUNK_QUARANTINE);
// Poison the region.
PoisonShadow(m->Beg(),
diff --git a/lib/asan/asan_flags.h b/lib/asan/asan_flags.h
index 296a5bbf7..d7b21ea4a 100644
--- a/lib/asan/asan_flags.h
+++ b/lib/asan/asan_flags.h
@@ -104,6 +104,8 @@ struct Flags {
bool poison_heap;
// Report errors on malloc/delete, new/free, new/delete[], etc.
bool alloc_dealloc_mismatch;
+ // Use stack depot instead of storing stacks in the redzones.
+ bool use_stack_depot;
};
Flags *flags();
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index 80008f641..eb9f8440e 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -108,6 +108,7 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
ParseFlag(str, &f->poison_heap, "poison_heap");
ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
+ ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
}
void InitializeFlags(Flags *f, const char *env) {
@@ -145,6 +146,7 @@ void InitializeFlags(Flags *f, const char *env) {
f->fast_unwind_on_malloc = true;
f->poison_heap = true;
f->alloc_dealloc_mismatch = false;
+ f->use_stack_depot = true; // Only affects allocator2.
// Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
diff --git a/lib/asan/asan_stats.cc b/lib/asan/asan_stats.cc
index 31786e90f..152904613 100644
--- a/lib/asan/asan_stats.cc
+++ b/lib/asan/asan_stats.cc
@@ -17,6 +17,7 @@
#include "asan_stats.h"
#include "asan_thread_registry.h"
#include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
namespace __asan {
@@ -62,6 +63,9 @@ static void PrintAccumulatedStats() {
// Use lock to keep reports from mixing up.
ScopedLock lock(&print_lock);
stats.Print();
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
}
} // namespace __asan
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.cc b/lib/sanitizer_common/sanitizer_stackdepot.cc
index 6fb3d2dcb..08e523832 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.cc
+++ b/lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -42,6 +42,12 @@ static struct {
atomic_uint32_t seq[kPartCount]; // Unique id generators.
} depot;
+static StackDepotStats stats;
+
+StackDepotStats *StackDepotGetStats() {
+ return &stats;
+}
+
static u32 hash(const uptr *stack, uptr size) {
// murmur2
const u32 m = 0x5bd1e995;
@@ -77,7 +83,7 @@ static StackDesc *tryallocDesc(uptr memsz) {
}
static StackDesc *allocDesc(uptr size) {
- // Frist, try to allocate optimisitically.
+ // First, try to allocate optimistically.
uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
StackDesc *s = tryallocDesc(memsz);
if (s)
@@ -93,6 +99,7 @@ static StackDesc *allocDesc(uptr size) {
if (allocsz < memsz)
allocsz = memsz;
uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ stats.mapped += allocsz;
atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
atomic_store(&depot.region_pos, mem, memory_order_release);
}
@@ -156,6 +163,7 @@ u32 StackDepotPut(const uptr *stack, uptr size) {
}
uptr part = (h % kTabSize) / kPartSize;
id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
CHECK_LT(id, kMaxId);
id |= part << kPartShift;
CHECK_NE(id, 0);
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.h b/lib/sanitizer_common/sanitizer_stackdepot.h
index 98db08a9f..49e6669dd 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.h
+++ b/lib/sanitizer_common/sanitizer_stackdepot.h
@@ -24,6 +24,13 @@ u32 StackDepotPut(const uptr *stack, uptr size);
// Retrieves a stored stack trace by the id.
const uptr *StackDepotGet(u32 id, uptr *size);
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr mapped;
+};
+
+StackDepotStats *StackDepotGetStats();
+
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOT_H