Diffstat (limited to 'lib')
-rw-r--r--  lib/asan/asan_stats.cc                                  |   4
-rw-r--r--  lib/msan/CMakeLists.txt                                 |   1
-rw-r--r--  lib/msan/msan.cc                                        |  75
-rw-r--r--  lib/msan/msan.h                                         |   8
-rw-r--r--  lib/msan/msan_allocator.cc                              |  14
-rw-r--r--  lib/msan/msan_chained_origin_depot.cc                   |  82
-rw-r--r--  lib/msan/msan_chained_origin_depot.h                    |  26
-rw-r--r--  lib/msan/msan_flags.h                                   |   2
-rw-r--r--  lib/msan/msan_interceptors.cc                           |   8
-rw-r--r--  lib/msan/msan_interface_internal.h                      |   2
-rw-r--r--  lib/msan/msan_origin.h                                  |  76
-rw-r--r--  lib/msan/msan_report.cc                                 |  50
-rw-r--r--  lib/msan/tests/msan_test.cc                             |  52
-rw-r--r--  lib/sanitizer_common/CMakeLists.txt                     |   3
-rw-r--r--  lib/sanitizer_common/sanitizer_common.h                 |   5
-rw-r--r--  lib/sanitizer_common/sanitizer_persistent_allocator.cc  |  19
-rw-r--r--  lib/sanitizer_common/sanitizer_persistent_allocator.h   |  71
-rw-r--r--  lib/sanitizer_common/sanitizer_stackdepot.cc            | 259
-rw-r--r--  lib/sanitizer_common/sanitizer_stackdepot.h             |  29
-rw-r--r--  lib/sanitizer_common/sanitizer_stackdepotbase.h         | 153
20 files changed, 652 insertions(+), 287 deletions(-)
diff --git a/lib/asan/asan_stats.cc b/lib/asan/asan_stats.cc
index 73dc3c5ac..5af37e21a 100644
--- a/lib/asan/asan_stats.cc
+++ b/lib/asan/asan_stats.cc
@@ -129,8 +129,8 @@ static void PrintAccumulatedStats() {
BlockingMutexLock lock(&print_lock);
stats.Print();
StackDepotStats *stack_depot_stats = StackDepotGetStats();
- Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
- stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+ Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
PrintInternalAllocatorStats();
}
diff --git a/lib/msan/CMakeLists.txt b/lib/msan/CMakeLists.txt
index e046a7729..4d69a2548 100644
--- a/lib/msan/CMakeLists.txt
+++ b/lib/msan/CMakeLists.txt
@@ -4,6 +4,7 @@ include_directories(..)
set(MSAN_RTL_SOURCES
msan.cc
msan_allocator.cc
+ msan_chained_origin_depot.cc
msan_interceptors.cc
msan_linux.cc
msan_new_delete.cc
diff --git a/lib/msan/msan.cc b/lib/msan/msan.cc
index 62f582aaa..308be461b 100644
--- a/lib/msan/msan.cc
+++ b/lib/msan/msan.cc
@@ -13,6 +13,8 @@
//===----------------------------------------------------------------------===//
#include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -117,6 +119,28 @@ static void ParseFlagsFromString(Flags *f, const char *str) {
Printf("Exit code not in [0, 128) range: %d\n", f->exit_code);
Die();
}
+ ParseFlag(str, &f->origin_history_size, "origin_history_size", "");
+ if (f->origin_history_size < 0 ||
+ f->origin_history_size > Origin::kMaxDepth) {
+ Printf(
+ "Origin history size invalid: %d. Must be 0 (unlimited) or in [1, %d] "
+ "range.\n",
+ f->origin_history_size, Origin::kMaxDepth);
+ Die();
+ }
+ ParseFlag(str, &f->origin_history_per_stack_limit,
+ "origin_history_per_stack_limit", "");
+ // Limiting to kStackDepotMaxUseCount / 2 to avoid overflow in
+ // StackDepotHandle::inc_use_count_unsafe.
+ if (f->origin_history_per_stack_limit < 0 ||
+ f->origin_history_per_stack_limit > kStackDepotMaxUseCount / 2) {
+ Printf(
+ "Origin per-stack limit invalid: %d. Must be 0 (unlimited) or in [1, "
+ "%d] range.\n",
+ f->origin_history_per_stack_limit, kStackDepotMaxUseCount / 2);
+ Die();
+ }
+
ParseFlag(str, &f->report_umrs, "report_umrs", "");
ParseFlag(str, &f->wrap_signals, "wrap_signals", "");
@@ -143,6 +167,8 @@ static void InitializeFlags(Flags *f, const char *options) {
f->poison_in_malloc = true;
f->poison_in_free = true;
f->exit_code = 77;
+ f->origin_history_size = Origin::kMaxDepth;
+ f->origin_history_per_stack_limit = 20000;
f->report_umrs = true;
f->wrap_signals = true;
f->halt_on_error = !&__msan_keep_going;
@@ -230,9 +256,7 @@ void ScopedThreadLocalStateBackup::Restore() {
void UnpoisonThreadLocalState() {
}
-const char *GetOriginDescrIfStack(u32 id, uptr *pc) {
- if ((id >> 31) == 0) return 0;
- id &= (1U << 31) - 1;
+const char *GetStackOriginDescr(u32 id, uptr *pc) {
CHECK_LT(id, kNumStackOriginDescrs);
if (pc) *pc = StackOriginPC[id];
return StackOriginDescr[id];
@@ -242,10 +266,30 @@ u32 ChainOrigin(u32 id, StackTrace *stack) {
MsanThread *t = GetCurrentThread();
if (t && t->InSignalHandler())
return id;
- uptr idx = Min(stack->size, kStackTraceMax - 1);
- stack->trace[idx] = TRACE_MAKE_CHAINED(id);
- u32 new_id = StackDepotPut(stack->trace, idx + 1);
- return new_id;
+
+ Origin o(id);
+ int depth = o.depth();
+ // 0 means unlimited depth.
+ if (flags()->origin_history_size > 0 && depth > 0) {
+ if (depth >= flags()->origin_history_size) {
+ return id;
+ } else {
+ ++depth;
+ }
+ }
+
+ StackDepotHandle h = StackDepotPut_WithHandle(stack->trace, stack->size);
+ if (!h.valid()) return id;
+ int use_count = h.use_count();
+ if (use_count > flags()->origin_history_per_stack_limit)
+ return id;
+
+ u32 chained_id;
+ bool inserted = ChainedOriginDepotPut(h.id(), o.id(), &chained_id);
+
+ if (inserted) h.inc_use_count_unsafe();
+
+ return Origin(chained_id, depth).raw_id();
}
} // namespace __msan
@@ -514,16 +558,15 @@ void __msan_set_alloca_origin4(void *a, uptr size, const char *descr, uptr pc) {
bool print = false; // internal_strstr(descr + 4, "AllocaTOTest") != 0;
u32 id = *id_ptr;
if (id == first_timer) {
- id = atomic_fetch_add(&NumStackOriginDescrs,
- 1, memory_order_relaxed);
+ u32 idx = atomic_fetch_add(&NumStackOriginDescrs, 1, memory_order_relaxed);
+ CHECK_LT(idx, kNumStackOriginDescrs);
+ StackOriginDescr[idx] = descr + 4;
+ StackOriginPC[idx] = pc;
+ ChainedOriginDepotPut(idx, Origin::kStackRoot, &id);
*id_ptr = id;
- CHECK_LT(id, kNumStackOriginDescrs);
- StackOriginDescr[id] = descr + 4;
- StackOriginPC[id] = pc;
if (print)
- Printf("First time: id=%d %s %p \n", id, descr + 4, pc);
+ Printf("First time: idx=%d id=%d %s %p \n", idx, id, descr + 4, pc);
}
- id |= 1U << 31;
if (print)
Printf("__msan_set_alloca_origin: descr=%s id=%x\n", descr + 4, id);
__msan_set_origin(a, size, id);
@@ -536,10 +579,6 @@ u32 __msan_chain_origin(u32 id) {
return ChainOrigin(id, &stack);
}
-const char *__msan_get_origin_descr_if_stack(u32 id) {
- return GetOriginDescrIfStack(id, 0);
-}
-
u32 __msan_get_origin(const void *a) {
if (!__msan_get_track_origins()) return 0;
uptr x = (uptr)a;
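
The depth policy in ChainOrigin above is easy to misread; the following standalone sketch restates it (illustrative only, not part of the patch):

// Returns the depth to record for the new link, or -1 when the chain has
// reached origin_history_size and the old origin id must be reused.
static int NextChainDepth(int depth, int history_size) {
  // history_size == 0 disables the limit; depth == 0 marks such chains.
  if (history_size > 0 && depth > 0) {
    if (depth >= history_size) return -1;  // stop growing the chain
    return depth + 1;
  }
  return depth;  // unlimited mode: the stored depth stays unchanged
}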
diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index c307534b0..65e233c69 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -32,12 +32,6 @@
#define MEM_IS_SHADOW(mem) \
((uptr)mem >= 0x200000000000ULL && (uptr)mem <= 0x400000000000ULL)
-// Chained stack trace format.
-#define TRACE_MAGIC_MASK 0xFFFFFFFF00000000LLU
-#define TRACE_MAKE_CHAINED(id) ((uptr)id | TRACE_MAGIC_MASK)
-#define TRACE_TO_CHAINED_ID(u) ((uptr)u & (~TRACE_MAGIC_MASK))
-#define TRACE_IS_CHAINED(u) ((((uptr)u) & TRACE_MAGIC_MASK) == TRACE_MAGIC_MASK)
-
const int kMsanParamTlsSizeInWords = 100;
const int kMsanRetvalTlsSizeInWords = 100;
@@ -59,7 +53,7 @@ void InstallTrapHandler();
void InstallAtExitHandler();
void ReplaceOperatorsNewAndDelete();
-const char *GetOriginDescrIfStack(u32 id, uptr *pc);
+const char *GetStackOriginDescr(u32 id, uptr *pc);
void EnterSymbolizer();
void ExitSymbolizer();
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 90b9b31fb..e6da9c14b 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -16,6 +16,8 @@
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "msan.h"
#include "msan_allocator.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
#include "msan_thread.h"
namespace __msan {
@@ -101,9 +103,9 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (__msan_get_track_origins()) {
u32 stack_id = StackDepotPut(stack->trace, stack->size);
CHECK(stack_id);
- CHECK_EQ((stack_id >> 31),
- 0); // Higher bit is occupied by stack origins.
- __msan_set_origin(allocated, size, stack_id);
+ u32 id;
+ ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
+ __msan_set_origin(allocated, size, Origin(id, 1).raw_id());
}
}
MSAN_MALLOC_HOOK(allocated, size);
@@ -124,9 +126,9 @@ void MsanDeallocate(StackTrace *stack, void *p) {
if (__msan_get_track_origins()) {
u32 stack_id = StackDepotPut(stack->trace, stack->size);
CHECK(stack_id);
- CHECK_EQ((stack_id >> 31),
- 0); // Higher bit is occupied by stack origins.
- __msan_set_origin(p, size, stack_id);
+ u32 id;
+ ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
+ __msan_set_origin(p, size, Origin(id, 1).raw_id());
}
}
MsanThread *t = GetCurrentThread();
diff --git a/lib/msan/msan_chained_origin_depot.cc b/lib/msan/msan_chained_origin_depot.cc
new file mode 100644
index 000000000..a98bcf5d0
--- /dev/null
+++ b/lib/msan/msan_chained_origin_depot.cc
@@ -0,0 +1,82 @@
+//===-- msan_chained_origin_depot.cc -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#include "msan_chained_origin_depot.h"
+
+#include "sanitizer_common/sanitizer_stackdepotbase.h"
+
+namespace __msan {
+
+struct ChainedOriginDepotDesc {
+ u32 here_id;
+ u32 prev_id;
+ u32 hash() const { return here_id ^ prev_id; }
+ bool is_valid() { return true; }
+};
+
+struct ChainedOriginDepotNode {
+ ChainedOriginDepotNode *link;
+ u32 id;
+ u32 here_id;
+ u32 prev_id;
+
+ typedef ChainedOriginDepotDesc args_type;
+ bool eq(u32 hash, const args_type &args) const {
+ return here_id == args.here_id && prev_id == args.prev_id;
+ }
+ static uptr storage_size(const args_type &args) {
+ return sizeof(ChainedOriginDepotNode);
+ }
+ void store(const args_type &args, u32 other_hash) {
+ here_id = args.here_id;
+ prev_id = args.prev_id;
+ }
+ args_type load() const {
+ args_type ret = {here_id, prev_id};
+ return ret;
+ }
+ struct Handle {
+ ChainedOriginDepotNode *node_;
+ Handle() : node_(0) {}
+ explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id() { return node_->id; }
+ int here_id() { return node_->here_id; }
+ int prev_id() { return node_->prev_id; }
+ };
+ Handle get_handle() { return Handle(this); }
+
+ typedef Handle handle_type;
+};
+
+static StackDepotBase<ChainedOriginDepotNode, 3> chainedOriginDepot;
+
+StackDepotStats *ChainedOriginDepotGetStats() {
+ return chainedOriginDepot.GetStats();
+}
+
+bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id) {
+ ChainedOriginDepotDesc desc = {here_id, prev_id};
+ bool inserted;
+ ChainedOriginDepotNode::Handle h = chainedOriginDepot.Put(desc, &inserted);
+ *new_id = h.valid() ? h.id() : 0;
+ return inserted;
+}
+
+// Retrieves the stored (here_id, prev_id) pair by id.
+u32 ChainedOriginDepotGet(u32 id, u32 *other) {
+ ChainedOriginDepotDesc desc = chainedOriginDepot.Get(id);
+ *other = desc.prev_id;
+ return desc.here_id;
+}
+
+} // namespace __msan
diff --git a/lib/msan/msan_chained_origin_depot.h b/lib/msan/msan_chained_origin_depot.h
new file mode 100644
index 000000000..db427b00d
--- /dev/null
+++ b/lib/msan/msan_chained_origin_depot.h
@@ -0,0 +1,26 @@
+//===-- msan_chained_origin_depot.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Storage for chained origins.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_CHAINED_ORIGIN_DEPOT_H
+#define MSAN_CHAINED_ORIGIN_DEPOT_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __msan {
+
+StackDepotStats *ChainedOriginDepotGetStats();
+bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id);
+// Retrieves the stored (here_id, prev_id) pair by id.
+u32 ChainedOriginDepotGet(u32 id, u32 *other);
+
+} // namespace __msan
+
+#endif // MSAN_CHAINED_ORIGIN_DEPOT_H
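
A minimal round trip through this API (hypothetical values; assumes the MSan runtime headers above are included):

// Store a (stack_id, prev_id) link and read it back. Put reports whether
// the pair was newly inserted; identical pairs map to the same id.
u32 stack_id = 42;  // made-up StackDepot id
u32 link_id;
bool inserted = ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &link_id);
u32 prev_out;
u32 stack_out = ChainedOriginDepotGet(link_id, &prev_out);
// stack_out == stack_id and prev_out == Origin::kHeapRoot; a second
// identical Put returns the same link_id with inserted == false.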
diff --git a/lib/msan/msan_flags.h b/lib/msan/msan_flags.h
index 93fa8a60d..a506bbed8 100644
--- a/lib/msan/msan_flags.h
+++ b/lib/msan/msan_flags.h
@@ -19,6 +19,8 @@ namespace __msan {
// Flags.
struct Flags {
int exit_code;
+ int origin_history_size;
+ int origin_history_per_stack_limit;
bool poison_heap_with_zeroes; // default: false
bool poison_stack_with_zeroes; // default: false
bool poison_in_malloc; // default: true
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index 7162ba735..5119d13ee 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -16,6 +16,8 @@
//===----------------------------------------------------------------------===//
#include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
#include "msan_thread.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_allocator.h"
@@ -817,9 +819,9 @@ void __msan_allocated_memory(const void* data, uptr size) {
__msan_poison(data, size);
if (__msan_get_track_origins()) {
u32 stack_id = StackDepotPut(stack.trace, stack.size);
- CHECK(stack_id);
- CHECK_EQ((stack_id >> 31), 0); // Higher bit is occupied by stack origins.
- __msan_set_origin(data, size, stack_id);
+ u32 id;
+ ChainedOriginDepotPut(stack_id, Origin::kHeapRoot, &id);
+ __msan_set_origin(data, size, Origin(id, 1).raw_id());
}
}
diff --git a/lib/msan/msan_interface_internal.h b/lib/msan/msan_interface_internal.h
index 955288dc9..c005c2a36 100644
--- a/lib/msan/msan_interface_internal.h
+++ b/lib/msan/msan_interface_internal.h
@@ -136,8 +136,6 @@ bool __msan_is_in_loader();
SANITIZER_INTERFACE_ATTRIBUTE
u32 __msan_get_umr_origin();
SANITIZER_INTERFACE_ATTRIBUTE
-const char *__msan_get_origin_descr_if_stack(u32 id);
-SANITIZER_INTERFACE_ATTRIBUTE
void __msan_partial_poison(const void* data, void* shadow, uptr size);
// Tell MSan about newly allocated memory (ex.: custom allocator).
diff --git a/lib/msan/msan_origin.h b/lib/msan/msan_origin.h
new file mode 100644
index 000000000..64acf1e06
--- /dev/null
+++ b/lib/msan/msan_origin.h
@@ -0,0 +1,76 @@
+//===-- msan_origin.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Origin id utils.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_ORIGIN_H
+#define MSAN_ORIGIN_H
+
+namespace __msan {
+
+// Origin handling.
+//
+// Origin is a 32-bit identifier that is attached to any uninitialized value in
+// the program and describes, more or less exactly, how this memory came to be
+// uninitialized.
+//
+// Origin ids are values of ChainedOriginDepot, which is a mapping of (stack_id,
+// prev_id) -> id, where
+// * stack_id describes an event in the program, usually a memory store.
+// StackDepot keeps a mapping between those and corresponding stack traces.
+// * prev_id is another origin id that describes the earlier part of the
+// uninitialized value history.
+// Following a chain of prev_id provides the full recorded history of an
+// uninitialized value.
+//
+// This, effectively, defines a tree (or 2 trees, see below) where nodes are
+// points in value history marked with origin ids, and edges are events that are
+// marked with stack_id.
+//
+// There are 2 special root origin ids:
+// * kHeapRoot - an origin with prev_id == kHeapRoot describes an event of
+// allocating memory from heap.
+// * kStackRoot - an origin with prev_id == kStackRoot describes an event of
+// allocating memory from stack (i.e. on function entry).
+// Note that ChainedOriginDepot does not store any node for kHeapRoot or
+// kStackRoot. These are just special id values.
+//
+// The three highest bits of an origin id store the length (depth) of the
+// origin chain. The special depth value 0 means unlimited.
+
+class Origin {
+ public:
+ static const int kDepthBits = 3;
+ static const int kDepthShift = 32 - kDepthBits;
+ static const u32 kIdMask = ((u32)-1) >> (32 - kDepthShift);
+ static const u32 kDepthMask = ~kIdMask;
+
+ static const int kMaxDepth = (1 << kDepthBits) - 1;
+
+ static const u32 kHeapRoot = (u32)-1;
+ static const u32 kStackRoot = (u32)-2;
+
+ explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
+ Origin(u32 id, u32 depth) : raw_id_((depth << kDepthShift) | id) {
+ CHECK_EQ(this->depth(), depth);
+ CHECK_EQ(this->id(), id);
+ }
+ int depth() const { return raw_id_ >> kDepthShift; }
+ u32 id() const { return raw_id_ & kIdMask; }
+ u32 raw_id() const { return raw_id_; }
+ bool isStackRoot() const { return raw_id_ == kStackRoot; }
+ bool isHeapRoot() const { return raw_id_ == kHeapRoot; }
+
+ private:
+ u32 raw_id_;
+};
+
+} // namespace __msan
+
+#endif // MSAN_ORIGIN_H
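
The bit layout is easiest to check with concrete numbers; a standalone re-derivation (not part of the patch):

#include <assert.h>
#include <stdint.h>
typedef uint32_t u32;

int main() {
  const int kDepthBits = 3;
  const int kDepthShift = 32 - kDepthBits;              // 29
  const u32 kIdMask = ((u32)-1) >> (32 - kDepthShift);  // low 29 bits
  u32 raw = (2u << kDepthShift) | 0x1234u;  // depth 2, depot id 0x1234
  assert((int)(raw >> kDepthShift) == 2);   // Origin::depth()
  assert((raw & kIdMask) == 0x1234u);       // Origin::id()
  return 0;
}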
diff --git a/lib/msan/msan_report.cc b/lib/msan/msan_report.cc
index 453c81710..118e164dd 100644
--- a/lib/msan/msan_report.cc
+++ b/lib/msan/msan_report.cc
@@ -13,6 +13,8 @@
//===----------------------------------------------------------------------===//
#include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
@@ -56,30 +58,36 @@ static void DescribeStackOrigin(const char *so, uptr pc) {
}
}
-static void DescribeOrigin(u32 origin) {
- VPrintf(1, " raw origin id: %d\n", origin);
- uptr pc;
+static void DescribeOrigin(u32 id) {
+ VPrintf(1, " raw origin id: %d\n", id);
+ Decorator d;
while (true) {
- if (const char *so = GetOriginDescrIfStack(origin, &pc)) {
+ Origin o(id);
+ u32 prev_id;
+ u32 stack_id = ChainedOriginDepotGet(o.id(), &prev_id);
+ Origin prev_o(prev_id);
+
+ if (prev_o.isStackRoot()) {
+ uptr pc;
+ const char *so = GetStackOriginDescr(stack_id, &pc);
DescribeStackOrigin(so, pc);
break;
- }
- Decorator d;
- uptr size = 0;
- const uptr *trace = StackDepotGet(origin, &size);
- CHECK_GT(size, 0);
- if (TRACE_IS_CHAINED(trace[size - 1])) {
- // Linked origin.
- // FIXME: copied? modified? passed through? observed?
- Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(),
- d.End());
- StackTrace::PrintStack(trace, size - 1);
- origin = TRACE_TO_CHAINED_ID(trace[size - 1]);
- } else {
+ } else if (prev_o.isHeapRoot()) {
+ uptr size = 0;
+ const uptr *trace = StackDepotGet(stack_id, &size);
Printf(" %sUninitialized value was created by a heap allocation%s\n",
d.Origin(), d.End());
StackTrace::PrintStack(trace, size);
break;
+ } else {
+ // chained origin
+ uptr size = 0;
+ const uptr *trace = StackDepotGet(stack_id, &size);
+ // FIXME: copied? modified? passed through? observed?
+ Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(),
+ d.End());
+ StackTrace::PrintStack(trace, size - 1);
+ id = prev_id;
}
}
}
@@ -121,7 +129,13 @@ void ReportAtExitStatistics() {
// FIXME: we want this at normal exit, too!
// FIXME: but only with verbosity=1 or something
Printf("Unique heap origins: %zu\n", stack_depot_stats->n_uniq_ids);
- Printf("Stack depot mapped bytes: %zu\n", stack_depot_stats->mapped);
+ Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats->allocated);
+
+ StackDepotStats *chained_origin_depot_stats = ChainedOriginDepotGetStats();
+ Printf("Unique origin histories: %zu\n",
+ chained_origin_depot_stats->n_uniq_ids);
+ Printf("History depot allocated bytes: %zu\n",
+ chained_origin_depot_stats->allocated);
}
class OriginSet {
diff --git a/lib/msan/tests/msan_test.cc b/lib/msan/tests/msan_test.cc
index 24cd65d77..d0b5ce2ff 100644
--- a/lib/msan/tests/msan_test.cc
+++ b/lib/msan/tests/msan_test.cc
@@ -105,20 +105,6 @@ static bool TrackingOrigins() {
EXPECT_EQ(origin, __msan_get_umr_origin()); \
} while (0)
-#define EXPECT_UMR_S(action, stack_origin) \
- do { \
- __msan_set_expect_umr(1); \
- action; \
- __msan_set_expect_umr(0); \
- U4 id = __msan_get_umr_origin(); \
- const char *str = __msan_get_origin_descr_if_stack(id); \
- if (!str || strcmp(str, stack_origin)) { \
- fprintf(stderr, "EXPECT_POISONED_S: id=%u %s, %s", \
- id, stack_origin, str); \
- EXPECT_EQ(1, 0); \
- } \
- } while (0)
-
#define EXPECT_POISONED(x) ExpectPoisoned(x)
template<typename T>
@@ -136,21 +122,6 @@ void ExpectPoisonedWithOrigin(const T& t, unsigned origin) {
EXPECT_EQ(origin, __msan_get_origin((void*)&t));
}
-#define EXPECT_POISONED_S(x, stack_origin) \
- ExpectPoisonedWithStackOrigin(x, stack_origin)
-
-template<typename T>
-void ExpectPoisonedWithStackOrigin(const T& t, const char *stack_origin) {
- EXPECT_NE(-1, __msan_test_shadow((void*)&t, sizeof(t)));
- U4 id = __msan_get_origin((void*)&t);
- const char *str = __msan_get_origin_descr_if_stack(id);
- if (!str || strcmp(str, stack_origin)) {
- fprintf(stderr, "EXPECT_POISONED_S: id=%u %s, %s",
- id, stack_origin, str);
- EXPECT_EQ(1, 0);
- }
-}
-
#define EXPECT_NOT_POISONED(x) ExpectNotPoisoned(x)
template<typename T>
@@ -3885,29 +3856,6 @@ TEST(MemorySanitizerOrigins, Select) {
EXPECT_POISONED_O(g_0 ? 1 : *GetPoisonedO<S4>(0, __LINE__), __LINE__);
}
-extern "C"
-NOINLINE char AllocaTO() {
- int ar[100];
- break_optimization(ar);
- return ar[10];
- // fprintf(stderr, "Descr: %s\n",
- // __msan_get_origin_descr_if_stack(__msan_get_origin_tls()));
-}
-
-TEST(MemorySanitizerOrigins, Alloca) {
- if (!TrackingOrigins()) return;
- EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO");
- EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO");
- EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO");
- EXPECT_POISONED_S(AllocaTO(), "ar@AllocaTO");
-}
-
-// FIXME: replace with a lit-like test.
-TEST(MemorySanitizerOrigins, DISABLED_AllocaDeath) {
- if (!TrackingOrigins()) return;
- EXPECT_DEATH(AllocaTO(), "ORIGIN: stack allocation: ar@AllocaTO");
-}
-
NOINLINE int RetvalOriginTest(U4 origin) {
int *a = new int;
break_optimization(a);
diff --git a/lib/sanitizer_common/CMakeLists.txt b/lib/sanitizer_common/CMakeLists.txt
index d28e5e6e3..90e063e1d 100644
--- a/lib/sanitizer_common/CMakeLists.txt
+++ b/lib/sanitizer_common/CMakeLists.txt
@@ -12,6 +12,7 @@ set(SANITIZER_SOURCES
sanitizer_libignore.cc
sanitizer_linux.cc
sanitizer_mac.cc
+ sanitizer_persistent_allocator.cc
sanitizer_platform_limits_linux.cc
sanitizer_platform_limits_posix.cc
sanitizer_posix.cc
@@ -65,6 +66,7 @@ set(SANITIZER_HEADERS
sanitizer_list.h
sanitizer_mac.h
sanitizer_mutex.h
+ sanitizer_persistent_allocator.h
sanitizer_placement_new.h
sanitizer_platform.h
sanitizer_platform_interceptors.h
@@ -73,6 +75,7 @@ set(SANITIZER_HEADERS
sanitizer_quarantine.h
sanitizer_report_decorator.h
sanitizer_stackdepot.h
+ sanitizer_stackdepotbase.h
sanitizer_stacktrace.h
sanitizer_stoptheworld.h
sanitizer_suppressions.h
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index ab3cc4522..48b7b42e6 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -542,4 +542,9 @@ inline void *operator new(__sanitizer::operator_new_size_type size,
return alloc.Allocate(size);
}
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
#endif // SANITIZER_COMMON_H
diff --git a/lib/sanitizer_common/sanitizer_persistent_allocator.cc b/lib/sanitizer_common/sanitizer_persistent_allocator.cc
new file mode 100644
index 000000000..5fa533a70
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_persistent_allocator.cc
@@ -0,0 +1,19 @@
+//===-- sanitizer_persistent_allocator.cc -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+PersistentAllocator thePersistentAllocator;
+
+} // namespace __sanitizer
diff --git a/lib/sanitizer_common/sanitizer_persistent_allocator.h b/lib/sanitizer_common/sanitizer_persistent_allocator.h
new file mode 100644
index 000000000..326406b12
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_persistent_allocator.h
@@ -0,0 +1,71 @@
+//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A fast memory allocator that does not support free() or realloc().
+// All allocations are forever.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
+#define SANITIZER_PERSISTENT_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class PersistentAllocator {
+ public:
+ void *alloc(uptr size);
+
+ private:
+ void *tryAlloc(uptr size);
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator for Node's.
+ atomic_uintptr_t region_end;
+};
+
+inline void *PersistentAllocator::tryAlloc(uptr size) {
+ // Optimistic lock-free allocation; essentially try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+ uptr end = atomic_load(&region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + size > end) return 0;
+ if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+ memory_order_acquire))
+ return (void *)cmp;
+ }
+}
+
+inline void *PersistentAllocator::alloc(uptr size) {
+ // First, try to allocate optimistically.
+ void *s = tryAlloc(size);
+ if (s) return s;
+ // If that failed, lock, retry, and allocate a new superblock.
+ SpinMutexLock l(&mtx);
+ for (;;) {
+ s = tryAlloc(size);
+ if (s) return s;
+ atomic_store(&region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < size) allocsz = size;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ atomic_store(&region_end, mem + allocsz, memory_order_release);
+ atomic_store(&region_pos, mem, memory_order_release);
+ }
+}
+
+extern PersistentAllocator thePersistentAllocator;
+inline void *PersistentAlloc(uptr sz) {
+ return thePersistentAllocator.alloc(sz);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
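
Sketch of how depot code calls into this allocator (assumes the runtime is initialized; the size is made up):

// The fast path bumps region_pos with a CAS; the slow path mmaps a fresh
// superblock (>= 64K) under the spin mutex. Nothing is ever freed.
void *mem = PersistentAlloc(128);
// mem remains valid for the lifetime of the process; there is no free().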
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.cc b/lib/sanitizer_common/sanitizer_stackdepot.cc
index 2793bd071..62ab5df30 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.cc
+++ b/lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -12,193 +12,116 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_stackdepot.h"
+
#include "sanitizer_common.h"
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_mutex.h"
-#include "sanitizer_atomic.h"
+#include "sanitizer_stackdepotbase.h"
namespace __sanitizer {
-const int kTabSize = 1024 * 1024; // Hash table size.
-const int kPartBits = 8;
-const int kPartShift = sizeof(u32) * 8 - kPartBits - 1;
-const int kPartCount = 1 << kPartBits; // Number of subparts in the table.
-const int kPartSize = kTabSize / kPartCount;
-const int kMaxId = 1 << kPartShift;
+struct StackDepotDesc {
+ const uptr *stack;
+ uptr size;
+ u32 hash() const {
+ // murmur2
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed ^ (size * sizeof(uptr));
+ for (uptr i = 0; i < size; i++) {
+ u32 k = stack[i];
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+ }
+ bool is_valid() { return size > 0 && stack; }
+};
-struct StackDesc {
- StackDesc *link;
+struct StackDepotNode {
+ StackDepotNode *link;
u32 id;
- u32 hash;
+ atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
uptr size;
uptr stack[1]; // [size]
-};
-static struct {
- StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
- atomic_uintptr_t region_pos; // Region allocator for StackDesc's.
- atomic_uintptr_t region_end;
- atomic_uintptr_t tab[kTabSize]; // Hash table of StackDesc's.
- atomic_uint32_t seq[kPartCount]; // Unique id generators.
-} depot;
+ static const u32 kUseCountBits = 20;
+ static const u32 kMaxUseCount = 1 << kUseCountBits;
+ static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+ static const u32 kHashMask = ~kUseCountMask;
+
+ typedef StackDepotDesc args_type;
+ bool eq(u32 hash, const args_type &args) const {
+ u32 hash_bits =
+ atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
+ if ((hash & kHashMask) != hash_bits || args.size != size) return false;
+ uptr i = 0;
+ for (; i < size; i++) {
+ if (stack[i] != args.stack[i]) return false;
+ }
+ return true;
+ }
+ static uptr storage_size(const args_type &args) {
+ return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+ }
+ void store(const args_type &args, u32 hash) {
+ atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+ size = args.size;
+ internal_memcpy(stack, args.stack, size * sizeof(uptr));
+ }
+ args_type load() const {
+ args_type ret = {&stack[0], size};
+ return ret;
+ }
+ StackDepotHandle get_handle() { return StackDepotHandle(this); }
-static StackDepotStats stats;
+ typedef StackDepotHandle handle_type;
+};
-StackDepotStats *StackDepotGetStats() {
- return &stats;
-}
+COMPILER_CHECK(StackDepotNode::kMaxUseCount == kStackDepotMaxUseCount);
-static u32 hash(const uptr *stack, uptr size) {
- // murmur2
- const u32 m = 0x5bd1e995;
- const u32 seed = 0x9747b28c;
- const u32 r = 24;
- u32 h = seed ^ (size * sizeof(uptr));
- for (uptr i = 0; i < size; i++) {
- u32 k = stack[i];
- k *= m;
- k ^= k >> r;
- k *= m;
- h *= m;
- h ^= k;
- }
- h ^= h >> 13;
- h *= m;
- h ^= h >> 15;
- return h;
+u32 StackDepotHandle::id() { return node_->id; }
+int StackDepotHandle::use_count() {
+ return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
}
-
-static StackDesc *tryallocDesc(uptr memsz) {
- // Optimisic lock-free allocation, essentially try to bump the region ptr.
- for (;;) {
- uptr cmp = atomic_load(&depot.region_pos, memory_order_acquire);
- uptr end = atomic_load(&depot.region_end, memory_order_acquire);
- if (cmp == 0 || cmp + memsz > end)
- return 0;
- if (atomic_compare_exchange_weak(
- &depot.region_pos, &cmp, cmp + memsz,
- memory_order_acquire))
- return (StackDesc*)cmp;
- }
+void StackDepotHandle::inc_use_count_unsafe() {
+ u32 prev =
+ atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
+ CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
}
+uptr StackDepotHandle::size() { return node_->size; }
+uptr *StackDepotHandle::stack() { return &node_->stack[0]; }
-static StackDesc *allocDesc(uptr size) {
- // First, try to allocate optimisitically.
- uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
- StackDesc *s = tryallocDesc(memsz);
- if (s)
- return s;
- // If failed, lock, retry and alloc new superblock.
- SpinMutexLock l(&depot.mtx);
- for (;;) {
- s = tryallocDesc(memsz);
- if (s)
- return s;
- atomic_store(&depot.region_pos, 0, memory_order_relaxed);
- uptr allocsz = 64 * 1024;
- if (allocsz < memsz)
- allocsz = memsz;
- uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
- stats.mapped += allocsz;
- atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
- atomic_store(&depot.region_pos, mem, memory_order_release);
- }
-}
+// FIXME(dvyukov): this single reserved bit is used in TSan.
+typedef StackDepotBase<StackDepotNode, 1> StackDepot;
+static StackDepot theDepot;
-static u32 find(StackDesc *s, const uptr *stack, uptr size, u32 hash) {
- // Searches linked list s for the stack, returns its id.
- for (; s; s = s->link) {
- if (s->hash == hash && s->size == size) {
- uptr i = 0;
- for (; i < size; i++) {
- if (stack[i] != s->stack[i])
- break;
- }
- if (i == size)
- return s->id;
- }
- }
- return 0;
-}
-
-static StackDesc *lock(atomic_uintptr_t *p) {
- // Uses the pointer lsb as mutex.
- for (int i = 0;; i++) {
- uptr cmp = atomic_load(p, memory_order_relaxed);
- if ((cmp & 1) == 0
- && atomic_compare_exchange_weak(p, &cmp, cmp | 1,
- memory_order_acquire))
- return (StackDesc*)cmp;
- if (i < 10)
- proc_yield(10);
- else
- internal_sched_yield();
- }
+StackDepotStats *StackDepotGetStats() {
+ return theDepot.GetStats();
}
-static void unlock(atomic_uintptr_t *p, StackDesc *s) {
- DCHECK_EQ((uptr)s & 1, 0);
- atomic_store(p, (uptr)s, memory_order_release);
+u32 StackDepotPut(const uptr *stack, uptr size) {
+ StackDepotDesc desc = {stack, size};
+ StackDepotHandle h = theDepot.Put(desc);
+ return h.valid() ? h.id() : 0;
}
-u32 StackDepotPut(const uptr *stack, uptr size) {
- if (stack == 0 || size == 0)
- return 0;
- uptr h = hash(stack, size);
- atomic_uintptr_t *p = &depot.tab[h % kTabSize];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- // First, try to find the existing stack.
- u32 id = find(s, stack, size, h);
- if (id)
- return id;
- // If failed, lock, retry and insert new.
- StackDesc *s2 = lock(p);
- if (s2 != s) {
- id = find(s2, stack, size, h);
- if (id) {
- unlock(p, s2);
- return id;
- }
- }
- uptr part = (h % kTabSize) / kPartSize;
- id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
- stats.n_uniq_ids++;
- CHECK_LT(id, kMaxId);
- id |= part << kPartShift;
- CHECK_NE(id, 0);
- CHECK_EQ(id & (1u << 31), 0);
- s = allocDesc(size);
- s->id = id;
- s->hash = h;
- s->size = size;
- internal_memcpy(s->stack, stack, size * sizeof(uptr));
- s->link = s2;
- unlock(p, s);
- return id;
+StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size) {
+ StackDepotDesc desc = {stack, size};
+ return theDepot.Put(desc);
}
const uptr *StackDepotGet(u32 id, uptr *size) {
- if (id == 0)
- return 0;
- CHECK_EQ(id & (1u << 31), 0);
- // High kPartBits contain part id, so we need to scan at most kPartSize lists.
- uptr part = id >> kPartShift;
- for (int i = 0; i != kPartSize; i++) {
- uptr idx = part * kPartSize + i;
- CHECK_LT(idx, kTabSize);
- atomic_uintptr_t *p = &depot.tab[idx];
- uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
- for (; s; s = s->link) {
- if (s->id == id) {
- *size = s->size;
- return s->stack;
- }
- }
- }
- *size = 0;
- return 0;
+ StackDepotDesc desc = theDepot.Get(id);
+ *size = desc.size;
+ return desc.stack;
}
bool StackDepotReverseMap::IdDescPair::IdComparator(
@@ -209,10 +132,10 @@ bool StackDepotReverseMap::IdDescPair::IdComparator(
StackDepotReverseMap::StackDepotReverseMap()
: map_(StackDepotGetStats()->n_uniq_ids + 100) {
- for (int idx = 0; idx < kTabSize; idx++) {
- atomic_uintptr_t *p = &depot.tab[idx];
+ for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
+ atomic_uintptr_t *p = &theDepot.tab[idx];
uptr v = atomic_load(p, memory_order_consume);
- StackDesc *s = (StackDesc*)(v & ~1);
+ StackDepotNode *s = (StackDepotNode*)(v & ~1);
for (; s; s = s->link) {
IdDescPair pair = {s->id, s};
map_.push_back(pair);
@@ -230,7 +153,7 @@ const uptr *StackDepotReverseMap::Get(u32 id, uptr *size) {
*size = 0;
return 0;
}
- StackDesc *desc = map_[idx].desc;
+ StackDepotNode *desc = map_[idx].desc;
*size = desc->size;
return desc->stack;
}
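
The hash_and_use_count word is worth spelling out (illustrative constants, matching the ones defined in StackDepotNode above):

const u32 kUseCountBits = 20;
const u32 kUseCountMask = (1u << kUseCountBits) - 1;  // low 20 bits: uses
const u32 kHashMask = ~kUseCountMask;                 // high 12 bits: hash
u32 hash = 0xDEADBEEFu;
u32 word = hash & kHashMask;      // store(): cache only the top hash bits
word += 1;                        // inc_use_count_unsafe(): bump the count
u32 uses = word & kUseCountMask;  // use_count() -> 1
bool maybe_equal = (hash & kHashMask) == (word & kHashMask);  // eq() fast path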
diff --git a/lib/sanitizer_common/sanitizer_stackdepot.h b/lib/sanitizer_common/sanitizer_stackdepot.h
index 4f77ff28e..58cc91fc6 100644
--- a/lib/sanitizer_common/sanitizer_stackdepot.h
+++ b/lib/sanitizer_common/sanitizer_stackdepot.h
@@ -19,21 +19,27 @@
namespace __sanitizer {
// StackDepot efficiently stores huge amounts of stack traces.
+struct StackDepotNode;
+struct StackDepotHandle {
+ StackDepotNode *node_;
+ StackDepotHandle() : node_(0) {}
+ explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id();
+ int use_count();
+ void inc_use_count_unsafe();
+ uptr size();
+ uptr *stack();
+};
+
+const int kStackDepotMaxUseCount = 1U << 20;
-// Maps stack trace to an unique id.
+StackDepotStats *StackDepotGetStats();
u32 StackDepotPut(const uptr *stack, uptr size);
+StackDepotHandle StackDepotPut_WithHandle(const uptr *stack, uptr size);
// Retrieves a stored stack trace by the id.
const uptr *StackDepotGet(u32 id, uptr *size);
-struct StackDepotStats {
- uptr n_uniq_ids;
- uptr mapped;
-};
-
-StackDepotStats *StackDepotGetStats();
-
-struct StackDesc;
-
// Instantiating this class creates a snapshot of StackDepot which can be
// efficiently queried with StackDepotGet(). You can use it concurrently with
// StackDepot, but the snapshot is only guaranteed to contain those stack traces
@@ -46,7 +52,7 @@ class StackDepotReverseMap {
private:
struct IdDescPair {
u32 id;
- StackDesc *desc;
+ StackDepotNode *desc;
static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
};
@@ -57,6 +63,7 @@ class StackDepotReverseMap {
StackDepotReverseMap(const StackDepotReverseMap&);
void operator=(const StackDepotReverseMap&);
};
+
} // namespace __sanitizer
#endif // SANITIZER_STACKDEPOT_H
diff --git a/lib/sanitizer_common/sanitizer_stackdepotbase.h b/lib/sanitizer_common/sanitizer_stackdepotbase.h
new file mode 100644
index 000000000..07a973c44
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_stackdepotbase.h
@@ -0,0 +1,153 @@
+//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a mapping from arbitrary values to unique 32-bit
+// identifiers.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+template <class Node, int kReservedBits>
+class StackDepotBase {
+ public:
+ typedef typename Node::args_type args_type;
+ typedef typename Node::handle_type handle_type;
+ // Maps stack trace to a unique id.
+ handle_type Put(args_type args, bool *inserted = 0);
+ // Retrieves a stored stack trace by the id.
+ args_type Get(u32 id);
+
+ StackDepotStats *GetStats() { return &stats; }
+
+ private:
+ static Node *find(Node *s, args_type args, u32 hash);
+ static Node *lock(atomic_uintptr_t *p);
+ static void unlock(atomic_uintptr_t *p, Node *s);
+
+ static const int kTabSize = 1024 * 1024; // Hash table size.
+ static const int kPartBits = 8;
+ static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+ static const int kPartCount =
+ 1 << kPartBits; // Number of subparts in the table.
+ static const int kPartSize = kTabSize / kPartCount;
+ static const int kMaxId = 1 << kPartShift;
+
+ atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+
+ StackDepotStats stats;
+
+ friend class StackDepotReverseMap;
+};
+
+template <class Node, int kReservedBits>
+Node *StackDepotBase<Node, kReservedBits>::find(Node *s, args_type args,
+ u32 hash) {
+ // Searches linked list s for the stack, returns its id.
+ for (; s; s = s->link) {
+ if (s->eq(hash, args)) {
+ return s;
+ }
+ }
+ return 0;
+}
+
+template <class Node, int kReservedBits>
+Node *StackDepotBase<Node, kReservedBits>::lock(atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+ return (Node *)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+template <class Node, int kReservedBits>
+void StackDepotBase<Node, kReservedBits>::unlock(atomic_uintptr_t *p, Node *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
+
+template <class Node, int kReservedBits>
+typename StackDepotBase<Node, kReservedBits>::handle_type
+StackDepotBase<Node, kReservedBits>::Put(args_type args, bool *inserted) {
+ if (inserted) *inserted = false;
+ if (!args.is_valid()) return handle_type();
+ uptr h = args.hash();
+ atomic_uintptr_t *p = &tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ // First, try to find the existing stack.
+ Node *node = find(s, args, h);
+ if (node) return node->get_handle();
+ // If that failed, lock, retry, and insert a new node.
+ Node *s2 = lock(p);
+ if (s2 != s) {
+ node = find(s2, args, h);
+ if (node) {
+ unlock(p, s2);
+ return node->get_handle();
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ uptr memsz = Node::storage_size(args);
+ s = (Node *)PersistentAlloc(memsz);
+ stats.allocated += memsz;
+ s->id = id;
+ s->store(args, h);
+ s->link = s2;
+ unlock(p, s);
+ if (inserted) *inserted = true;
+ return s->get_handle();
+}
+
+template <class Node, int kReservedBits>
+typename StackDepotBase<Node, kReservedBits>::args_type
+StackDepotBase<Node, kReservedBits>::Get(u32 id) {
+ if (id == 0) {
+ return args_type();
+ }
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ // High kPartBits contain part id, so we need to scan at most kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ return s->load();
+ }
+ }
+ }
+ return args_type();
+}
+
+} // namespace __sanitizer
+#endif // SANITIZER_STACKDEPOTBASE_H
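
For reference, the contract StackDepotBase expects from Node can be satisfied by a type as small as this sketch (hypothetical, mirroring ChainedOriginDepotNode earlier in the patch):

// Maps a pair of u32s to a unique id, reserving kReservedBits = 2 id bits.
struct PairDesc {
  u32 a, b;
  u32 hash() const { return a ^ b; }
  bool is_valid() { return true; }
};

struct PairNode {
  PairNode *link;  // hash-bucket chain, maintained by StackDepotBase
  u32 id;          // unique id, assigned by StackDepotBase
  u32 a, b;

  typedef PairDesc args_type;
  bool eq(u32 hash, const args_type &args) const {
    return a == args.a && b == args.b;
  }
  static uptr storage_size(const args_type &args) { return sizeof(PairNode); }
  void store(const args_type &args, u32 hash) { a = args.a; b = args.b; }
  args_type load() const {
    args_type ret = {a, b};
    return ret;
  }
  struct Handle {
    PairNode *node_;
    Handle() : node_(0) {}
    explicit Handle(PairNode *node) : node_(node) {}
    bool valid() { return node_; }
    u32 id() { return node_->id; }
  };
  Handle get_handle() { return Handle(this); }
  typedef Handle handle_type;
};

// static StackDepotBase<PairNode, 2> thePairDepot;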