path: root/lib/tsan/rtl/tsan_stack_trace.cc
author    Dmitry Vyukov <dvyukov@google.com>    2014-05-29 13:50:54 +0000
committer Dmitry Vyukov <dvyukov@google.com>    2014-05-29 13:50:54 +0000
commit    ef0f7ccc94812900a1426fb04979a7779b75db45 (patch)
tree      b4180cb99c6490cf179cbfd94f91400575af677d /lib/tsan/rtl/tsan_stack_trace.cc
parent    dbb3f375dc7377890be4db8f6870a7a53a130a5a (diff)
tsan: refactor storage of meta information for heap blocks and sync objects
The new storage (MetaMap) is based on direct shadow (instead of a hashmap + per-block lists). This solves a number of problems:
- eliminates quadratic behaviour in SyncTab::GetAndLock (https://code.google.com/p/thread-sanitizer/issues/detail?id=26)
- eliminates contention in SyncTab
- eliminates contention in the internal allocator during allocation of sync objects
- removes a bunch of ad-hoc code in the Java interface
- reduces the Java shadow from 2x to 1/2x
- allows memorizing heap block meta info for Java and Go
- allows cleaning up sync object meta info for Go, which in turn enabled the deadlock detector for Go

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@209810 91177308-0d34-0410-b5e6-96231b3b80d8
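For orientation, a minimal sketch of the direct-shadow idea (the names and constants below are illustrative, not the actual MetaMap layout): the meta cell for an application address is computed by constant-time address arithmetic, so lookups need no hashing, no per-block lists, and no locks.

#include <cstdint>

// Hypothetical layout constants, for illustration only.
static const uintptr_t kMetaShadowBeg = 0x300000000000ull;
static const uintptr_t kMetaCell = 8;  // app bytes covered per meta cell
static const uintptr_t kMetaInfo = 4;  // meta bytes stored per cell

// Direct shadow: the meta address is a pure function of the app address.
static uint32_t *MetaForApp(uintptr_t app_addr) {
  return (uint32_t *)(kMetaShadowBeg + (app_addr / kMetaCell) * kMetaInfo);
}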
Diffstat (limited to 'lib/tsan/rtl/tsan_stack_trace.cc')
-rw-r--r--    lib/tsan/rtl/tsan_stack_trace.cc    112
1 file changed, 112 insertions, 0 deletions
diff --git a/lib/tsan/rtl/tsan_stack_trace.cc b/lib/tsan/rtl/tsan_stack_trace.cc
new file mode 100644
index 000000000..a8374f428
--- /dev/null
+++ b/lib/tsan/rtl/tsan_stack_trace.cc
@@ -0,0 +1,112 @@
+//===-- tsan_stack_trace.cc -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
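+// Default-constructed traces allocate their buffer lazily in
+// Init()/ObtainCurrent() and release it in Reset().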
+StackTrace::StackTrace()
+ : n_()
+ , s_()
+ , c_() {
+}
+
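+// Binds the trace to a caller-owned buffer of 'cnt' elements;
+// in this mode the trace never allocates or frees memory.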
+StackTrace::StackTrace(uptr *buf, uptr cnt)
+ : n_()
+ , s_(buf)
+ , c_(cnt) {
+ CHECK_NE(buf, 0);
+ CHECK_NE(cnt, 0);
+}
+
+StackTrace::~StackTrace() {
+ Reset();
+}
+
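+// Frees the buffer only if it was internally allocated; a
+// caller-provided buffer is kept so the object can be reused.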
+void StackTrace::Reset() {
+ if (s_ && !c_) {
+ CHECK_NE(n_, 0);
+ internal_free(s_);
+ s_ = 0;
+ }
+ n_ = 0;
+}
+
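+// Copies 'cnt' PCs into the trace, reusing the caller-provided
+// buffer if any, otherwise allocating exactly 'cnt' slots.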
+void StackTrace::Init(const uptr *pcs, uptr cnt) {
+ Reset();
+ if (cnt == 0)
+ return;
+ if (c_) {
+ CHECK_NE(s_, 0);
+ CHECK_LE(cnt, c_);
+ } else {
+ s_ = (uptr*)internal_alloc(MBlockStackTrace, cnt * sizeof(s_[0]));
+ }
+ n_ = cnt;
+ internal_memcpy(s_, pcs, cnt * sizeof(s_[0]));
+}
+
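+// Captures the calling thread's shadow (call) stack; a non-zero
+// 'toppc' is appended as the topmost frame ('!!toppc' accounts
+// for its extra slot in the size computations below).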
+void StackTrace::ObtainCurrent(ThreadState *thr, uptr toppc) {
+ Reset();
+ n_ = thr->shadow_stack_pos - thr->shadow_stack;
+ if (n_ + !!toppc == 0)
+ return;
+ uptr start = 0;
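+  // If the stack does not fit, drop the oldest (bottom-most) frames.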
+ if (c_) {
+ CHECK_NE(s_, 0);
+ if (n_ + !!toppc > c_) {
+ start = n_ - c_ + !!toppc;
+ n_ = c_ - !!toppc;
+ }
+ } else {
+ // Cap potentially huge stacks.
+ if (n_ + !!toppc > kTraceStackSize) {
+ start = n_ - kTraceStackSize + !!toppc;
+ n_ = kTraceStackSize - !!toppc;
+ }
+ s_ = (uptr*)internal_alloc(MBlockStackTrace,
+ (n_ + !!toppc) * sizeof(s_[0]));
+ }
+ for (uptr i = 0; i < n_; i++)
+ s_[i] = thr->shadow_stack[start + i];
+ if (toppc) {
+ s_[n_] = toppc;
+ n_++;
+ }
+}
+
+void StackTrace::CopyFrom(const StackTrace& other) {
+ Reset();
+ Init(other.Begin(), other.Size());
+}
+
+bool StackTrace::IsEmpty() const {
+ return n_ == 0;
+}
+
+uptr StackTrace::Size() const {
+ return n_;
+}
+
+uptr StackTrace::Get(uptr i) const {
+ CHECK_LT(i, n_);
+ return s_[i];
+}
+
+const uptr *StackTrace::Begin() const {
+ return s_;
+}
+
+} // namespace __tsan
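A hedged usage sketch of the interface above (uptr and the CHECK_* macros come from the sanitizer runtime; the pc values are placeholders):

// Dynamic mode: Init() allocates internally; Reset()/~StackTrace() free.
uptr pcs[2] = {0x400123, 0x400456};  // placeholder return addresses
StackTrace trace;
trace.Init(pcs, 2);
CHECK_EQ(trace.Size(), 2);
CHECK_EQ(trace.Get(0), pcs[0]);

// Static mode: all writes go into 'buf'; nothing is allocated or freed.
uptr buf[64];
StackTrace fixed(buf, 64);
fixed.Init(pcs, 2);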