author     Kostya Serebryany <kcc@google.com>  2014-02-21 15:07:18 +0000
committer  Kostya Serebryany <kcc@google.com>  2014-02-21 15:07:18 +0000
commit     de60b3af716f062752e1dee410de1064438a20af (patch)
tree       e700e86fb5c432ad36724f4d428b8489ef5f00af
parent     dec7f55adcd8487a99cca5dbd90205a0fe1e73e7 (diff)
[tsan] add coarse-grained lock around the DeadlockDetector. We can do better than that, but that's a start.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@201861 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--  lib/tsan/rtl/tsan_mutex.h                    1
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc                     4
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                      3
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc              34
-rw-r--r--  lib/tsan/rtl/tsan_stat.cc                    1
-rw-r--r--  lib/tsan/rtl/tsan_stat.h                     1
-rw-r--r--  test/tsan/deadlock_detector_stress_test.cc  55
7 files changed, 74 insertions, 25 deletions
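
The patch itself is mechanical: the file-static g_dd detector becomes ctx->dd inside Context, and every detector access in MutexDestroy, MutexLock, and MutexUnlock is serialized through the new ctx->dd_mtx. Below is a minimal standalone C++ sketch of that coarse-grained pattern, using standard library types instead of the sanitizer runtime; ToyDeadlockDetector, Context, and OnMutexLock are illustrative names for this sketch, not compiler-rt APIs.

#include <mutex>
#include <set>
#include <utility>

// Stand-in for __sanitizer::DeadlockDetector<DDBV>: it only records
// "held -> acquired" edges, whereas the real detector keeps a bit-vector
// graph and searches for cycles on every edge insertion.
struct ToyDeadlockDetector {
  std::set<std::pair<int, int>> edges;
  bool onLock(int held, int acquired) {
    edges.insert({held, acquired});
    // Flag a potential lock-order inversion if the reverse edge exists.
    return edges.count({acquired, held}) != 0;
  }
};

struct Context {
  ToyDeadlockDetector dd;
  std::mutex dd_mtx;  // coarse-grained: one mutex serializes all detector work
};

// Mirrors the shape of MutexLock in tsan_rtl_mutex.cc: take dd_mtx around the
// whole detector update rather than making the detector itself thread-safe.
bool OnMutexLock(Context *ctx, int held_id, int acquired_id) {
  std::lock_guard<std::mutex> lk(ctx->dd_mtx);
  return ctx->dd.onLock(held_id, acquired_id);
}

The obvious cost is contention: every MutexLock/MutexUnlock in the instrumented program now takes one global mutex, which is why the commit message calls this a start rather than the final design.
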
diff --git a/lib/tsan/rtl/tsan_mutex.h b/lib/tsan/rtl/tsan_mutex.h
index 4e47d7a31..0cb0e5be4 100644
--- a/lib/tsan/rtl/tsan_mutex.h
+++ b/lib/tsan/rtl/tsan_mutex.h
@@ -31,6 +31,7 @@ enum MutexType {
MutexTypeAtExit,
MutexTypeMBlock,
MutexTypeJavaMBlock,
+ MutexTypeDeadlockDetector,
// This must be the last.
MutexTypeCount
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index b0c2ee35f..408423db2 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -82,7 +82,9 @@ Context::Context()
CreateThreadContext, kMaxTid, kThreadQuarantineSize))
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
- , fired_suppressions(8) {
+ , fired_suppressions(8)
+ , dd_mtx(MutexTypeDeadlockDetector, StatMtxDeadlockDetector) {
+ dd.clear();
}
// The objects are allocated in TLS, so one may rely on zero-initialization.
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index dc7ea0c26..b7090f918 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -549,6 +549,9 @@ struct Context {
// Number of fired suppressions may be large enough.
InternalMmapVector<FiredSuppression> fired_suppressions;
+ __sanitizer::DeadlockDetector<DDBV> dd;
+ Mutex dd_mtx;
+
Flags flags;
u64 stat[StatCnt];
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 364f4b57c..74aec4e6a 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -22,11 +22,10 @@
namespace __tsan {
-static __sanitizer::DeadlockDetector<DDBV> g_dd;
-static void EnsureDeadlockDetectorID(ThreadState *thr, SyncVar *s) {
- if (!g_dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
- s->deadlock_detector_id = g_dd.newNode(reinterpret_cast<uptr>(s));
+static void EnsureDeadlockDetectorID(Context *ctx, SyncVar *s) {
+ if (!ctx->dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
+ s->deadlock_detector_id = ctx->dd.newNode(reinterpret_cast<uptr>(s));
}
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
@@ -61,8 +60,9 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
if (s == 0)
return;
if (common_flags()->detect_deadlocks) {
- if (g_dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
- g_dd.removeNode(s->deadlock_detector_id);
+ Lock lk(&ctx->dd_mtx);
+ if (ctx->dd.nodeBelongsToCurrentEpoch(s->deadlock_detector_id))
+ ctx->dd.removeNode(s->deadlock_detector_id);
s->deadlock_detector_id = 0;
}
if (IsAppMem(addr)) {
@@ -92,11 +92,12 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
}
void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
+ Context *ctx = CTX();
DPrintf("#%d: MutexLock %zx rec=%d\n", thr->tid, addr, rec);
CHECK_GT(rec, 0);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
if (s->owner_tid == SyncVar::kInvalidTid) {
@@ -119,24 +120,25 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
s->recursion += rec;
thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
if (common_flags()->detect_deadlocks) {
- EnsureDeadlockDetectorID(thr, s);
- if (g_dd.isHeld(&thr->deadlock_detector_tls, s->deadlock_detector_id)) {
+ Lock lk(&ctx->dd_mtx);
+ EnsureDeadlockDetectorID(ctx, s);
+ if (ctx->dd.isHeld(&thr->deadlock_detector_tls, s->deadlock_detector_id)) {
// FIXME: add tests, handle the real recursive locks.
Printf("ThreadSanitizer: reursive-lock\n");
}
// Printf("MutexLock: %zx\n", s->deadlock_detector_id);
bool has_deadlock =
- g_dd.onLock(&thr->deadlock_detector_tls, s->deadlock_detector_id);
+ ctx->dd.onLock(&thr->deadlock_detector_tls, s->deadlock_detector_id);
if (has_deadlock) {
uptr path[10];
- uptr len = g_dd.findPathToHeldLock(&thr->deadlock_detector_tls,
+ uptr len = ctx->dd.findPathToHeldLock(&thr->deadlock_detector_tls,
s->deadlock_detector_id, path,
ARRAY_SIZE(path));
CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
ThreadRegistryLock l(CTX()->thread_registry);
ScopedReport rep(ReportTypeDeadlock);
for (uptr i = 0; i < len; i++)
- rep.AddMutex(reinterpret_cast<SyncVar*>(g_dd.getData(path[i])));
+ rep.AddMutex(reinterpret_cast<SyncVar*>(ctx->dd.getData(path[i])));
StackTrace trace;
trace.ObtainCurrent(thr, pc);
rep.AddStack(&trace);
@@ -147,10 +149,11 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr, int rec) {
}
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
+ Context *ctx = CTX();
DPrintf("#%d: MutexUnlock %zx all=%d\n", thr->tid, addr, all);
if (IsAppMem(addr))
MemoryReadAtomic(thr, pc, addr, kSizeLog1);
- SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
+ SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
@@ -180,9 +183,10 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
}
thr->mset.Del(s->GetId(), true);
if (common_flags()->detect_deadlocks) {
- EnsureDeadlockDetectorID(thr, s);
+ Lock lk(&ctx->dd_mtx);
+ EnsureDeadlockDetectorID(ctx, s);
// Printf("MutexUnlock: %zx\n", s->deadlock_detector_id);
- g_dd.onUnlock(&thr->deadlock_detector_tls,
+ ctx->dd.onUnlock(&thr->deadlock_detector_tls,
s->deadlock_detector_id);
}
s->mtx.Unlock();
diff --git a/lib/tsan/rtl/tsan_stat.cc b/lib/tsan/rtl/tsan_stat.cc
index 4da75589d..010a2317f 100644
--- a/lib/tsan/rtl/tsan_stat.cc
+++ b/lib/tsan/rtl/tsan_stat.cc
@@ -145,6 +145,7 @@ void StatOutput(u64 *stat) {
name[StatMtxAnnotations] = " Annotations ";
name[StatMtxMBlock] = " MBlock ";
name[StatMtxJavaMBlock] = " JavaMBlock ";
+ name[StatMtxDeadlockDetector] = " DeadlockDetector ";
name[StatMtxFD] = " FD ";
Printf("Statistics:\n");
diff --git a/lib/tsan/rtl/tsan_stat.h b/lib/tsan/rtl/tsan_stat.h
index 789c4c725..6453bda5f 100644
--- a/lib/tsan/rtl/tsan_stat.h
+++ b/lib/tsan/rtl/tsan_stat.h
@@ -143,6 +143,7 @@ enum StatType {
StatMtxAtExit,
StatMtxMBlock,
StatMtxJavaMBlock,
+ StatMtxDeadlockDetector,
StatMtxFD,
// This must be the last.
diff --git a/test/tsan/deadlock_detector_stress_test.cc b/test/tsan/deadlock_detector_stress_test.cc
index 535ced7c6..74dafe84b 100644
--- a/test/tsan/deadlock_detector_stress_test.cc
+++ b/test/tsan/deadlock_detector_stress_test.cc
@@ -44,8 +44,8 @@ class LockTest {
// CHECK: Starting Test1
fprintf(stderr, "Expecting lock inversion: %p %p\n", A(0), A(1));
// CHECK: Expecting lock inversion: [[A1:0x[a-f0-9]*]] [[A2:0x[a-f0-9]*]]
- L(0); L(1); U(0); U(1);
- L(1); L(0); U(0); U(1);
+ Lock_0_1();
+ Lock_1_0();
// CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock)
// CHECK: path: [[M1:M[0-9]+]] => [[M2:M[0-9]+]] => [[M1]]
// CHECK: Mutex [[M1]] ([[A1]]) created at:
@@ -59,9 +59,9 @@ class LockTest {
// CHECK: Starting Test2
fprintf(stderr, "Expecting lock inversion: %p %p %p\n", A(0), A(1), A(2));
// CHECK: Expecting lock inversion: [[A1:0x[a-f0-9]*]] [[A2:0x[a-f0-9]*]] [[A3:0x[a-f0-9]*]]
- L(0); L(1); U(0); U(1);
- L(1); L(2); U(1); U(2);
- L(2); L(0); U(0); U(2);
+ Lock2(0, 1);
+ Lock2(1, 2);
+ Lock2(2, 0);
// CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock)
// CHECK: path: [[M1:M[0-9]+]] => [[M2:M[0-9]+]] => [[M3:M[0-9]+]] => [[M1]]
// CHECK: Mutex [[M1]] ([[A1]]) created at:
@@ -76,11 +76,11 @@ class LockTest {
void Test3() {
fprintf(stderr, "Starting Test3\n");
// CHECK: Starting Test3
- L(0); L(1); U(0); U(1);
+ Lock_0_1();
L(2);
CreateAndDestroyManyLocks();
U(2);
- L(1); L(0); U(0); U(1);
+ Lock_1_0();
// CHECK: WARNING: ThreadSanitizer: lock-order-inversion (potential deadlock)
// CHECK-NOT: WARNING: ThreadSanitizer:
}
@@ -90,15 +90,27 @@ class LockTest {
void Test4() {
fprintf(stderr, "Starting Test4\n");
// CHECK: Starting Test4
- L(0); L(1); U(0); U(1);
+ Lock_0_1();
L(2);
CreateLockUnlockAndDestroyManyLocks();
U(2);
- L(1); L(0); U(0); U(1);
+ Lock_1_0();
+ // CHECK-NOT: WARNING: ThreadSanitizer:
+ }
+
+ void Test5() {
+ fprintf(stderr, "Starting Test5\n");
+ // CHECK: Starting Test5
+ RunThreads(&LockTest::Lock_0_1, &LockTest::Lock_1_0);
+ // CHECK: WARNING: ThreadSanitizer: lock-order-inversion
// CHECK-NOT: WARNING: ThreadSanitizer:
}
private:
+ void Lock2(size_t l1, size_t l2) { L(l1); L(l2); U(l2); U(l1); }
+ void Lock_0_1() { Lock2(0, 1); }
+ void Lock_1_0() { Lock2(1, 0); }
+
void CreateAndDestroyManyLocks() {
PaddedLock create_many_locks_but_never_acquire[kDeadlockGraphSize];
}
@@ -109,6 +121,30 @@ class LockTest {
many_locks[i].unlock();
}
}
+
+ // LockTest Member function callback.
+ struct CB {
+ void (LockTest::*f)();
+ LockTest *lt;
+ };
+
+ // Thread function with CB.
+ static void *Thread(void *param) {
+ CB *cb = (CB*)param;
+ (cb->lt->*cb->f)();
+ return NULL;
+ }
+
+ void RunThreads(void (LockTest::*f1)(), void (LockTest::*f2)()) {
+ const int kNumThreads = 2;
+ pthread_t t[kNumThreads];
+ CB cb[2] = {{f1, this}, {f2, this}};
+ for (int i = 0; i < kNumThreads; i++)
+ pthread_create(&t[i], 0, Thread, &cb[i]);
+ for (int i = 0; i < kNumThreads; i++)
+ pthread_join(t[i], 0);
+ }
+
static const size_t kDeadlockGraphSize = 4096;
size_t n_;
PaddedLock *locks_;
@@ -119,6 +155,7 @@ int main() {
{ LockTest t(5); t.Test2(); }
{ LockTest t(5); t.Test3(); }
{ LockTest t(5); t.Test4(); }
+ { LockTest t(5); t.Test5(); }
fprintf(stderr, "DONE\n");
// CHECK: DONE
}