author     Kostya Serebryany <kcc@google.com>   2014-03-17 12:27:42 +0000
committer  Kostya Serebryany <kcc@google.com>   2014-03-17 12:27:42 +0000
commit     d80aa42b1be94402b1630ef9893bfe468046ba2c (patch)
tree       23d388a375b6e49e58e562c11f93134d3e0890d2 /lib
parent     e07bce06c994605e6c6cd9120ce833653d350fd3 (diff)
[sanitizer] Allow storing the lock context (stack trace id) with every lock currently held by a thread. This will allow the deadlock detector to provide better warnings (more changes to follow).
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@204039 91177308-0d34-0410-b5e6-96231b3b80d8
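
What the patch adds, in isolation: each thread now keeps a small table pairing every held lock with the stack trace id captured when it was acquired; addLock() appends an entry, removeLock() drops it by swapping with the last entry, and findLockContext() does a linear scan. Below is a minimal standalone sketch of that bookkeeping, with illustrative names (HeldLockRegistry etc.) that are not part of compiler-rt:

// Standalone sketch of the per-thread bookkeeping added by this patch.
// The real code lives in DeadlockDetectorTLS (all_locks_with_contexts_ etc.).
#include <cassert>
#include <cstdint>
#include <cstdio>

struct HeldLockRegistry {
  struct Entry { uint32_t lock; uint32_t stk; };
  static const int kMaxHeldLocks = 64;   // same fixed capacity as the patch
  Entry entries[kMaxHeldLocks];
  int size = 0;

  // Remember the acquisition context of a newly acquired lock.
  void add(uint32_t lock, uint32_t stk) {
    assert(size < kMaxHeldLocks);
    entries[size++] = {lock, stk};
  }
  // Forget a released lock: swap the matching entry with the last one.
  void remove(uint32_t lock) {
    for (int i = size - 1; i >= 0; i--) {
      if (entries[i].lock == lock) {
        entries[i] = entries[--size];
        return;
      }
    }
  }
  // Return the stored stack id, or 0 if no context was recorded.
  uint32_t find(uint32_t lock) const {
    for (int i = 0; i < size; i++)
      if (entries[i].lock == lock) return entries[i].stk;
    return 0;
  }
};

int main() {
  HeldLockRegistry r;
  r.add(/*lock=*/1, /*stk=*/10);
  r.add(/*lock=*/2, /*stk=*/11);
  r.remove(1);
  printf("%u %u\n", r.find(1), r.find(2));  // prints "0 11"
}

The fixed 64-entry capacity mirrors the existing recursive_locks[64] array, and the CHECK_LT in addLock() turns overflow into a hard failure rather than silently dropping contexts.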
Diffstat (limited to 'lib')
-rw-r--r--  lib/sanitizer_common/sanitizer_deadlock_detector.h              | 58
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc  | 38
2 files changed, 84 insertions(+), 12 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_deadlock_detector.h b/lib/sanitizer_common/sanitizer_deadlock_detector.h
index dfaacb3d2..a37310178 100644
--- a/lib/sanitizer_common/sanitizer_deadlock_detector.h
+++ b/lib/sanitizer_common/sanitizer_deadlock_detector.h
@@ -41,6 +41,7 @@ class DeadlockDetectorTLS {
bv_.clear();
epoch_ = 0;
n_recursive_locks = 0;
+ n_all_locks_ = 0;
}
bool empty() const { return bv_.empty(); }
@@ -54,7 +55,7 @@ class DeadlockDetectorTLS {
uptr getEpoch() const { return epoch_; }
// Returns true if this is the first (non-recursive) acquisition of this lock.
- bool addLock(uptr lock_id, uptr current_epoch) {
+ bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
// Printf("addLock: %zx %zx\n", lock_id, current_epoch);
CHECK_EQ(epoch_, current_epoch);
if (!bv_.setBit(lock_id)) {
@@ -63,6 +64,12 @@ class DeadlockDetectorTLS {
recursive_locks[n_recursive_locks++] = lock_id;
return false;
}
+ if (stk) {
+ CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
+ LockWithContext &l = all_locks_with_contexts_[n_all_locks_++];
+ l.lock = static_cast<u32>(lock_id); // lock_id is small index into bv_.
+ l.stk = stk;
+ }
return true;
}
@@ -78,6 +85,23 @@ class DeadlockDetectorTLS {
}
// Printf("remLock: %zx %zx\n", lock_id, epoch_);
CHECK(bv_.clearBit(lock_id));
+ if (n_all_locks_) {
+ for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
+ Swap(all_locks_with_contexts_[i],
+ all_locks_with_contexts_[n_all_locks_ - 1]);
+ n_all_locks_--;
+ break;
+ }
+ }
+ }
+ }
+
+ u32 findLockContext(uptr lock_id) {
+ for (uptr i = 0; i < n_all_locks_; i++)
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
+ return all_locks_with_contexts_[i].stk;
+ return 0;
}
const BV &getLocks(uptr current_epoch) const {
@@ -90,6 +114,12 @@ class DeadlockDetectorTLS {
uptr epoch_;
uptr recursive_locks[64];
uptr n_recursive_locks;
+ struct LockWithContext {
+ u32 lock;
+ u32 stk;
+ };
+ LockWithContext all_locks_with_contexts_[64];
+ uptr n_all_locks_;
};
// DeadlockDetector.
@@ -165,11 +195,15 @@ class DeadlockDetector {
return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
}
+ u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ return dtls->findLockContext(nodeToIndex(node));
+ }
+
// Add cur_node to the set of locks held currently by dtls.
- void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
ensureCurrentEpoch(dtls);
uptr cur_idx = nodeToIndex(cur_node);
- dtls->addLock(cur_idx, current_epoch_);
+ dtls->addLock(cur_idx, current_epoch_, stk);
}
// Experimental *racy* fast path function.
@@ -189,7 +223,7 @@ class DeadlockDetector {
// returns the number of added edges, and puts the sources of added edges
// into added_edges[].
// Should be called before onLockAfter.
- uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk) {
+ uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
ensureCurrentEpoch(dtls);
uptr cur_idx = nodeToIndex(cur_node);
uptr added_edges[40];
@@ -215,11 +249,11 @@ class DeadlockDetector {
// Test-only function. Handles the before/after lock events,
// returns true if there is a cycle.
- bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
ensureCurrentEpoch(dtls);
bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
addEdges(dtls, cur_node, 0);
- onLockAfter(dtls, cur_node);
+ onLockAfter(dtls, cur_node, stk);
return is_reachable;
}
@@ -228,20 +262,20 @@ class DeadlockDetector {
// to add this lock to the currently held locks, but we should not try to
// change the lock graph or to detect a cycle. We may want to investigate
// whether a more aggressive strategy is possible for try_lock.
- bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
ensureCurrentEpoch(dtls);
uptr cur_idx = nodeToIndex(cur_node);
- dtls->addLock(cur_idx, current_epoch_);
+ dtls->addLock(cur_idx, current_epoch_, stk);
return false;
}
// Returns true iff dtls is empty (no locks are currently held) and we can
// add the node to the currently held locks w/o changing the global state.
// This operation is thread-safe as it only touches the dtls.
- bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
if (!dtls->empty()) return false;
if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
- dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node));
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
return true;
}
return false;
@@ -274,9 +308,9 @@ class DeadlockDetector {
// Returns true on success.
// This operation is thread-safe as it only touches the dtls
// (modulo racy nature of hasAllEdges).
- bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
if (hasAllEdges(dtls, node)) {
- dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node));
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), 0);
return true;
}
return false;
diff --git a/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc b/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
index 36d384531..a11b5a514 100644
--- a/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_deadlock_detector_test.cc
@@ -406,3 +406,41 @@ void RunRecusriveLockTest() {
TEST(DeadlockDetector, RecusriveLockTest) {
RunRecusriveLockTest<BV2>();
}
+
+template <class BV>
+void RunLockContextTest() {
+ ScopedDD<BV> sdd;
+ DeadlockDetector<BV> &d = *sdd.dp;
+ DeadlockDetectorTLS<BV> &dtls = sdd.dtls;
+
+ uptr l0 = d.newNode(0);
+ uptr l1 = d.newNode(0);
+ uptr l2 = d.newNode(0);
+ uptr l3 = d.newNode(0);
+ uptr l4 = d.newNode(0);
+ EXPECT_FALSE(d.onLock(&dtls, l0, 10));
+ EXPECT_FALSE(d.onLock(&dtls, l1, 11));
+ EXPECT_FALSE(d.onLock(&dtls, l2, 12));
+ EXPECT_FALSE(d.onLock(&dtls, l3, 13));
+ EXPECT_EQ(10U, d.findLockContext(&dtls, l0));
+ EXPECT_EQ(11U, d.findLockContext(&dtls, l1));
+ EXPECT_EQ(12U, d.findLockContext(&dtls, l2));
+ EXPECT_EQ(13U, d.findLockContext(&dtls, l3));
+ d.onUnlock(&dtls, l0);
+ EXPECT_EQ(0U, d.findLockContext(&dtls, l0));
+ EXPECT_EQ(11U, d.findLockContext(&dtls, l1));
+ EXPECT_EQ(12U, d.findLockContext(&dtls, l2));
+ EXPECT_EQ(13U, d.findLockContext(&dtls, l3));
+ d.onUnlock(&dtls, l2);
+ EXPECT_EQ(0U, d.findLockContext(&dtls, l0));
+ EXPECT_EQ(11U, d.findLockContext(&dtls, l1));
+ EXPECT_EQ(0U, d.findLockContext(&dtls, l2));
+ EXPECT_EQ(13U, d.findLockContext(&dtls, l3));
+
+ EXPECT_FALSE(d.onLock(&dtls, l4, 14));
+ EXPECT_EQ(14U, d.findLockContext(&dtls, l4));
+}
+
+TEST(DeadlockDetector, LockContextTest) {
+ RunLockContextTest<BV2>();
+}
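
Not part of the patch, a complementary sketch in the same style as the tests above: the lock-event entry points all take the new optional stk argument, but addLock() only records a context when stk is non-zero, so findLockContext() returns 0 for a lock acquired without one. It could be instantiated like the other tests, e.g. RunNoLockContextSketch<BV2>().

template <class BV>
void RunNoLockContextSketch() {
  // Sketch only (not in the patch): uses the same ScopedDD/BV2 helpers as
  // the tests above.
  ScopedDD<BV> sdd;
  DeadlockDetector<BV> &d = *sdd.dp;
  DeadlockDetectorTLS<BV> &dtls = sdd.dtls;

  uptr a = d.newNode(0);
  uptr b = d.newNode(0);
  EXPECT_FALSE(d.onLock(&dtls, a, 21));  // acquisition context 21 is recorded
  EXPECT_FALSE(d.onTryLock(&dtls, b));   // default stk == 0: nothing recorded
  EXPECT_EQ(21U, d.findLockContext(&dtls, a));
  EXPECT_EQ(0U, d.findLockContext(&dtls, b));  // 0 means "no context"
  d.onUnlock(&dtls, b);
  d.onUnlock(&dtls, a);
}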