diff options
author | Dmitry Vyukov <dvyukov@google.com> | 2014-03-04 11:39:56 +0000 |
---|---|---|
committer | Dmitry Vyukov <dvyukov@google.com> | 2014-03-04 11:39:56 +0000 |
commit | 123c2a5038b994cf2fcffac4302ce6d96a3d3ce6 (patch) | |
tree | c3c176928ec6b4ae3dc2582f35d759ed573f5c11 /lib/sanitizer_common/sanitizer_mutex.h | |
parent | cc91c67acb92458a64bfc06f5dbd8ac429636d6e (diff) |
tsan: add concurrent hashmap for standalone deadlock detector
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@202826 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_mutex.h')
-rw-r--r-- | lib/sanitizer_common/sanitizer_mutex.h | 84 |
1 file changed, 84 insertions, 0 deletions
diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h
index e812fce90..196c3f53e 100644
--- a/lib/sanitizer_common/sanitizer_mutex.h
+++ b/lib/sanitizer_common/sanitizer_mutex.h
@@ -83,6 +83,88 @@ class BlockingMutex {
   uptr owner_; // for debugging
 };
 
+// Reader-writer spin mutex.
+//
+// State encoding (state_): bit 0 (kWriteLock) is the writer flag; the
+// remaining bits hold the reader count in units of kReadLock (2), so the
+// two never alias. kUnlocked == 0 means no holders at all.
+// Contended paths busy-wait (proc_yield / internal_sched_yield) rather than
+// block, so this is only suitable for short critical sections.
+class RWMutex {
+ public:
+  RWMutex() {
+    atomic_store(&state_, kUnlocked, memory_order_relaxed);
+  }
+
+  ~RWMutex() {
+    // Destroying a mutex that is still held (by a reader or writer)
+    // is a usage bug.
+    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+  }
+
+  // Writer lock. Fast path: a single strong CAS from kUnlocked (so a writer
+  // can only enter when there are no readers and no writer); otherwise fall
+  // through to the spinning slow path.
+  void Lock() {
+    uptr cmp = kUnlocked;
+    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+                                       memory_order_acquire))
+      return;
+    LockSlow();
+  }
+
+  // Writer unlock: clears the write bit with release ordering so the
+  // critical section is visible to the next acquirer.
+  void Unlock() {
+    uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+    DCHECK_NE(prev & kWriteLock, 0);  // must have been write-locked
+    (void)prev;
+  }
+
+  // Reader lock. Optimistically registers this reader with fetch_add; if the
+  // write bit was set at that moment, spin in ReadLockSlow() until the writer
+  // leaves. The pending increment also prevents new writers from entering
+  // (their CAS from kUnlocked fails).
+  void ReadLock() {
+    uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+    if ((prev & kWriteLock) == 0)
+      return;
+    ReadLockSlow();
+  }
+
+  void ReadUnlock() {
+    uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+    DCHECK_EQ(prev & kWriteLock, 0);   // no writer may be active
+    DCHECK_GT(prev & ~kWriteLock, 0);  // at least one reader was registered
+    (void)prev;
+  }
+
+  // Debug-style assertion that the mutex is held in either mode;
+  // it does not distinguish read-locked from write-locked.
+  void CheckLocked() {
+    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+  }
+
+ private:
+  atomic_uintptr_t state_;
+
+  enum {
+    kUnlocked = 0,
+    kWriteLock = 1,
+    kReadLock = 2
+  };
+
+  // Writer slow path: spin until the state returns to kUnlocked, then try to
+  // CAS the write bit in (weak CAS is fine inside the retry loop). Backoff:
+  // cheap proc_yield for the first iterations, then yield the thread.
+  void NOINLINE LockSlow() {
+    for (int i = 0;; i++) {
+      if (i < 10)
+        proc_yield(10);
+      else
+        internal_sched_yield();
+      uptr cmp = atomic_load(&state_, memory_order_relaxed);
+      if (cmp == kUnlocked &&
+          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+                                       memory_order_acquire))
+        return;
+    }
+  }
+
+  // Reader slow path: the reader count was already published by ReadLock(),
+  // so here we only wait for the writer bit to clear.
+  void NOINLINE ReadLockSlow() {
+    for (int i = 0;; i++) {
+      if (i < 10)
+        proc_yield(10);
+      else
+        internal_sched_yield();
+      uptr prev = atomic_load(&state_, memory_order_acquire);
+      if ((prev & kWriteLock) == 0)
+        return;
+    }
+  }
+
+  // Non-copyable (pre-C++11 idiom: declared private, never defined).
+  RWMutex(const RWMutex&);
+  void operator = (const RWMutex&);
+};
+
 template<typename MutexType>
 class GenericScopedLock {
  public:
@@ -123,6 +205,8 @@ class GenericScopedReadLock {
 
 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
 typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+typedef GenericScopedLock<RWMutex> RWMutexLock;
+typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
 
 } // namespace __sanitizer |