diff options
author | Dmitry Vyukov <dvyukov@google.com> | 2014-03-04 11:39:56 +0000 |
---|---|---|
committer | Dmitry Vyukov <dvyukov@google.com> | 2014-03-04 11:39:56 +0000 |
commit | 123c2a5038b994cf2fcffac4302ce6d96a3d3ce6 (patch) | |
tree | c3c176928ec6b4ae3dc2582f35d759ed573f5c11 | |
parent | cc91c67acb92458a64bfc06f5dbd8ac429636d6e (diff) |
tsan: add concurrent hashmap for standalone deadlock detector
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@202826 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r-- | lib/sanitizer_common/sanitizer_addrhashmap.h | 207 | ||||
-rw-r--r-- | lib/sanitizer_common/sanitizer_mutex.h | 84 | ||||
-rw-r--r-- | lib/tsan/dd/dd_rtl.cc | 73 | ||||
-rw-r--r-- | lib/tsan/dd/dd_rtl.h | 11 |
4 files changed, 321 insertions, 54 deletions
diff --git a/lib/sanitizer_common/sanitizer_addrhashmap.h b/lib/sanitizer_common/sanitizer_addrhashmap.h new file mode 100644 index 000000000..b623fcc06 --- /dev/null +++ b/lib/sanitizer_common/sanitizer_addrhashmap.h @@ -0,0 +1,207 @@ +//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Concurrent uptr->T hashmap. +// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_ADDRHASHMAP_H +#define SANITIZER_ADDRHASHMAP_H + +#include "sanitizer_common.h" +#include "sanitizer_mutex.h" +#include "sanitizer_atomic.h" + +namespace __sanitizer { + +// Concurrent uptr->T hashmap. +// T must be a POD type, kSize is preferably a prime but can be any number. +// The hashmap is fixed size, it crashes on overflow. 
+// Usage example: +// +// typedef AddrHashMap<uptr, 11> Map; +// Map m; +// { +// Map::Handle h(&m, addr); +// use h.operator->() to access the data +// if h.created() then the element was just created, and the current thread +// has exclusive access to it +// otherwise the current thread has only read access to the data +// } +// { +// Map::Handle h(&m, addr, true); +// this will remove the data from the map in Handle dtor +// the current thread has exclusive access to the data +// if !h.exists() then the element never existed +// } +template<typename T, uptr kSize> +class AddrHashMap { + private: + struct Cell { + RWMutex mtx; + atomic_uintptr_t addr; + T val; + }; + + public: + AddrHashMap(); + + class Handle { + public: + Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove = false); + ~Handle(); + T *operator -> (); + bool created() const; + bool exists() const; + + private: + AddrHashMap<T, kSize> *map_; + Cell *cell_; + uptr addr_; + bool created_; + bool remove_; + bool write_; + }; + + private: + friend class Handle; + Cell *table_; + + static const uptr kRemoved = 1; + + Cell *acquire(uptr addr, bool remove, bool *created, bool *write); + void release(uptr addr, bool remove, bool write, Cell *c); + uptr hash(uptr addr); +}; + +template<typename T, uptr kSize> +AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr, + bool remove) { + map_ = map; + addr_ = addr; + remove_ = remove; + cell_ = map_->acquire(addr_, remove_, &created_, &write_); +} + +template<typename T, uptr kSize> +AddrHashMap<T, kSize>::Handle::~Handle() { + map_->release(addr_, remove_, write_, cell_); +} + +template<typename T, uptr kSize> +T *AddrHashMap<T, kSize>::Handle::operator -> () { + return &cell_->val; +} + +template<typename T, uptr kSize> +bool AddrHashMap<T, kSize>::Handle::created() const { + return created_; +} + +template<typename T, uptr kSize> +bool AddrHashMap<T, kSize>::Handle::exists() const { + return cell_ != 0; +} + +template<typename T, 
uptr kSize> +AddrHashMap<T, kSize>::AddrHashMap() { + table_ = (Cell*)MmapOrDie(kSize * sizeof(Cell), "AddrHashMap"); +} + +template<typename T, uptr kSize> +typename AddrHashMap<T, kSize>::Cell *AddrHashMap<T, kSize>::acquire(uptr addr, + bool remove, bool *created, bool *write) { + // When we access the element associated with addr, + // we lock its home cell (the cell associated with hash(addr)). + // If the element was just created or is going to be removed, + // we lock the cell in write mode. Otherwise we lock in read mode. + // The locking protects the object lifetime (it's not removed while + // somebody else accesses it). And also it helps to resolve concurrent + // inserts. + // Note that the home cell is not necessarily the cell where the element is + // stored. + *write = false; + *created = false; + uptr h0 = hash(addr); + Cell *c0 = &table_[h0]; + // First try to find an existing element under read lock. + if (!remove) { + c0->mtx.ReadLock(); + uptr h = h0; + for (;;) { + Cell *c = &table_[h]; + uptr addr1 = atomic_load(&c->addr, memory_order_acquire); + if (addr1 == 0) // empty cell denotes end of the cell chain for the elem + break; + if (addr1 == addr) // ok, found it + return c; + h++; + if (h == kSize) + h = 0; + CHECK_NE(h, h0); // made the full cycle + } + c0->mtx.ReadUnlock(); + } + // Now try to create it under write lock. 
+ *write = true; + c0->mtx.Lock(); + uptr h = h0; + for (;;) { + Cell *c = &table_[h]; + uptr addr1 = atomic_load(&c->addr, memory_order_acquire); + if (addr1 == addr) // another thread has inserted it ahead of us + return c; + if (remove && addr1 == 0) { // the element has not existed before + c0->mtx.Unlock(); + return 0; + } + if (!remove && (addr1 == 0 ||addr1 == kRemoved) && + atomic_compare_exchange_strong(&c->addr, &addr1, addr, + memory_order_acq_rel)) { + // we've created the element + *created = true; + return c; + } + h++; + if (h == kSize) + h = 0; + CHECK_NE(h, h0); // made the full cycle + } +} + +template<typename T, uptr kSize> +void AddrHashMap<T, kSize>::release(uptr addr, bool remove, bool write, + Cell *c) { + if (c == 0) + return; + // if we are going to remove, we must hold write lock + CHECK_EQ(!remove || write, true); + CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), addr); + // denote that the cell is empty now + if (remove) + atomic_store(&c->addr, kRemoved, memory_order_release); + // unlock the home cell + uptr h0 = hash(addr); + Cell *c0 = &table_[h0]; + if (write) + c0->mtx.Unlock(); + else + c0->mtx.ReadUnlock(); +} + +template<typename T, uptr kSize> +uptr AddrHashMap<T, kSize>::hash(uptr addr) { + addr += addr << 10; + addr ^= addr >> 6; + return addr % kSize; +} + +} // namespace __sanitizer + +#endif // SANITIZER_ADDRHASHMAP_H diff --git a/lib/sanitizer_common/sanitizer_mutex.h b/lib/sanitizer_common/sanitizer_mutex.h index e812fce90..196c3f53e 100644 --- a/lib/sanitizer_common/sanitizer_mutex.h +++ b/lib/sanitizer_common/sanitizer_mutex.h @@ -83,6 +83,88 @@ class BlockingMutex { uptr owner_; // for debugging }; +// Reader-writer spin mutex. 
+class RWMutex { + public: + RWMutex() { + atomic_store(&state_, kUnlocked, memory_order_relaxed); + } + + ~RWMutex() { + CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked); + } + + void Lock() { + uptr cmp = kUnlocked; + if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock, + memory_order_acquire)) + return; + LockSlow(); + } + + void Unlock() { + uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); + DCHECK_NE(prev & kWriteLock, 0); + (void)prev; + } + + void ReadLock() { + uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); + if ((prev & kWriteLock) == 0) + return; + ReadLockSlow(); + } + + void ReadUnlock() { + uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release); + DCHECK_EQ(prev & kWriteLock, 0); + DCHECK_GT(prev & ~kWriteLock, 0); + (void)prev; + } + + void CheckLocked() { + CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked); + } + + private: + atomic_uintptr_t state_; + + enum { + kUnlocked = 0, + kWriteLock = 1, + kReadLock = 2 + }; + + void NOINLINE LockSlow() { + for (int i = 0;; i++) { + if (i < 10) + proc_yield(10); + else + internal_sched_yield(); + uptr cmp = atomic_load(&state_, memory_order_relaxed); + if (cmp == kUnlocked && + atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, + memory_order_acquire)) + return; + } + } + + void NOINLINE ReadLockSlow() { + for (int i = 0;; i++) { + if (i < 10) + proc_yield(10); + else + internal_sched_yield(); + uptr prev = atomic_load(&state_, memory_order_acquire); + if ((prev & kWriteLock) == 0) + return; + } + } + + RWMutex(const RWMutex&); + void operator = (const RWMutex&); +}; + template<typename MutexType> class GenericScopedLock { public: @@ -123,6 +205,8 @@ class GenericScopedReadLock { typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock; typedef GenericScopedLock<BlockingMutex> BlockingMutexLock; +typedef GenericScopedLock<RWMutex> RWMutexLock; +typedef GenericScopedReadLock<RWMutex> RWMutexReadLock; } 
// namespace __sanitizer diff --git a/lib/tsan/dd/dd_rtl.cc b/lib/tsan/dd/dd_rtl.cc index 09ea4fceb..dfc8fe813 100644 --- a/lib/tsan/dd/dd_rtl.cc +++ b/lib/tsan/dd/dd_rtl.cc @@ -9,16 +9,19 @@ #include "dd_rtl.h" #include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stackdepot.h" namespace __dsan { -static Context ctx0; -static Context * const ctx = &ctx0; +static Context *ctx; void Initialize() { + static u64 ctx_mem[sizeof(Context) / sizeof(u64) + 1]; + ctx = new(ctx_mem) Context(); + InitializeInterceptors(); //common_flags()->allow_addr2line = true; common_flags()->symbolize = true; @@ -37,9 +40,9 @@ void ThreadDestroy(Thread *thr) { static u32 CurrentStackTrace(Thread *thr) { StackTrace trace; - thr->in_symbolizer = true; + thr->ignore_interceptors = true; trace.Unwind(1000, 0, 0, 0, 0, 0, false); - thr->in_symbolizer = false; + thr->ignore_interceptors = false; const uptr skip = 4; if (trace.size <= skip) return 0; @@ -49,39 +52,9 @@ static u32 CurrentStackTrace(Thread *thr) { static void PrintStackTrace(Thread *thr, u32 stk) { uptr size = 0; const uptr *trace = StackDepotGet(stk, &size); - thr->in_symbolizer = true; + thr->ignore_interceptors = true; StackTrace::PrintStack(trace, size); - thr->in_symbolizer = false; -} - -static Mutex *FindMutex(Thread *thr, uptr m) { - SpinMutexLock l(&ctx->mutex_mtx); - for (Mutex *mtx = ctx->mutex_list; mtx; mtx = mtx->link) { - if (mtx->addr == m) - return mtx; - } - Mutex *mtx = (Mutex*)InternalAlloc(sizeof(*mtx)); - internal_memset(mtx, 0, sizeof(*mtx)); - mtx->addr = m; - ctx->dd->MutexInit(&mtx->dd, CurrentStackTrace(thr), ctx->mutex_seq++); - mtx->link = ctx->mutex_list; - ctx->mutex_list = mtx; - return mtx; -} - -static Mutex *FindMutexAndRemove(uptr m) { - SpinMutexLock l(&ctx->mutex_mtx); - Mutex **prev = &ctx->mutex_list; - for (;;) 
{ - Mutex *mtx = *prev; - if (mtx == 0) - return 0; - if (mtx->addr == m) { - *prev = mtx->link; - return mtx; - } - prev = &mtx->link; - } + thr->ignore_interceptors = false; } static void ReportDeadlock(Thread *thr, DDReport *rep) { @@ -96,30 +69,34 @@ static void ReportDeadlock(Thread *thr, DDReport *rep) { } void MutexLock(Thread *thr, uptr m, bool writelock, bool trylock) { - if (thr->in_symbolizer) + if (thr->ignore_interceptors) return; - Mutex *mtx = FindMutex(thr, m); - DDReport *rep = ctx->dd->MutexLock(thr->dd_pt, thr->dd_lt, &mtx->dd, - writelock, trylock); + DDReport *rep = 0; + { + MutexHashMap::Handle h(&ctx->mutex_map, m); + if (h.created()) + ctx->dd->MutexInit(&h->dd, CurrentStackTrace(thr), m); + rep = ctx->dd->MutexLock(thr->dd_pt, thr->dd_lt, &h->dd, + writelock, trylock); + } if (rep) ReportDeadlock(thr, rep); } void MutexUnlock(Thread *thr, uptr m, bool writelock) { - if (thr->in_symbolizer) + if (thr->ignore_interceptors) return; - Mutex *mtx = FindMutex(thr, m); - ctx->dd->MutexUnlock(thr->dd_pt, thr->dd_lt, &mtx->dd, writelock); + MutexHashMap::Handle h(&ctx->mutex_map, m); + ctx->dd->MutexUnlock(thr->dd_pt, thr->dd_lt, &h->dd, writelock); } void MutexDestroy(Thread *thr, uptr m) { - if (thr->in_symbolizer) + if (thr->ignore_interceptors) return; - Mutex *mtx = FindMutexAndRemove(m); - if (mtx == 0) + MutexHashMap::Handle h(&ctx->mutex_map, m, true); + if (!h.exists()) return; - ctx->dd->MutexDestroy(thr->dd_pt, thr->dd_lt, &mtx->dd); - InternalFree(mtx); + ctx->dd->MutexDestroy(thr->dd_pt, thr->dd_lt, &h->dd); } } // namespace __dsan diff --git a/lib/tsan/dd/dd_rtl.h b/lib/tsan/dd/dd_rtl.h index 016200900..4485dbdd7 100644 --- a/lib/tsan/dd/dd_rtl.h +++ b/lib/tsan/dd/dd_rtl.h @@ -12,13 +12,12 @@ #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_deadlock_detector_interface.h" #include "sanitizer_common/sanitizer_allocator_internal.h" +#include "sanitizer_common/sanitizer_addrhashmap.h" #include 
"sanitizer_common/sanitizer_mutex.h" namespace __dsan { struct Mutex { - Mutex *link; - uptr addr; DDMutex dd; }; @@ -26,15 +25,15 @@ struct Thread { DDPhysicalThread *dd_pt; DDLogicalThread *dd_lt; - bool in_symbolizer; + bool ignore_interceptors; }; +typedef AddrHashMap<Mutex, 1000003> MutexHashMap; + struct Context { DDetector *dd; - SpinMutex mutex_mtx; - Mutex *mutex_list; - u64 mutex_seq; + MutexHashMap mutex_map; }; void InitializeInterceptors(); |