From 69ca20b8dfd4f0d89602642bee9684e6a540613e Mon Sep 17 00:00:00 2001
From: Dmitry Vyukov
Date: Wed, 27 Apr 2016 08:23:02 +0000
Subject: tsan: split thread into logical and physical state

This is a reincarnation of http://reviews.llvm.org/D17648 with the bug fix
pointed out by Adhemerval (zatrazz).

Currently ThreadState holds both logical state (required for the
race-detection algorithm, user-visible) and physical state (various caches,
most notably the malloc cache). Move the physical state into a new Processor
entity. Besides just being the right thing from an abstraction point of view,
this solves several problems:

1. Cache everything at the P level in Go. Currently we cache on a mix of
goroutine and OS-thread levels, which unnecessarily increases memory
consumption.

2. Properly handle free operations in Go. Frees are issued by the GC, which
does not have a goroutine context. As a result we could not do anything more
than just clear the shadow; for example, we leaked sync objects and heap
block descriptors.

3. This will allow us to get rid of libc malloc in Go (we now have a
Processor context for the internal allocator cache), which in turn will allow
us to get rid of the dependency on libc entirely.

4. Potentially we can make the Processor per-CPU in C++ mode instead of
per-thread, which will reduce resource consumption.

The distinction between Thread and Processor is currently used only by Go;
C++ creates a Processor per OS thread, which is equivalent to the current
scheme.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@267678 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/tsan/rtl/tsan_rtl_mutex.cc | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

(limited to 'lib/tsan/rtl/tsan_rtl_mutex.cc')

diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index c5682873a..5266cee1a 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -32,7 +32,7 @@ struct Callback : DDCallback {
   Callback(ThreadState *thr, uptr pc)
       : thr(thr)
       , pc(pc) {
-    DDCallback::pt = thr->dd_pt;
+    DDCallback::pt = thr->proc->dd_pt;
     DDCallback::lt = thr->dd_lt;
   }
 
@@ -114,7 +114,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
   u64 mid = s->GetId();
   u32 last_lock = s->last_lock;
   if (!unlock_locked)
-    s->Reset(thr);  // must not reset it before the report is printed
+    s->Reset(thr->proc);  // must not reset it before the report is printed
   s->mtx.Unlock();
   if (unlock_locked) {
     ThreadRegistryLock l(ctx->thread_registry);
@@ -132,7 +132,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
   if (unlock_locked) {
     SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
     if (s != 0) {
-      s->Reset(thr);
+      s->Reset(thr->proc);
       s->mtx.Unlock();
     }
   }
@@ -434,7 +434,7 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
   if (thr->ignore_sync)
     return;
   thr->clock.set(thr->fast_state.epoch());
-  thr->clock.acquire(&thr->clock_cache, c);
+  thr->clock.acquire(&thr->proc->clock_cache, c);
   StatInc(thr, StatSyncAcquire);
 }
 
@@ -443,7 +443,7 @@ void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
     return;
   thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.release(&thr->clock_cache, c);
+  thr->clock.release(&thr->proc->clock_cache, c);
   StatInc(thr, StatSyncRelease);
 }
 
@@ -452,7 +452,7 @@ void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
     return;
   thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.ReleaseStore(&thr->clock_cache, c);
+  thr->clock.ReleaseStore(&thr->proc->clock_cache, c);
   StatInc(thr, StatSyncRelease);
 }
 
@@ -461,7 +461,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
     return;
   thr->clock.set(thr->fast_state.epoch());
   thr->fast_synch_epoch = thr->fast_state.epoch();
-  thr->clock.acq_rel(&thr->clock_cache, c);
+  thr->clock.acq_rel(&thr->proc->clock_cache, c);
   StatInc(thr, StatSyncAcquire);
   StatInc(thr, StatSyncRelease);
 }
--
cgit v1.2.3
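
Note: the hunks above only show call sites being redirected from ThreadState
to the new Processor. Below is a minimal, self-contained sketch of the shape
of that split, assuming a much simplified runtime; only the names proc,
clock_cache and dd_pt correspond to identifiers visible in this diff, while
the remaining types, fields and the ProcWire helper are hypothetical
stand-ins for illustration, not the real TSan declarations.

// proc_split_sketch.cc: illustrative sketch only, not the real TSan runtime.
#include <cstdio>

// Stand-in for the per-Processor cache of vector-clock blocks.
struct ClockCache { int cached_blocks = 0; };
// Stand-in for the deadlock detector's physical-thread state.
struct DDPhysicalThread { };

// Physical state: caches tied to an execution context (a Go P, or an OS
// thread in C++ mode), independent of any particular goroutine/thread.
struct Processor {
  ClockCache clock_cache;
  DDPhysicalThread dd_pt;
};

// Logical state: what the race-detection algorithm tracks per thread.
// While a thread runs it is wired to exactly one Processor.
struct ThreadState {
  Processor *proc = nullptr;
};

// In C++ mode each OS thread gets its own Processor, so the association is
// one-to-one and behaviour matches the pre-patch, per-thread caching scheme.
void ProcWire(Processor *proc, ThreadState *thr) { thr->proc = proc; }

int main() {
  Processor proc;
  ThreadState thr;
  ProcWire(&proc, &thr);
  // Call sites that previously used thr->clock_cache now reach the cache
  // through the Processor, as in the hunks above.
  thr.proc->clock_cache.cached_blocks++;
  std::printf("cached clock blocks: %d\n", thr.proc->clock_cache.cached_blocks);
  return 0;
}

In Go mode the same wiring would instead happen per P, so a free issued by
the GC (which has no goroutine context) can still reach a Processor and its
caches; that is the scenario point 2 of the commit message describes.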