author     Dmitry Vyukov <dvyukov@google.com>   2016-04-27 08:23:02 +0000
committer  Dmitry Vyukov <dvyukov@google.com>   2016-04-27 08:23:02 +0000
commit     69ca20b8dfd4f0d89602642bee9684e6a540613e (patch)
tree       489878e905f202d257ed84b5f5ce51eec4461155 /lib/tsan/rtl
parent     81bba04449f2b776158dedd9fd9843506942e8e4 (diff)
tsan: split thread into logical and physical state
This is a reincarnation of http://reviews.llvm.org/D17648 with the bug fix pointed out by Adhemerval (zatrazz).

Currently ThreadState holds both logical state (required for the race-detection algorithm, user-visible) and physical state (various caches, most notably the malloc cache). Move the physical state into a new Processor entity. Besides being the right thing from an abstraction point of view, this solves several problems:

1. Cache everything at the P level in Go. Currently we cache on a mix of goroutine and OS-thread levels, which unnecessarily increases memory consumption.
2. Properly handle free operations in Go. Frees are issued by the GC, which has no goroutine context, so we could not do anything more than clear the shadow. For example, we leaked sync objects and heap block descriptors.
3. Allow getting rid of libc malloc in Go (we now have a Processor context for the internal allocator cache). This in turn will allow us to drop the dependency on libc entirely.
4. Potentially make Processor per-CPU in C++ mode instead of per-thread, which would reduce resource consumption.

The distinction between Thread and Processor is currently used only by Go; C++ creates a Processor per OS thread, which is equivalent to the current scheme.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@267678 91177308-0d34-0410-b5e6-96231b3b80d8
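For illustration, a minimal self-contained sketch of the split this patch introduces. These are toy types, not the runtime's definitions; the real Processor (see the tsan_rtl.h hunk below) also carries block/sync/clock caches and deadlock-detector state.

// Toy sketch of the logical/physical split (illustrative types, not tsan's).
#include <cassert>

struct ThreadState;  // forward declaration

// Physical state: caches and other per-"processor" resources. It does not
// participate in race detection and is invisible to the end user.
struct Processor {
  ThreadState *thr = nullptr;  // currently wired thread, or nullptr
  // ... allocator caches, clock caches, deadlock-detector state, etc.
};

// Logical state: what the race-detection algorithm operates on.
struct ThreadState {
  int tid = -1;
  Processor *proc = nullptr;  // must be wired before handling any events
};

// Wiring makes the thread's physical resources reachable as thr->proc->...
void ProcWire(Processor *proc, ThreadState *thr) {
  assert(proc->thr == nullptr && thr->proc == nullptr);
  proc->thr = thr;
  thr->proc = proc;
}

void ProcUnwire(Processor *proc, ThreadState *thr) {
  assert(proc->thr == thr && thr->proc == proc);
  proc->thr = nullptr;
  thr->proc = nullptr;
}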
Diffstat (limited to 'lib/tsan/rtl')
-rw-r--r--  lib/tsan/rtl/tsan_defs.h            |  1
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc   |  7
-rw-r--r--  lib/tsan/rtl/tsan_interface_java.cc |  2
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc           | 28
-rw-r--r--  lib/tsan/rtl/tsan_mman.h            |  4
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc            |  6
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h             | 35
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc      | 14
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc     | 22
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc           | 46
-rw-r--r--  lib/tsan/rtl/tsan_sync.h            | 10
11 files changed, 96 insertions(+), 79 deletions(-)
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index 6d2eba6a0..cdc23d0a7 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -149,6 +149,7 @@ struct MD5Hash {
MD5Hash md5_hash(const void *data, uptr size);
+struct Processor;
struct ThreadState;
class ThreadContext;
struct Context;
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index 89d7aa9b7..abc73d0a0 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -740,7 +740,7 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
if (sz != 0) {
// If sz == 0, munmap will return EINVAL and won't unmap any memory.
DontNeedShadowFor((uptr)addr, sz);
- ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
+ ctx->metamap.ResetRange(thr->proc, (uptr)addr, (uptr)sz);
}
int res = REAL(munmap)(addr, sz);
return res;
@@ -835,7 +835,10 @@ STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
namespace __tsan {
void DestroyThreadState() {
ThreadState *thr = cur_thread();
+ Processor *proc = thr->proc;
ThreadFinish(thr);
+ ProcUnwire(proc, thr);
+ ProcDestroy(proc);
ThreadSignalContext *sctx = thr->signal_ctx;
if (sctx) {
thr->signal_ctx = 0;
@@ -886,6 +889,8 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
#endif
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
ThreadStart(thr, tid, GetTid());
atomic_store(&p->tid, 0, memory_order_release);
}
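The interceptor hunks above pair Processor creation/wiring with thread start and unwiring/destruction with thread teardown. Continuing the toy sketch from the commit message (ProcCreate/ProcDestroy bodies are placeholders; the real ones initialize and drain the caches), the intended lifecycle is roughly:

// Toy continuation: lifecycle pairing mirrored from the interceptor changes.
Processor *ProcCreate() { return new Processor(); }

void ProcDestroy(Processor *proc) {
  // The real runtime drains/destroys allocator and clock caches here.
  delete proc;
}

// Mirrors __tsan_thread_start_func: wire a fresh Processor before the thread
// begins handling events.
void ToyThreadStart(ThreadState *thr) {
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
}

// Mirrors DestroyThreadState: unwire first, then destroy the physical state.
void ToyThreadFinish(ThreadState *thr) {
  Processor *proc = thr->proc;
  ProcUnwire(proc, thr);
  ProcDestroy(proc);
}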
diff --git a/lib/tsan/rtl/tsan_interface_java.cc b/lib/tsan/rtl/tsan_interface_java.cc
index 0aea63d11..fd771b46a 100644
--- a/lib/tsan/rtl/tsan_interface_java.cc
+++ b/lib/tsan/rtl/tsan_interface_java.cc
@@ -111,7 +111,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- ctx->metamap.FreeRange(thr, pc, ptr, size);
+ ctx->metamap.FreeRange(thr->proc, ptr, size);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 7247c6e00..9e31a9d83 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -67,14 +67,14 @@ void InitializeAllocator() {
allocator()->Init(common_flags()->allocator_may_return_null);
}
-void AllocatorThreadStart(ThreadState *thr) {
- allocator()->InitCache(&thr->alloc_cache);
- internal_allocator()->InitCache(&thr->internal_alloc_cache);
+void AllocatorProcStart(Processor *proc) {
+ allocator()->InitCache(&proc->alloc_cache);
+ internal_allocator()->InitCache(&proc->internal_alloc_cache);
}
-void AllocatorThreadFinish(ThreadState *thr) {
- allocator()->DestroyCache(&thr->alloc_cache);
- internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
+void AllocatorProcFinish(Processor *proc) {
+ allocator()->DestroyCache(&proc->alloc_cache);
+ internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}
void AllocatorPrintStats() {
@@ -98,7 +98,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return allocator()->ReturnNullOrDie();
- void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
+ void *p = allocator()->Allocate(&thr->proc->alloc_cache, sz, align);
if (p == 0)
return 0;
if (ctx && ctx->initialized)
@@ -120,7 +120,7 @@ void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
- allocator()->Deallocate(&thr->alloc_cache, p);
+ allocator()->Deallocate(&thr->proc->alloc_cache, p);
if (signal)
SignalUnsafeCall(thr, pc);
}
@@ -136,7 +136,7 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
- uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc, p);
DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
@@ -187,7 +187,7 @@ void *internal_alloc(MBlockType typ, uptr sz) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- return InternalAlloc(sz, &thr->internal_alloc_cache);
+ return InternalAlloc(sz, &thr->proc->internal_alloc_cache);
}
void internal_free(void *p) {
@@ -196,7 +196,7 @@ void internal_free(void *p) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- InternalFree(p, &thr->internal_alloc_cache);
+ InternalFree(p, &thr->proc->internal_alloc_cache);
}
} // namespace __tsan
@@ -238,8 +238,8 @@ uptr __sanitizer_get_allocated_size(const void *p) {
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
- allocator()->SwallowCache(&thr->alloc_cache);
- internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
- ctx->metamap.OnThreadIdle(thr);
+ allocator()->SwallowCache(&thr->proc->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->proc->internal_alloc_cache);
+ ctx->metamap.OnProcIdle(thr->proc);
}
} // extern "C"
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index b419b58ca..cc9b0fa3c 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -21,8 +21,8 @@ const uptr kDefaultAlignment = 16;
void InitializeAllocator();
void ReplaceSystemMalloc();
-void AllocatorThreadStart(ThreadState *thr);
-void AllocatorThreadFinish(ThreadState *thr);
+void AllocatorProcStart(Processor *proc);
+void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
// For user allocations.
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 4df4db557..3cbc0b569 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -329,6 +329,10 @@ void Initialize(ThreadState *thr) {
InitializeAllocator();
ReplaceSystemMalloc();
#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
InitializeInterceptors();
CheckShadowMapping();
InitializePlatform();
@@ -351,8 +355,6 @@ void Initialize(ThreadState *thr) {
SetSandboxingCallback(StopBackgroundThread);
#endif
#endif
- if (common_flags()->detect_deadlocks)
- ctx->dd = DDetector::Create(flags());
VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
(int)internal_getpid());
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 057a3a6b7..ad3e24ea2 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -325,6 +325,26 @@ struct JmpBuf {
uptr *shadow_stack_pos;
};
+// A Processor represents a physical thread, or a P for Go.
+// It is used to store internal resources such as the allocator cache, and it
+// does not participate in race-detection logic (invisible to the end user).
+// In C++ it is tied to an OS thread just like ThreadState; ideally, however,
+// it should be tied to a CPU (that way we would have fewer allocator caches).
+// In Go it is tied to a P, so there are significantly fewer Processors than
+// ThreadStates (which are tied to Gs).
+// A ThreadState must be wired with a Processor to handle events.
+struct Processor {
+ ThreadState *thr; // currently wired thread, or nullptr
+#ifndef SANITIZER_GO
+ AllocatorCache alloc_cache;
+ InternalAllocatorCache internal_alloc_cache;
+#endif
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+ DDPhysicalThread *dd_pt;
+};
+
// This struct is stored in TLS.
struct ThreadState {
FastState fast_state;
@@ -360,8 +380,6 @@ struct ThreadState {
MutexSet mset;
ThreadClock clock;
#ifndef SANITIZER_GO
- AllocatorCache alloc_cache;
- InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
@@ -385,16 +403,14 @@ struct ThreadState {
#if SANITIZER_DEBUG && !SANITIZER_GO
InternalDeadlockDetector internal_deadlock_detector;
#endif
- DDPhysicalThread *dd_pt;
DDLogicalThread *dd_lt;
+ // Current wired Processor, or nullptr. Required to handle any events.
+ Processor *proc;
+
atomic_uintptr_t in_signal_handler;
ThreadSignalContext *signal_ctx;
- DenseSlabAllocCache block_cache;
- DenseSlabAllocCache sync_cache;
- DenseSlabAllocCache clock_cache;
-
#ifndef SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
@@ -685,6 +701,11 @@ void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
+Processor *ProcCreate();
+void ProcDestroy(Processor *proc);
+void ProcWire(Processor *proc, ThreadState *thr);
+void ProcUnwire(Processor *proc, ThreadState *thr);
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
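With the caches moved into Processor, the remaining hunks switch event-handling code from thr->clock_cache / thr->sync_cache / thr->block_cache to the corresponding fields of the wired Processor. A small standalone illustration of the new access pattern (toy types again, not the runtime's):

// Toy illustration of the access pattern used throughout the hunks below.
struct ClockCache {};                       // stand-in for DenseSlabAllocCache
struct Proc { ClockCache clock_cache; };    // physical state
struct Thread { Proc *proc = nullptr; };    // logical state, wired at start

ClockCache *EventClockCache(Thread *thr) {
  // Before this patch: &thr->clock_cache. After: &thr->proc->clock_cache.
  // The thread must be wired to a Processor before handling events.
  return &thr->proc->clock_cache;
}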
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index c5682873a..5266cee1a 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -32,7 +32,7 @@ struct Callback : DDCallback {
Callback(ThreadState *thr, uptr pc)
: thr(thr)
, pc(pc) {
- DDCallback::pt = thr->dd_pt;
+ DDCallback::pt = thr->proc->dd_pt;
DDCallback::lt = thr->dd_lt;
}
@@ -114,7 +114,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
u64 mid = s->GetId();
u32 last_lock = s->last_lock;
if (!unlock_locked)
- s->Reset(thr); // must not reset it before the report is printed
+ s->Reset(thr->proc); // must not reset it before the report is printed
s->mtx.Unlock();
if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
@@ -132,7 +132,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
if (unlock_locked) {
SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
if (s != 0) {
- s->Reset(thr);
+ s->Reset(thr->proc);
s->mtx.Unlock();
}
}
@@ -434,7 +434,7 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
thr->clock.set(thr->fast_state.epoch());
- thr->clock.acquire(&thr->clock_cache, c);
+ thr->clock.acquire(&thr->proc->clock_cache, c);
StatInc(thr, StatSyncAcquire);
}
@@ -443,7 +443,7 @@ void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&thr->clock_cache, c);
+ thr->clock.release(&thr->proc->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
@@ -452,7 +452,7 @@ void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(&thr->clock_cache, c);
+ thr->clock.ReleaseStore(&thr->proc->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
@@ -461,7 +461,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(&thr->clock_cache, c);
+ thr->clock.acq_rel(&thr->proc->clock_cache, c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index dcae255f7..353e086c7 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -42,7 +42,7 @@ void ThreadContext::OnDead() {
void ThreadContext::OnJoined(void *arg) {
ThreadState *caller_thr = static_cast<ThreadState *>(arg);
AcquireImpl(caller_thr, 0, &sync);
- sync.Reset(&caller_thr->clock_cache);
+ sync.Reset(&caller_thr->proc->clock_cache);
}
struct OnCreatedArgs {
@@ -74,7 +74,7 @@ void ThreadContext::OnReset() {
void ThreadContext::OnDetached(void *arg) {
ThreadState *thr1 = static_cast<ThreadState*>(arg);
- sync.Reset(&thr1->clock_cache);
+ sync.Reset(&thr1->proc->clock_cache);
}
struct OnStartedArgs {
@@ -106,13 +106,8 @@ void ThreadContext::OnStarted(void *arg) {
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
-#ifndef SANITIZER_GO
- AllocatorThreadStart(thr);
-#endif
- if (common_flags()->detect_deadlocks) {
- thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ if (common_flags()->detect_deadlocks)
thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
- }
thr->fast_state.SetHistorySize(flags()->history_size);
// Commit switch to the new part of the trace.
// TraceAddEvent will reset stack0/mset0 in the new part for us.
@@ -121,7 +116,7 @@ void ThreadContext::OnStarted(void *arg) {
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
StatInc(thr, StatSyncAcquire);
- sync.Reset(&thr->clock_cache);
+ sync.Reset(&thr->proc->clock_cache);
thr->is_inited = true;
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
@@ -138,15 +133,8 @@ void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
- if (common_flags()->detect_deadlocks) {
- ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ if (common_flags()->detect_deadlocks)
ctx->dd->DestroyLogicalThread(thr->dd_lt);
- }
- ctx->clock_alloc.FlushCache(&thr->clock_cache);
- ctx->metamap.OnThreadIdle(thr);
-#ifndef SANITIZER_GO
- AllocatorThreadFinish(thr);
-#endif
thr->~ThreadState();
#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 4202d30a2..4a85e61b6 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -36,7 +36,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
DDMutexInit(thr, pc, this);
}
-void SyncVar::Reset(ThreadState *thr) {
+void SyncVar::Reset(Processor *proc) {
uid = 0;
creation_stack_id = 0;
owner_tid = kInvalidTid;
@@ -47,12 +47,12 @@ void SyncVar::Reset(ThreadState *thr) {
is_broken = 0;
is_linker_init = 0;
- if (thr == 0) {
+ if (proc == 0) {
CHECK_EQ(clock.size(), 0);
CHECK_EQ(read_clock.size(), 0);
} else {
- clock.Reset(&thr->clock_cache);
- read_clock.Reset(&thr->clock_cache);
+ clock.Reset(&proc->clock_cache);
+ read_clock.Reset(&proc->clock_cache);
}
}
@@ -61,7 +61,7 @@ MetaMap::MetaMap() {
}
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
- u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ u32 idx = block_alloc_.Alloc(&thr->proc->block_cache);
MBlock *b = block_alloc_.Map(idx);
b->siz = sz;
b->tid = thr->tid;
@@ -71,16 +71,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
*meta = idx | kFlagBlock;
}
-uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
MBlock* b = GetBlock(p);
if (b == 0)
return 0;
uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return sz;
}
-bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
bool has_something = false;
u32 *meta = MemToMeta(p);
u32 *end = MemToMeta(p + sz);
@@ -96,14 +96,14 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
has_something = true;
while (idx != 0) {
if (idx & kFlagBlock) {
- block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
+ block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
break;
} else if (idx & kFlagSync) {
DCHECK(idx & kFlagSync);
SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
u32 next = s->next;
- s->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ s->Reset(proc);
+ sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
idx = next;
} else {
CHECK(0);
@@ -119,24 +119,24 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
-void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
if (sz <= 4 * kPageSize) {
// If the range is small, just do the normal free procedure.
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return;
}
// First, round both ends of the range to page size.
uptr diff = RoundUp(p, kPageSize) - p;
if (diff != 0) {
- FreeRange(thr, pc, p, diff);
+ FreeRange(proc, p, diff);
p += diff;
sz -= diff;
}
diff = p + sz - RoundDown(p + sz, kPageSize);
if (diff != 0) {
- FreeRange(thr, pc, p + sz - diff, diff);
+ FreeRange(proc, p + sz - diff, diff);
sz -= diff;
}
// Now we must have a non-empty page-aligned range.
@@ -147,7 +147,7 @@ void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
const uptr sz0 = sz;
// Probe start of the range.
while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p, kPageSize);
+ bool has_something = FreeRange(proc, p, kPageSize);
p += kPageSize;
sz -= kPageSize;
if (!has_something)
@@ -155,7 +155,7 @@ void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
}
// Probe end of the range.
while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
+ bool has_something = FreeRange(proc, p - kPageSize, kPageSize);
sz -= kPageSize;
if (!has_something)
break;
@@ -210,8 +210,8 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
if (s->addr == addr) {
if (myidx != 0) {
- mys->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, myidx);
+ mys->Reset(thr->proc);
+ sync_alloc_.Free(&thr->proc->sync_cache, myidx);
}
if (write_lock)
s->mtx.Lock();
@@ -230,7 +230,7 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
if (myidx == 0) {
const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ myidx = sync_alloc_.Alloc(&thr->proc->sync_cache);
mys = sync_alloc_.Map(myidx);
mys->Init(thr, pc, addr, uid);
}
@@ -279,9 +279,9 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
}
}
-void MetaMap::OnThreadIdle(ThreadState *thr) {
- block_alloc_.FlushCache(&thr->block_cache);
- sync_alloc_.FlushCache(&thr->sync_cache);
+void MetaMap::OnProcIdle(Processor *proc) {
+ block_alloc_.FlushCache(&proc->block_cache);
+ sync_alloc_.FlushCache(&proc->sync_cache);
}
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_sync.h b/lib/tsan/rtl/tsan_sync.h
index f07ea3b97..a1d4a1640 100644
--- a/lib/tsan/rtl/tsan_sync.h
+++ b/lib/tsan/rtl/tsan_sync.h
@@ -47,7 +47,7 @@ struct SyncVar {
SyncClock clock;
void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
- void Reset(ThreadState *thr);
+ void Reset(Processor *proc);
u64 GetId() const {
// 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
@@ -72,9 +72,9 @@ class MetaMap {
MetaMap();
void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
- uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
- bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
- void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(Processor *proc, uptr p);
+ bool FreeRange(Processor *proc, uptr p, uptr sz);
+ void ResetRange(Processor *proc, uptr p, uptr sz);
MBlock* GetBlock(uptr p);
SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
@@ -83,7 +83,7 @@ class MetaMap {
void MoveMemory(uptr src, uptr dst, uptr sz);
- void OnThreadIdle(ThreadState *thr);
+ void OnProcIdle(Processor *proc);
private:
static const u32 kFlagMask = 3u << 30;