author    Dmitry Vyukov <dvyukov@google.com>  2016-04-27 08:23:02 +0000
committer Dmitry Vyukov <dvyukov@google.com>  2016-04-27 08:23:02 +0000
commit    69ca20b8dfd4f0d89602642bee9684e6a540613e (patch)
tree      489878e905f202d257ed84b5f5ce51eec4461155 /lib/tsan/rtl/tsan_sync.cc
parent    81bba04449f2b776158dedd9fd9843506942e8e4 (diff)
tsan: split thread into logical and physical state
This is a reincarnation of http://reviews.llvm.org/D17648 with the bug fix pointed out by Adhemerval (zatrazz).

Currently ThreadState holds both logical state (required by the race-detection algorithm, user-visible) and physical state (various caches, most notably the malloc cache). Move the physical state into a new Processor entity. Besides being the right thing from an abstraction point of view, this solves several problems:

1. Cache everything at the P level in Go. Currently we cache on a mix of goroutine and OS-thread levels, which unnecessarily increases memory consumption.
2. Properly handle free operations in Go. Frees are issued by the GC, which has no goroutine context, so until now we could do nothing more than clear shadow; for example, we leaked sync objects and heap block descriptors.
3. Get rid of libc malloc in Go (we now have a Processor context for the internal allocator cache), which in turn will allow us to drop the dependency on libc entirely.
4. Potentially make the Processor per-CPU in C++ mode instead of per-thread, which would reduce resource consumption.

The distinction between Thread and Processor is currently used only by Go; C++ creates a Processor per OS thread, which is equivalent to the current scheme.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@267678 91177308-0d34-0410-b5e6-96231b3b80d8
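To make the split concrete, here is a minimal, self-contained sketch of the intended ownership structure. It is an illustration only: the cache types below are stand-ins and the real ThreadState/Processor definitions carry many more fields; what it mirrors from the diff is that the caches now reached as proc->block_cache, proc->sync_cache and proc->clock_cache live on the Processor side, while the thread only points to its Processor.

```cpp
// Sketch only: stand-in cache types, not the real tsan definitions.
#include <cstdint>

struct DenseSlabAllocCache { uint32_t idx[128]; uint64_t count = 0; };  // stand-in
struct ClockCache          { void *chunks[16] = {}; };                  // stand-in

// Physical state: caches that need no goroutine/thread identity.
// In Go a Processor can serve whichever goroutine (or GC worker) is
// currently running; in C++ one Processor is created per OS thread.
struct Processor {
  DenseSlabAllocCache block_cache;  // heap-block descriptors (block_alloc_ in the diff)
  DenseSlabAllocCache sync_cache;   // SyncVar descriptors (sync_alloc_ in the diff)
  ClockCache clock_cache;           // vector-clock chunks
};

// Logical state: what the race-detection algorithm and reports need.
struct ThreadState {
  int tid = 0;
  uint64_t epoch = 0;               // simplified; the real clock state is richer
  Processor *proc = nullptr;        // physical state is reached via thr->proc
};
```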
Diffstat (limited to 'lib/tsan/rtl/tsan_sync.cc')
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc | 46
1 files changed, 23 insertions, 23 deletions
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 4202d30a2..4a85e61b6 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -36,7 +36,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
DDMutexInit(thr, pc, this);
}
-void SyncVar::Reset(ThreadState *thr) {
+void SyncVar::Reset(Processor *proc) {
uid = 0;
creation_stack_id = 0;
owner_tid = kInvalidTid;
@@ -47,12 +47,12 @@ void SyncVar::Reset(ThreadState *thr) {
is_broken = 0;
is_linker_init = 0;
- if (thr == 0) {
+ if (proc == 0) {
CHECK_EQ(clock.size(), 0);
CHECK_EQ(read_clock.size(), 0);
} else {
- clock.Reset(&thr->clock_cache);
- read_clock.Reset(&thr->clock_cache);
+ clock.Reset(&proc->clock_cache);
+ read_clock.Reset(&proc->clock_cache);
}
}
@@ -61,7 +61,7 @@ MetaMap::MetaMap() {
}
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
- u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ u32 idx = block_alloc_.Alloc(&thr->proc->block_cache);
MBlock *b = block_alloc_.Map(idx);
b->siz = sz;
b->tid = thr->tid;
@@ -71,16 +71,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
*meta = idx | kFlagBlock;
}
-uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
MBlock* b = GetBlock(p);
if (b == 0)
return 0;
uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return sz;
}
-bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
bool has_something = false;
u32 *meta = MemToMeta(p);
u32 *end = MemToMeta(p + sz);
@@ -96,14 +96,14 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
has_something = true;
while (idx != 0) {
if (idx & kFlagBlock) {
- block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
+ block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
break;
} else if (idx & kFlagSync) {
DCHECK(idx & kFlagSync);
SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
u32 next = s->next;
- s->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ s->Reset(proc);
+ sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
idx = next;
} else {
CHECK(0);
@@ -119,24 +119,24 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
-void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
if (sz <= 4 * kPageSize) {
// If the range is small, just do the normal free procedure.
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return;
}
// First, round both ends of the range to page size.
uptr diff = RoundUp(p, kPageSize) - p;
if (diff != 0) {
- FreeRange(thr, pc, p, diff);
+ FreeRange(proc, p, diff);
p += diff;
sz -= diff;
}
diff = p + sz - RoundDown(p + sz, kPageSize);
if (diff != 0) {
- FreeRange(thr, pc, p + sz - diff, diff);
+ FreeRange(proc, p + sz - diff, diff);
sz -= diff;
}
// Now we must have a non-empty page-aligned range.
@@ -147,7 +147,7 @@ void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
const uptr sz0 = sz;
// Probe start of the range.
while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p, kPageSize);
+ bool has_something = FreeRange(proc, p, kPageSize);
p += kPageSize;
sz -= kPageSize;
if (!has_something)
@@ -155,7 +155,7 @@ void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
}
// Probe end of the range.
while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
+ bool has_something = FreeRange(proc, p - kPageSize, kPageSize);
sz -= kPageSize;
if (!has_something)
break;
@@ -210,8 +210,8 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
if (s->addr == addr) {
if (myidx != 0) {
- mys->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, myidx);
+ mys->Reset(thr->proc);
+ sync_alloc_.Free(&thr->proc->sync_cache, myidx);
}
if (write_lock)
s->mtx.Lock();
@@ -230,7 +230,7 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
if (myidx == 0) {
const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ myidx = sync_alloc_.Alloc(&thr->proc->sync_cache);
mys = sync_alloc_.Map(myidx);
mys->Init(thr, pc, addr, uid);
}
@@ -279,9 +279,9 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
}
}
-void MetaMap::OnThreadIdle(ThreadState *thr) {
- block_alloc_.FlushCache(&thr->block_cache);
- sync_alloc_.FlushCache(&thr->sync_cache);
+void MetaMap::OnProcIdle(Processor *proc) {
+ block_alloc_.FlushCache(&proc->block_cache);
+ sync_alloc_.FlushCache(&proc->sync_cache);
}
} // namespace __tsan
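For context on how the new signatures are meant to be called, here is a hypothetical, self-contained toy (user_free and gc_free_range are made-up names and this MetaMap is a stub, not tsan code): a C++ caller still reaches the Processor through the thread as thr->proc, while a Go GC free, which has no goroutine context, can now drive FreeBlock/FreeRange with only a Processor.

```cpp
// Toy model of the call-site change; stubbed types, not tsan code.
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

struct Processor   { int block_cache = 0; int sync_cache = 0; };
struct ThreadState { int tid = 0; Processor *proc = nullptr; };

struct MetaMap {
  // New-style signatures from the diff: frees need only a Processor.
  uptr FreeBlock(Processor *proc, uptr p) { (void)proc; return p ? 8 : 0; }
  bool FreeRange(Processor *proc, uptr p, uptr sz) {
    (void)proc; (void)p; return sz != 0;
  }
};

MetaMap metamap;

// C++ path: the thread is known, so the Processor comes from thr->proc.
void user_free(ThreadState *thr, uptr p) {
  metamap.FreeBlock(thr->proc, p);      // was FreeBlock(thr, pc, p)
}

// Go GC path: frees arrive without goroutine context, only a Processor.
void gc_free_range(Processor *proc, uptr p, uptr sz) {
  metamap.FreeRange(proc, p, sz);       // previously required a ThreadState
}

int main() {
  Processor proc;
  ThreadState thr;
  thr.tid = 1;
  thr.proc = &proc;
  user_free(&thr, 0x1000);
  gc_free_range(&proc, 0x2000, 4096);
  std::puts("ok");
}
```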