path: root/lib/tsan/rtl/tsan_sync.cc
author    Dmitry Vyukov <dvyukov@google.com>  2016-02-26 18:26:48 +0000
committer Dmitry Vyukov <dvyukov@google.com>  2016-02-26 18:26:48 +0000
commit    d7dd5fc8457724232353da5d0209952dd9c9f966 (patch)
tree      6ab79994991c07fe537475a5e8ca2124da1a53ee /lib/tsan/rtl/tsan_sync.cc
parent    9f1a781323756a455a053f2401cbb3fbe5ecf1c4 (diff)
tsan: revert r262037

Broke aarch64 and darwin bots.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@262046 91177308-0d34-0410-b5e6-96231b3b80d8
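(For context: r262037 had moved the allocator caches off ThreadState onto a separate Processor object, and this revert moves them back. A rough sketch of the ownership the revert restores; the type and field names below are simplified assumptions for illustration, not the actual tsan_rtl.h declarations:)

    // Sketch only: simplified stand-ins for the real TSan internal types.
    struct ClockCache { /* per-thread free lists of clock blocks */ };
    struct AllocatorCache { /* per-thread allocator free lists */ };

    struct ThreadState {
      // After this revert, each thread again owns its caches directly,
      // so callers pass a ThreadState* rather than a Processor*.
      ClockCache clock_cache;       // used by SyncVar::Reset
      AllocatorCache block_cache;   // used by MetaMap::AllocBlock/FreeRange
      AllocatorCache sync_cache;    // used by MetaMap::GetAndLock/FreeRange
    };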
Diffstat (limited to 'lib/tsan/rtl/tsan_sync.cc')
-rw-r--r--  lib/tsan/rtl/tsan_sync.cc | 46
1 file changed, 23 insertions, 23 deletions
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 4a85e61b6..4202d30a2 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -36,7 +36,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
     DDMutexInit(thr, pc, this);
 }
 
-void SyncVar::Reset(Processor *proc) {
+void SyncVar::Reset(ThreadState *thr) {
   uid = 0;
   creation_stack_id = 0;
   owner_tid = kInvalidTid;
@@ -47,12 +47,12 @@ void SyncVar::Reset(Processor *proc) {
   is_broken = 0;
   is_linker_init = 0;
 
-  if (proc == 0) {
+  if (thr == 0) {
     CHECK_EQ(clock.size(), 0);
     CHECK_EQ(read_clock.size(), 0);
   } else {
-    clock.Reset(&proc->clock_cache);
-    read_clock.Reset(&proc->clock_cache);
+    clock.Reset(&thr->clock_cache);
+    read_clock.Reset(&thr->clock_cache);
   }
 }
 
@@ -61,7 +61,7 @@ MetaMap::MetaMap() {
 }
 
 void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
-  u32 idx = block_alloc_.Alloc(&thr->proc->block_cache);
+  u32 idx = block_alloc_.Alloc(&thr->block_cache);
   MBlock *b = block_alloc_.Map(idx);
   b->siz = sz;
   b->tid = thr->tid;
@@ -71,16 +71,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
   *meta = idx | kFlagBlock;
 }
 
-uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
   MBlock* b = GetBlock(p);
   if (b == 0)
     return 0;
   uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
-  FreeRange(proc, p, sz);
+  FreeRange(thr, pc, p, sz);
   return sz;
 }
 
-bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
   bool has_something = false;
   u32 *meta = MemToMeta(p);
   u32 *end = MemToMeta(p + sz);
@@ -96,14 +96,14 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
     has_something = true;
     while (idx != 0) {
       if (idx & kFlagBlock) {
-        block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
+        block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
         break;
       } else if (idx & kFlagSync) {
         DCHECK(idx & kFlagSync);
         SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
         u32 next = s->next;
-        s->Reset(proc);
-        sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
+        s->Reset(thr);
+        sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
         idx = next;
       } else {
         CHECK(0);
@@ -119,24 +119,24 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
 // which can be huge. The function probes pages one-by-one until it finds a page
 // without meta objects, at this point it stops freeing meta objects. Because
 // thread stacks grow top-down, we do the same starting from end as well.
-void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
   const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
   const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
   if (sz <= 4 * kPageSize) {
     // If the range is small, just do the normal free procedure.
-    FreeRange(proc, p, sz);
+    FreeRange(thr, pc, p, sz);
     return;
   }
   // First, round both ends of the range to page size.
   uptr diff = RoundUp(p, kPageSize) - p;
   if (diff != 0) {
-    FreeRange(proc, p, diff);
+    FreeRange(thr, pc, p, diff);
     p += diff;
     sz -= diff;
   }
   diff = p + sz - RoundDown(p + sz, kPageSize);
   if (diff != 0) {
-    FreeRange(proc, p + sz - diff, diff);
+    FreeRange(thr, pc, p + sz - diff, diff);
     sz -= diff;
   }
   // Now we must have a non-empty page-aligned range.
@@ -147,7 +147,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
   const uptr sz0 = sz;
   // Probe start of the range.
   while (sz > 0) {
-    bool has_something = FreeRange(proc, p, kPageSize);
+    bool has_something = FreeRange(thr, pc, p, kPageSize);
     p += kPageSize;
     sz -= kPageSize;
     if (!has_something)
@@ -155,7 +155,7 @@ void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
   }
   // Probe end of the range.
   while (sz > 0) {
-    bool has_something = FreeRange(proc, p - kPageSize, kPageSize);
+    bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
     sz -= kPageSize;
     if (!has_something)
       break;
@@ -210,8 +210,8 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
       SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
       if (s->addr == addr) {
         if (myidx != 0) {
-          mys->Reset(thr->proc);
-          sync_alloc_.Free(&thr->proc->sync_cache, myidx);
+          mys->Reset(thr);
+          sync_alloc_.Free(&thr->sync_cache, myidx);
         }
         if (write_lock)
           s->mtx.Lock();
@@ -230,7 +230,7 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
 
     if (myidx == 0) {
       const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
-      myidx = sync_alloc_.Alloc(&thr->proc->sync_cache);
+      myidx = sync_alloc_.Alloc(&thr->sync_cache);
       mys = sync_alloc_.Map(myidx);
       mys->Init(thr, pc, addr, uid);
     }
@@ -279,9 +279,9 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
   }
 }
 
-void MetaMap::OnProcIdle(Processor *proc) {
-  block_alloc_.FlushCache(&proc->block_cache);
-  sync_alloc_.FlushCache(&proc->sync_cache);
+void MetaMap::OnThreadIdle(ThreadState *thr) {
+  block_alloc_.FlushCache(&thr->block_cache);
+  sync_alloc_.FlushCache(&thr->sync_cache);
 }
 
 }  // namespace __tsan
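(The comment above MetaMap::ResetRange in the hunks describes the probing trick: rather than scanning a huge, mostly untouched stack range, free pages from both ends and stop at the first page that holds no meta objects. A minimal standalone sketch of that strategy; FreePage and ProbeReset are hypothetical stand-ins for the real FreeRange machinery, not TSan code:)

    #include <cstdint>

    using uptr = uintptr_t;
    static const uptr kPage = 4096;  // assumed page granularity

    // Hypothetical stand-in for a one-page FreeRange call: frees any meta
    // objects on the page and reports whether the page held any.
    static bool FreePage(uptr p) { (void)p; return false; }

    // Assumes p and sz are already page-aligned (the real ResetRange first
    // trims the unaligned ends with ordinary FreeRange calls).
    static void ProbeReset(uptr p, uptr sz) {
      // Probe forward from the start; the first empty page means the
      // middle of the region was never used, so stop freeing.
      while (sz > 0) {
        bool had_meta = FreePage(p);
        p += kPage;
        sz -= kPage;
        if (!had_meta)
          break;
      }
      // Thread stacks grow top-down, so probe backward from the end too.
      while (sz > 0) {
        bool had_meta = FreePage(p + sz - kPage);
        sz -= kPage;
        if (!had_meta)
          break;
      }
    }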