author     Dmitry Vyukov <dvyukov@google.com>   2013-02-01 09:42:06 +0000
committer  Dmitry Vyukov <dvyukov@google.com>   2013-02-01 09:42:06 +0000
commit     334553ec45d8982df45a6f5e656e068142ecde3f (patch)
tree       04f3f4c5f58d0e846bd0eb01687f6dfbeeb07383
parent     859778a4e2dffa4024fa3e13b105fd62eca44b1c (diff)
tsan: detect races between plain and atomic memory accesses
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@174163 91177308-0d34-0410-b5e6-96231b3b80d8
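The race detector now records in each shadow word whether an access was atomic, so a conflicting pair where one side is a plain memory access and the other is an atomic operation gets reported, while two atomic operations on the same location still do not. A minimal sketch of the newly reported pattern (a hypothetical example in the spirit of the new atomic_race.cc lit test, not part of the patch):

    #include <pthread.h>

    long long counter;

    void *Thread(void *) {
      counter = 42;  // plain (non-atomic) write
      return 0;
    }

    int main() {
      pthread_t t;
      pthread_create(&t, 0, Thread, 0);
      __atomic_fetch_add(&counter, 1, __ATOMIC_RELAXED);  // atomic RMW on the same location
      pthread_join(t, 0);
      return 0;
    }

Built with -fsanitize=thread, the plain store and the relaxed fetch-add form a data race that this change makes visible; replacing the plain store with another atomic operation keeps it silent (see the new atomic_norace.cc test).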
-rw-r--r--  lib/tsan/go/tsan_go.cc                       |  8
-rw-r--r--  lib/tsan/lit_tests/atomic_norace.cc          | 61
-rw-r--r--  lib/tsan/lit_tests/atomic_race.cc            | 76
-rw-r--r--  lib/tsan/rtl/tsan_defs.h                     |  2
-rw-r--r--  lib/tsan/rtl/tsan_fd.cc                      | 10
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc            |  8
-rw-r--r--  lib/tsan/rtl/tsan_interface.cc               |  8
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc        | 22
-rw-r--r--  lib/tsan/rtl/tsan_interface_inl.h            | 18
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc                     | 19
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                      | 86
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc               | 14
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc              |  2
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc              | 22
-rw-r--r--  lib/tsan/rtl/tsan_update_shadow_word_inl.h   | 14
-rw-r--r--  lib/tsan/tests/unit/tsan_shadow_test.cc      |  2
16 files changed, 283 insertions, 89 deletions
diff --git a/lib/tsan/go/tsan_go.cc b/lib/tsan/go/tsan_go.cc
index 77f600b8a..c9c533a79 100644
--- a/lib/tsan/go/tsan_go.cc
+++ b/lib/tsan/go/tsan_go.cc
@@ -107,23 +107,23 @@ void __tsan_map_shadow(uptr addr, uptr size) {
}
void __tsan_read(ThreadState *thr, void *addr, void *pc) {
- MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, false);
+ MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
}
void __tsan_write(ThreadState *thr, void *addr, void *pc) {
- MemoryAccess(thr, (uptr)pc, (uptr)addr, 0, true);
+ MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
}
void __tsan_read_range(ThreadState *thr, void *addr, uptr size, uptr step,
void *pc) {
for (uptr i = 0; i < size; i += step)
- MemoryAccess(thr, (uptr)pc, (uptr)addr + i, 0, false);
+ MemoryRead(thr, (uptr)pc, (uptr)addr + i, kSizeLog1);
}
void __tsan_write_range(ThreadState *thr, void *addr, uptr size, uptr step,
void *pc) {
for (uptr i = 0; i < size; i += step)
- MemoryAccess(thr, (uptr)pc, (uptr)addr + i, 0, true);
+ MemoryWrite(thr, (uptr)pc, (uptr)addr + i, kSizeLog1);
}
void __tsan_func_enter(ThreadState *thr, void *pc) {
diff --git a/lib/tsan/lit_tests/atomic_norace.cc b/lib/tsan/lit_tests/atomic_norace.cc
new file mode 100644
index 000000000..265459b07
--- /dev/null
+++ b/lib/tsan/lit_tests/atomic_norace.cc
@@ -0,0 +1,61 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+
+const int kTestCount = 4;
+typedef long long T;
+T atomics[kTestCount * 2];
+
+void Test(int test, T *p, bool main_thread) {
+ volatile T sink;
+ if (test == 0) {
+ if (main_thread)
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ else
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ } else if (test == 1) {
+ if (main_thread)
+ __atomic_exchange_n(p, 1, __ATOMIC_ACQ_REL);
+ else
+ __atomic_exchange_n(p, 1, __ATOMIC_ACQ_REL);
+ } else if (test == 2) {
+ if (main_thread)
+ sink = __atomic_load_n(p, __ATOMIC_SEQ_CST);
+ else
+ __atomic_store_n(p, 1, __ATOMIC_SEQ_CST);
+ } else if (test == 3) {
+ if (main_thread)
+ sink = __atomic_load_n(p, __ATOMIC_SEQ_CST);
+ else
+ sink = *p;
+ }
+}
+
+void *Thread(void *p) {
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[i], false);
+ }
+ sleep(2);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d reverse\n", i);
+ Test(i, &atomics[kTestCount + i], false);
+ }
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ sleep(1);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d\n", i);
+ Test(i, &atomics[i], true);
+ }
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[kTestCount + i], true);
+ }
+ pthread_join(t, 0);
+}
+
+// CHECK-NOT: ThreadSanitizer: data race
diff --git a/lib/tsan/lit_tests/atomic_race.cc b/lib/tsan/lit_tests/atomic_race.cc
new file mode 100644
index 000000000..cec5809a2
--- /dev/null
+++ b/lib/tsan/lit_tests/atomic_race.cc
@@ -0,0 +1,76 @@
+// RUN: %clangxx_tsan -O1 %s -o %t && %t 2>&1 | FileCheck %s
+#include <pthread.h>
+#include <unistd.h>
+#include <stdio.h>
+
+const int kTestCount = 4;
+typedef long long T;
+T atomics[kTestCount * 2];
+
+void Test(int test, T *p, bool main_thread) {
+ volatile T sink;
+ if (test == 0) {
+ if (main_thread)
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ else
+ *p = 42;
+ } else if (test == 1) {
+ if (main_thread)
+ __atomic_fetch_add(p, 1, __ATOMIC_RELAXED);
+ else
+ sink = *p;
+ } else if (test == 2) {
+ if (main_thread)
+ sink = __atomic_load_n(p, __ATOMIC_SEQ_CST);
+ else
+ *p = 42;
+ } else if (test == 3) {
+ if (main_thread)
+ __atomic_store_n(p, 1, __ATOMIC_SEQ_CST);
+ else
+ sink = *p;
+ }
+}
+
+void *Thread(void *p) {
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[i], false);
+ }
+ sleep(2);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d reverse\n", i);
+ Test(i, &atomics[kTestCount + i], false);
+ }
+ return 0;
+}
+
+int main() {
+ pthread_t t;
+ pthread_create(&t, 0, Thread, 0);
+ sleep(1);
+ for (int i = 0; i < kTestCount; i++) {
+ fprintf(stderr, "Test %d\n", i);
+ Test(i, &atomics[i], true);
+ }
+ for (int i = 0; i < kTestCount; i++) {
+ Test(i, &atomics[kTestCount + i], true);
+ }
+ pthread_join(t, 0);
+}
+
+// CHECK: Test 0
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 1
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 2
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 3
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 0 reverse
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 1 reverse
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 2 reverse
+// CHECK: ThreadSanitizer: data race
+// CHECK: Test 3 reverse
+// CHECK: ThreadSanitizer: data race
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index b18a174d4..5c5ab9ead 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -40,7 +40,7 @@ const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
-const int kClkBits = 43;
+const int kClkBits = 42;
#ifndef TSAN_GO
const int kShadowStackSize = 4 * 1024;
const int kTraceStackSize = 256;
diff --git a/lib/tsan/rtl/tsan_fd.cc b/lib/tsan/rtl/tsan_fd.cc
index ef375a4d9..b3cb88488 100644
--- a/lib/tsan/rtl/tsan_fd.cc
+++ b/lib/tsan/rtl/tsan_fd.cc
@@ -150,7 +150,7 @@ void FdAcquire(ThreadState *thr, uptr pc, int fd) {
FdDesc *d = fddesc(thr, pc, fd);
FdSync *s = d->sync;
DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
- MemoryRead8Byte(thr, pc, (uptr)d);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
if (s)
Acquire(thr, pc, (uptr)s);
}
@@ -161,20 +161,20 @@ void FdRelease(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
if (s)
Release(thr, pc, (uptr)s);
- MemoryRead8Byte(thr, pc, (uptr)d);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
void FdAccess(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
- MemoryRead8Byte(thr, pc, (uptr)d);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}
void FdClose(ThreadState *thr, uptr pc, int fd) {
DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
FdDesc *d = fddesc(thr, pc, fd);
// To catch races between fd usage and close.
- MemoryWrite8Byte(thr, pc, (uptr)d);
+ MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
// We need to clear it, because if we do not intercept any call out there
// that creates fd, we will hit false postives.
MemoryResetRange(thr, pc, (uptr)d, 8);
@@ -193,7 +193,7 @@ void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) {
DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
// Ignore the case when user dups not yet connected socket.
FdDesc *od = fddesc(thr, pc, oldfd);
- MemoryRead8Byte(thr, pc, (uptr)od);
+ MemoryRead(thr, pc, (uptr)od, kSizeLog8);
FdClose(thr, pc, newfd);
init(thr, pc, newfd, ref(od->sync));
}
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index d1a4a67a7..8b8508785 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -1004,14 +1004,14 @@ TSAN_INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
- MemoryWrite1Byte(thr, pc, (uptr)b);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
int res = REAL(pthread_barrier_init)(b, a, count);
return res;
}
TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
- MemoryWrite1Byte(thr, pc, (uptr)b);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
int res = REAL(pthread_barrier_destroy)(b);
return res;
}
@@ -1019,9 +1019,9 @@ TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
Release(thr, pc, (uptr)b);
- MemoryRead1Byte(thr, pc, (uptr)b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
int res = REAL(pthread_barrier_wait)(b);
- MemoryRead1Byte(thr, pc, (uptr)b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
Acquire(thr, pc, (uptr)b);
}
diff --git a/lib/tsan/rtl/tsan_interface.cc b/lib/tsan/rtl/tsan_interface.cc
index 6d0954602..dd06bbe4b 100644
--- a/lib/tsan/rtl/tsan_interface.cc
+++ b/lib/tsan/rtl/tsan_interface.cc
@@ -24,13 +24,13 @@ void __tsan_init() {
}
void __tsan_read16(void *addr) {
- MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr);
- MemoryRead8Byte(cur_thread(), CALLERPC, (uptr)addr + 8);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
}
void __tsan_write16(void *addr) {
- MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr);
- MemoryWrite8Byte(cur_thread(), CALLERPC, (uptr)addr + 8);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
}
void __tsan_acquire(void *addr) {
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index a9d75e5bf..faaaccf7a 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -224,19 +224,36 @@ a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
/**/
template<typename T>
+static int SizeLog() {
+ if (sizeof(T) <= 1)
+ return kSizeLog1;
+ else if (sizeof(T) <= 2)
+ return kSizeLog2;
+ else if (sizeof(T) <= 4)
+ return kSizeLog4;
+ else
+ return kSizeLog8;
+ // For 16-byte atomics we also use 8-byte memory access,
+ // this leads to false negatives only in very obscure cases.
+}
+
+template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
morder mo) {
CHECK(IsLoadOrder(mo));
// This fast-path is critical for performance.
// Assume the access is atomic.
- if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
+ if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return *a;
+ }
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->clock.acquire(&s->clock);
T v = *a;
s->mtx.ReadUnlock();
__sync_synchronize();
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
return v;
}
@@ -244,6 +261,7 @@ template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
// This fast-path is critical for performance.
// Assume the access is atomic.
// Strictly saying even relaxed store cuts off release sequence,
@@ -265,6 +283,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->clock.set(thr->tid, thr->fast_state.epoch());
if (IsAcqRelOrder(mo))
@@ -324,6 +343,7 @@ template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
volatile T *a, T *c, T v, morder mo, morder fmo) {
(void)fmo; // Unused because llvm does not pass it yet.
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
thr->clock.set(thr->tid, thr->fast_state.epoch());
if (IsAcqRelOrder(mo))
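For the atomic entry points, the new SizeLog<T>() helper maps the operand size onto the same kSizeLogN constants used by the plain-access instrumentation, and AtomicLoad/AtomicStore/AtomicRMW/AtomicCAS now report the access to the shadow via MemoryReadAtomic/MemoryWriteAtomic. A standalone sketch of just that size mapping (assumes only the kSizeLog values declared in tsan_rtl.h, not the rest of the runtime):

    #include <cstdio>

    const int kSizeLog1 = 0, kSizeLog2 = 1, kSizeLog4 = 2, kSizeLog8 = 3;

    template <typename T>
    int SizeLog() {
      if (sizeof(T) <= 1) return kSizeLog1;
      if (sizeof(T) <= 2) return kSizeLog2;
      if (sizeof(T) <= 4) return kSizeLog4;
      return kSizeLog8;  // 16-byte atomics are also recorded as 8-byte accesses
    }

    int main() {
      printf("%d %d %d\n", SizeLog<char>(), SizeLog<int>(), SizeLog<long long>());  // prints: 0 2 3
    }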
diff --git a/lib/tsan/rtl/tsan_interface_inl.h b/lib/tsan/rtl/tsan_interface_inl.h
index 8a92155d5..29e2b21ea 100644
--- a/lib/tsan/rtl/tsan_interface_inl.h
+++ b/lib/tsan/rtl/tsan_interface_inl.h
@@ -19,41 +19,41 @@
using namespace __tsan; // NOLINT
void __tsan_read1(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
}
void __tsan_read2(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
}
void __tsan_read4(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
}
void __tsan_read8(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 0);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
}
void __tsan_write1(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 0, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
}
void __tsan_write2(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
}
void __tsan_write4(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
}
void __tsan_write8(void *addr) {
- MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 3, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
}
void __tsan_vptr_update(void **vptr_p, void *new_val) {
CHECK_EQ(sizeof(vptr_p), 8);
if (*vptr_p != new_val)
- MemoryAccess(cur_thread(), CALLERPC, (uptr)vptr_p, 3, 1);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)vptr_p, kSizeLog8);
}
void __tsan_func_entry(void *pc) {
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index b28b0d010..855d645db 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -371,18 +371,6 @@ static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
#endif
}
-static inline bool BothReads(Shadow s, int kAccessIsWrite) {
- return !kAccessIsWrite && !s.is_write();
-}
-
-static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) {
- return old.is_write() || !kAccessIsWrite;
-}
-
-static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) {
- return !old.is_write() || kAccessIsWrite;
-}
-
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
return old.epoch() >= thr->fast_synch_epoch;
}
@@ -393,7 +381,7 @@ static inline bool HappensBefore(Shadow old, ThreadState *thr) {
ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
StatInc(thr, StatMop);
StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
@@ -467,7 +455,7 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite) {
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
u64 *shadow_mem = (u64*)MemToShadow(addr);
DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
" is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
@@ -494,12 +482,13 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
cur.SetWrite(kAccessIsWrite);
+ cur.SetAtomic(kIsAtomic);
// We must not store to the trace if we do not store to the shadow.
// That is, this call must be moved somewhere below.
TraceAddEvent(thr, fast_state, EventTypeMop, pc);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index 617c6d9b1..1d57060d9 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -173,6 +173,7 @@ class FastState {
// freed : 1
// tid : kTidBits
// epoch : kClkBits
+// is_atomic : 1
// is_write : 1
// size_log : 2
// addr0 : 3
@@ -200,10 +201,23 @@ class Shadow : public FastState {
DCHECK_EQ(x_ & 32, 0);
if (kAccessIsWrite)
x_ |= 32;
- DCHECK_EQ(kAccessIsWrite, is_write());
+ DCHECK_EQ(kAccessIsWrite, IsWrite());
}
- bool IsZero() const { return x_ == 0; }
+ void SetAtomic(bool kIsAtomic) {
+ DCHECK(!IsAtomic());
+ if (kIsAtomic)
+ x_ |= kAtomicBit;
+ DCHECK_EQ(IsAtomic(), kIsAtomic);
+ }
+
+ bool IsAtomic() const {
+ return x_ & kAtomicBit;
+ }
+
+ bool IsZero() const {
+ return x_ == 0;
+ }
static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
@@ -250,7 +264,8 @@ class Shadow : public FastState {
}
u64 addr0() const { return x_ & 7; }
u64 size() const { return 1ull << size_log(); }
- bool is_write() const { return x_ & 32; }
+ bool IsWrite() const { return x_ & 32; }
+ bool IsRead() const { return !IsWrite(); }
// The idea behind the freed bit is as follows.
// When the memory is freed (or otherwise unaccessible) we write to the shadow
@@ -271,7 +286,36 @@ class Shadow : public FastState {
return res;
}
+ bool IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ // analyzes 5-th bit (is_write) and 6-th bit (is_atomic)
+ bool v = ((x_ ^ kWriteBit)
+ & u64(((kIsWrite ^ 1) << kWriteShift) | (kIsAtomic << kAtomicShift)));
+ DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+ return v;
+ }
+
+ bool IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool v = (((x_ >> kWriteShift) & 3) ^ 1)
+ <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+ return v;
+ }
+
+ bool IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool v = (((x_ >> kWriteShift) & 3) ^ 1)
+ >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+ return v;
+ }
+
private:
+ static const u64 kWriteShift = 5;
+ static const u64 kWriteBit = 1ull << kWriteShift;
+ static const u64 kAtomicShift = 6;
+ static const u64 kAtomicBit = 1ull << kAtomicShift;
+
u64 size_log() const { return (x_ >> 3) & 3; }
static bool TwoRangesIntersectSLOW(const Shadow s1, const Shadow s2) {
@@ -535,16 +579,38 @@ SyncVar* GetJavaSync(ThreadState *thr, uptr pc, uptr addr,
SyncVar* GetAndRemoveJavaSync(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite);
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur);
-void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr);
-void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr);
-void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr);
-void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
- uptr size, bool is_write);
+ uptr size, bool is_write);
+
+const int kSizeLog1 = 0;
+const int kSizeLog2 = 1;
+const int kSizeLog4 = 2;
+const int kSizeLog8 = 3;
+
+void ALWAYS_INLINE INLINE MemoryRead(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
+}
+
+void ALWAYS_INLINE INLINE MemoryWrite(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
+}
+
+void ALWAYS_INLINE INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
+}
+
+void ALWAYS_INLINE INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+}
+
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
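The shadow word now carries two access-kind bits, is_write at bit 5 and the new is_atomic at bit 6, and the three new predicates compare them as a packed 2-bit key ((is_atomic << 1) | !is_write), ordered from strongest (plain write, key 0) to weakest (atomic read, key 3). A standalone sketch that mirrors the bit trick and cross-checks it against the straightforward conditions from the DCHECKs (assumes only the bit positions shown in the patch):

    #include <cassert>
    #include <cstdint>

    const uint64_t kWriteShift = 5, kWriteBit = 1ull << kWriteShift;
    const uint64_t kAtomicShift = 6, kAtomicBit = 1ull << kAtomicShift;

    // Shadow word carrying only the two access-kind bits.
    uint64_t Make(bool is_write, bool is_atomic) {
      return (is_write ? kWriteBit : 0) | (is_atomic ? kAtomicBit : 0);
    }

    bool IsBothReadsOrAtomic(uint64_t x, bool w, bool a) {
      return ((x ^ kWriteBit) &
              ((uint64_t(!w) << kWriteShift) | (uint64_t(a) << kAtomicShift))) != 0;
    }

    bool IsRWNotWeaker(uint64_t x, bool w, bool a) {
      return (((x >> kWriteShift) & 3) ^ 1) <= uint64_t((!w) | (uint64_t(a) << 1));
    }

    int main() {
      for (int ow = 0; ow < 2; ow++) for (int oa = 0; oa < 2; oa++)
        for (int nw = 0; nw < 2; nw++) for (int na = 0; na < 2; na++) {
          uint64_t old = Make(ow, oa);
          // Same conditions as the DCHECKs in the patch.
          assert(IsBothReadsOrAtomic(old, nw, na) == ((!ow && !nw) || (oa && na)));
          assert(IsRWNotWeaker(old, nw, na) ==
                 ((oa < na) || (oa == na && !ow <= !nw)));
        }
      return 0;  // all 16 old/new combinations agree with the bit versions
    }

IsRWWeakerOrEqual is the mirrored >= comparison on the same key.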
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index d812f12be..6c86b0665 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -27,7 +27,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
DPrintf("#%d: MutexCreate %zx\n", thr->tid, addr);
StatInc(thr, StatMutexCreate);
if (!linker_init && IsAppMem(addr))
- MemoryWrite1Byte(thr, pc, addr);
+ MemoryWrite(thr, pc, addr, kSizeLog1);
SyncVar *s = ctx->synctab.GetOrCreateAndLock(thr, pc, addr, true);
s->is_rw = rw;
s->is_recursive = recursive;
@@ -50,7 +50,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
if (s == 0)
return;
if (IsAppMem(addr))
- MemoryWrite1Byte(thr, pc, addr);
+ MemoryWrite(thr, pc, addr, kSizeLog1);
if (flags()->report_destroy_locked
&& s->owner_tid != SyncVar::kInvalidTid
&& !s->is_broken) {
@@ -75,7 +75,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexLock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
@@ -108,7 +108,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
@@ -146,7 +146,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadLock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadLock);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, false);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
@@ -167,7 +167,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
StatInc(thr, StatMutexReadUnlock);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
thr->fast_state.IncrementEpoch();
TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
@@ -188,7 +188,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
CHECK_GT(thr->in_rtl, 0);
DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
if (IsAppMem(addr))
- MemoryRead1Byte(thr, pc, addr);
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, addr, true);
bool write = true;
if (s->owner_tid == SyncVar::kInvalidTid) {
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index d28c69db7..31eaac157 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -150,7 +150,7 @@ void ScopedReport::AddMemoryAccess(uptr addr, Shadow s,
mop->tid = s.tid();
mop->addr = addr + s.addr0();
mop->size = s.size();
- mop->write = s.is_write();
+ mop->write = s.IsWrite();
mop->stack = SymbolizeStack(*stack);
for (uptr i = 0; i < mset->Size(); i++) {
MutexSet::Desc d = mset->Get(i);
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index 0c74d57e3..3af42e48f 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -397,7 +397,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
shadow_mem, cur);
}
if (unaligned)
@@ -408,7 +408,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
shadow_mem, cur);
shadow_mem += kShadowCnt;
}
@@ -418,24 +418,8 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetWrite(is_write);
cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
shadow_mem, cur);
}
}
-
-void MemoryRead1Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 0, 0);
-}
-
-void MemoryWrite1Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 0, 1);
-}
-
-void MemoryRead8Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 3, 0);
-}
-
-void MemoryWrite8Byte(ThreadState *thr, uptr pc, uptr addr) {
- MemoryAccess(thr, pc, addr, 3, 1);
-}
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_update_shadow_word_inl.h b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
index 0ca90cc4a..e7c036c5d 100644
--- a/lib/tsan/rtl/tsan_update_shadow_word_inl.h
+++ b/lib/tsan/rtl/tsan_update_shadow_word_inl.h
@@ -34,7 +34,7 @@ do {
if (Shadow::TidsAreEqual(old, cur)) {
StatInc(thr, StatShadowSameThread);
if (OldIsInSameSynchEpoch(old, thr)) {
- if (OldIsRWNotWeaker(old, kAccessIsWrite)) {
+ if (old.IsRWNotWeaker(kAccessIsWrite, kIsAtomic)) {
// found a slot that holds effectively the same info
// (that is, same tid, same sync epoch and same size)
StatInc(thr, StatMopSame);
@@ -43,7 +43,7 @@ do {
StoreIfNotYetStored(sp, &store_word);
break;
}
- if (OldIsRWWeakerOrEqual(old, kAccessIsWrite))
+ if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))
StoreIfNotYetStored(sp, &store_word);
break;
}
@@ -52,12 +52,12 @@ do {
StoreIfNotYetStored(sp, &store_word);
break;
}
- if (BothReads(old, kAccessIsWrite))
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
break;
goto RACE;
}
// Do the memory access intersect?
- // In Go all memory accesses are 1 byte, so there can't be no intersections.
+ // In Go all memory accesses are 1 byte, so there can be no intersections.
if (kCppMode && Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
StatInc(thr, StatShadowIntersect);
if (Shadow::TidsAreEqual(old, cur)) {
@@ -65,12 +65,10 @@ do {
break;
}
StatInc(thr, StatShadowAnotherThread);
- if (HappensBefore(old, thr))
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
break;
-
- if (BothReads(old, kAccessIsWrite))
+ if (HappensBefore(old, thr))
break;
-
goto RACE;
}
// The accesses do not intersect.
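In the another-thread branch above, the order of checks changes: the cheap "both reads or both atomics" test now runs before the happens-before check, so compatible pairs never reach the race path even when they are unordered. A compact restatement of the resulting rule (a sketch of the decision only, not the actual shadow-walk code):

    #include <cstdio>

    // Two accesses to the same location from different threads race unless
    // both are reads, both are atomic, or they are ordered by happens-before.
    bool ReportRace(bool old_write, bool old_atomic,
                    bool cur_write, bool cur_atomic, bool happens_before) {
      if ((!old_write && !cur_write) || (old_atomic && cur_atomic))
        return false;  // read/read and atomic/atomic pairs are always fine
      if (happens_before)
        return false;  // properly synchronized plain/atomic mix
      return true;     // e.g. plain store vs. relaxed atomic fetch_add
    }

    int main() {
      // plain write vs. atomic write, unsynchronized: race
      printf("%d\n", ReportRace(true, false, true, true, false));   // 1
      // atomic read vs. atomic write, unsynchronized: no race
      printf("%d\n", ReportRace(false, true, true, true, false));   // 0
    }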
diff --git a/lib/tsan/tests/unit/tsan_shadow_test.cc b/lib/tsan/tests/unit/tsan_shadow_test.cc
index fa9c982c0..d37a26935 100644
--- a/lib/tsan/tests/unit/tsan_shadow_test.cc
+++ b/lib/tsan/tests/unit/tsan_shadow_test.cc
@@ -25,7 +25,7 @@ TEST(Shadow, FastState) {
EXPECT_EQ(s.GetHistorySize(), 0);
EXPECT_EQ(s.addr0(), (u64)0);
EXPECT_EQ(s.size(), (u64)1);
- EXPECT_EQ(s.is_write(), false);
+ EXPECT_EQ(s.IsWrite(), false);
s.IncrementEpoch();
EXPECT_EQ(s.epoch(), (u64)23);