author    Kostya Serebryany <kcc@google.com>  2014-12-09 01:31:14 +0000
committer Kostya Serebryany <kcc@google.com>  2014-12-09 01:31:14 +0000
commit    a371b02fccce7d968732449b2f094380cfe3d4f8 (patch)
tree      27b1c59ad6f6c4cf6d7ff0bfd066bb4b1f041aa2 /lib/tsan/rtl
parent    e10c4275cb236f88e4b4015b66c23518a46e6cab (diff)
[tsan] remove TSAN_GO in favor of SANITIZER_GO
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@223732 91177308-0d34-0410-b5e6-96231b3b80d8
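The change is mechanical: every preprocessor gate that distinguished the Go runtime build from the C/C++ build switches from the TSan-private TSAN_GO macro to the sanitizer-wide SANITIZER_GO spelling. A minimal sketch of the pattern (not part of the patch), assuming the Go runtime build passes -DSANITIZER_GO on the compiler command line, just as it previously passed -DTSAN_GO:

// Sketch of the gate this commit renames. Before: -DTSAN_GO selected
// the Go flavor of the runtime; after: the sanitizer-common macro
// -DSANITIZER_GO does, so all sanitizers share one spelling.
#include <cstdio>

#ifndef SANITIZER_GO
static const char *RuntimeFlavor() { return "C/C++"; }  // full runtime
#else
static const char *RuntimeFlavor() { return "Go"; }     // reduced runtime
#endif

int main() {
  std::printf("tsan runtime flavor: %s\n", RuntimeFlavor());
}

Build with `clang++ -DSANITIZER_GO sketch.cc` to get the Go-flavor branch.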
Diffstat (limited to 'lib/tsan/rtl')
-rw-r--r--  lib/tsan/rtl/tsan_clock.cc             |  2
-rw-r--r--  lib/tsan/rtl/tsan_defs.h               |  2
-rw-r--r--  lib/tsan/rtl/tsan_interface_atomic.cc  | 20
-rw-r--r--  lib/tsan/rtl/tsan_mutex.cc             | 20
-rw-r--r--  lib/tsan/rtl/tsan_mutexset.h           |  4
-rw-r--r--  lib/tsan/rtl/tsan_platform.h           |  6
-rw-r--r--  lib/tsan/rtl/tsan_platform_linux.cc    | 10
-rw-r--r--  lib/tsan/rtl/tsan_platform_mac.cc      |  4
-rw-r--r--  lib/tsan/rtl/tsan_report.cc            |  4
-rw-r--r--  lib/tsan/rtl/tsan_rtl.cc               | 52
-rw-r--r--  lib/tsan/rtl/tsan_rtl.h                | 16
-rw-r--r--  lib/tsan/rtl/tsan_rtl_mutex.cc         |  4
-rw-r--r--  lib/tsan/rtl/tsan_rtl_report.cc        | 16
-rw-r--r--  lib/tsan/rtl/tsan_rtl_thread.cc        | 16
-rw-r--r--  lib/tsan/rtl/tsan_suppressions.cc      |  4
-rw-r--r--  lib/tsan/rtl/tsan_trace.h              |  4
16 files changed, 92 insertions, 92 deletions
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index 1855f057a..f2b39a182 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -82,7 +82,7 @@
// We don't have ThreadState in these methods, so this is an ugly hack that
// works only in C++.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
#else
# define CPP_STAT_INC(typ) (void)0
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index 0a356fb18..7ed3796b5 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -24,7 +24,7 @@
namespace __tsan {
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
const bool kGoMode = true;
const bool kCppMode = false;
const char *const kTsanOptionsEnv = "GORACE";
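The kTsanOptionsEnv constant chosen here is consumed later in this same patch (GetEnv(kTsanOptionsEnv) in tsan_rtl.cc). A minimal sketch of that usage, assuming the C++ branch of the #ifdef — not shown in this hunk — selects "TSAN_OPTIONS":

// Sketch, not runtime code: read flavor-specific runtime options from
// the environment variable named by kTsanOptionsEnv.
#include <cstdio>
#include <cstdlib>

#ifdef SANITIZER_GO
const char *const kTsanOptionsEnv = "GORACE";
#else
const char *const kTsanOptionsEnv = "TSAN_OPTIONS";  // assumed C++ branch
#endif

int main() {
  const char *options = std::getenv(kTsanOptionsEnv);
  std::printf("%s=%s\n", kTsanOptionsEnv, options ? options : "(unset)");
}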
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index ceb32bd60..9b6995116 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -32,7 +32,7 @@ typedef unsigned char a8;
typedef unsigned short a16; // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64; // NOLINT
-#if !defined(TSAN_GO) && (defined(__SIZEOF_INT128__) \
+#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302))
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
@@ -40,7 +40,7 @@ __extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 0
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
@@ -125,7 +125,7 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(TSAN_GO)
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO)
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
a128 cmp = *v;
@@ -198,7 +198,7 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}
@@ -234,7 +234,7 @@ static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
return atomic_load(to_atomic(a), to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
@@ -264,7 +264,7 @@ static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
atomic_store(to_atomic(a), v, to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(TSAN_GO)
+#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
@@ -456,7 +456,7 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
__sync_synchronize();
}
@@ -468,7 +468,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
#endif
// Interface functions follow.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// C/C++
@@ -867,7 +867,7 @@ void __tsan_atomic_signal_fence(morder mo) {
}
} // extern "C"
-#else // #ifndef TSAN_GO
+#else // #ifndef SANITIZER_GO
// Go
@@ -950,4 +950,4 @@ void __tsan_go_atomic64_compare_exchange(
*(bool*)(a+24) = (cur == cmp);
}
} // extern "C"
-#endif // #ifndef TSAN_GO
+#endif // #ifndef SANITIZER_GO
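The hunks above show the emulation strategy for targets without a native 16-byte compare-and-swap: all 128-bit "atomic" operations are serialized through one lock (mutex128). This stays correct only under the assumption stated in the patch — the atomic variables are not also touched by non-instrumented code. A minimal standalone sketch, with std::mutex standing in for the runtime's StaticSpinMutex:

// Sketch of lock-based 128-bit atomic emulation. Every access must go
// through the same lock for this to be atomic in practice.
#include <mutex>

__extension__ typedef __int128 a128;

static std::mutex mutex128;  // protects all emulated 128-bit atomics

a128 emulated_xchg(volatile a128 *v, a128 op) {
  std::lock_guard<std::mutex> lock(mutex128);
  a128 old = *v;  // read-modify-write, atomic w.r.t. other lock holders
  *v = op;
  return old;
}

a128 emulated_cas(volatile a128 *v, a128 cmp, a128 xch) {
  std::lock_guard<std::mutex> lock(mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;  // caller compares against cmp to detect success
}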
diff --git a/lib/tsan/rtl/tsan_mutex.cc b/lib/tsan/rtl/tsan_mutex.cc
index 55d6e18f2..9ea9bae21 100644
--- a/lib/tsan/rtl/tsan_mutex.cc
+++ b/lib/tsan/rtl/tsan_mutex.cc
@@ -25,7 +25,7 @@ namespace __tsan {
// then Report mutex can be locked while under Threads mutex.
// The leaf mutexes can be locked under any other mutexes.
// Recursive locking is not supported.
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
const MutexType MutexTypeLeaf = (MutexType)-1;
static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*0 MutexTypeInvalid*/ {},
@@ -47,7 +47,7 @@ static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
#endif
void InitializeMutex() {
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
// Build the "can lock" adjacency matrix.
// If [i][j]==true, then one can lock mutex j while under mutex i.
const int N = MutexTypeCount;
@@ -128,7 +128,7 @@ InternalDeadlockDetector::InternalDeadlockDetector() {
// Rely on zero initialization because some mutexes can be locked before ctor.
}
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
void InternalDeadlockDetector::Lock(MutexType t) {
// Printf("LOCK %d @%zu\n", t, seq_ + 1);
CHECK_GT(t, MutexTypeInvalid);
@@ -170,7 +170,7 @@ void InternalDeadlockDetector::CheckNoLocks() {
#endif
void CheckNoLocks(ThreadState *thr) {
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
thr->internal_deadlock_detector.CheckNoLocks();
#endif
}
@@ -222,7 +222,7 @@ Mutex::~Mutex() {
}
void Mutex::Lock() {
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr cmp = kUnlocked;
@@ -234,7 +234,7 @@ void Mutex::Lock() {
cmp = kUnlocked;
if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
memory_order_acquire)) {
-#if TSAN_COLLECT_STATS && !TSAN_GO
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -247,13 +247,13 @@ void Mutex::Unlock() {
uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
(void)prev;
DCHECK_NE(prev & kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
void Mutex::ReadLock() {
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Lock(type_);
#endif
uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
@@ -262,7 +262,7 @@ void Mutex::ReadLock() {
for (Backoff backoff; backoff.Do();) {
prev = atomic_load(&state_, memory_order_acquire);
if ((prev & kWriteLock) == 0) {
-#if TSAN_COLLECT_STATS && !TSAN_GO
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
StatInc(cur_thread(), stat_type_, backoff.Contention());
#endif
return;
@@ -275,7 +275,7 @@ void Mutex::ReadUnlock() {
(void)prev;
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
-#if TSAN_DEBUG && !TSAN_GO
+#if TSAN_DEBUG && !SANITIZER_GO
cur_thread()->internal_deadlock_detector.Unlock(type_);
#endif
}
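Mutex::Lock above follows a classic pattern: one optimistic compare-and-swap, then a backoff loop that retries the CAS and, on success, records how long it spun (the stats gate is what this patch renames). A minimal sketch with std::atomic — Backoff and StatInc are the runtime's own, so the stand-in just loops:

// Sketch of the CAS spinlock acquisition loop shown above.
#include <atomic>
#include <cstdint>

static const uintptr_t kUnlocked  = 0;
static const uintptr_t kWriteLock = 1;

struct SpinLock {
  std::atomic<uintptr_t> state{kUnlocked};

  void Lock() {
    uintptr_t cmp = kUnlocked;
    // Fast path: uncontended acquire.
    if (state.compare_exchange_strong(cmp, kWriteLock,
                                      std::memory_order_acquire))
      return;
    // Slow path: retry until the CAS succeeds.
    for (;;) {
      cmp = kUnlocked;
      if (state.compare_exchange_weak(cmp, kWriteLock,
                                      std::memory_order_acquire))
        return;  // real runtime: StatInc(..., backoff.Contention())
    }
  }

  void Unlock() {
    state.fetch_sub(kWriteLock, std::memory_order_release);
  }
};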
diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h
index 541d181aa..68f0ec26f 100644
--- a/lib/tsan/rtl/tsan_mutexset.h
+++ b/lib/tsan/rtl/tsan_mutexset.h
@@ -43,7 +43,7 @@ class MutexSet {
}
private:
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
@@ -55,7 +55,7 @@ class MutexSet {
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
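The comment above is the whole reason these stubs exist: Go's sync.Mutex behaves like a semaphore (no owner; any goroutine may unlock it), so per-thread mutex-ownership tracking is meaningless and MutexSet compiles to nothing. A C++ sketch of the distinction, using C++20's <semaphore> (illustrative only, not runtime code):

// A mutex has an owner: unlock must happen on the locking thread.
// A semaphore does not: any thread may release it.
#include <semaphore>
#include <thread>

int main() {
  std::binary_semaphore sem(1);
  sem.acquire();                          // "lock" on the main thread
  std::thread t([&] { sem.release(); });  // legal: released elsewhere
  t.join();
  // The same with std::mutex (unlock on a non-owning thread) is
  // undefined behavior, because a mutex tracks its owner.
}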
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index cf52312ba..270a7519d 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -24,7 +24,7 @@
namespace __tsan {
-#if !defined(TSAN_GO)
+#if !defined(SANITIZER_GO)
/*
C/C++ on linux and freebsd
@@ -102,7 +102,7 @@ static USED uptr UserRegions[] = {
kHeapMemBeg, kHeapMemEnd,
};
-#elif defined(TSAN_GO) && !SANITIZER_WINDOWS
+#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
/* Go on linux, darwin and freebsd
0000 0000 1000 - 0000 1000 0000: executable
@@ -164,7 +164,7 @@ static USED uptr UserRegions[] = {
kAppMemBeg, kAppMemEnd,
};
-#elif defined(TSAN_GO) && SANITIZER_WINDOWS
+#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
/* Go on windows
0000 0000 1000 - 0000 1000 0000: executable
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index e48e86fb9..4dcfa5585 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -87,7 +87,7 @@ void FillProfileCallback(uptr p, uptr rss, bool file,
mem[MemShadow] += rss;
else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
mem[MemMeta] += rss;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
else if (p >= kHeapMemBeg && p < kHeapMemEnd)
mem[MemHeap] += rss;
else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
@@ -132,7 +132,7 @@ void FlushShadowMemory() {
#endif
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void ProtectRange(uptr beg, uptr end) {
CHECK_LE(beg, end);
if (beg == end)
@@ -303,7 +303,7 @@ static void CheckAndProtect() {
ProtectRange(kTraceMemEnd, kHeapMemBeg);
ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg);
}
-#endif // #ifndef TSAN_GO
+#endif // #ifndef SANITIZER_GO
void InitializePlatform() {
DisableCoreDumperIfNecessary();
@@ -337,7 +337,7 @@ void InitializePlatform() {
ReExec();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
CheckAndProtect();
InitTlsSize();
InitDataSeg();
@@ -348,7 +348,7 @@ bool IsGlobalVar(uptr addr) {
return g_data_start && addr >= g_data_start && addr < g_data_end;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
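ProtectRange above fills a [beg, end) gap in the fixed address-space layout with inaccessible memory, so a stray access faults immediately instead of silently landing between shadow regions. The runtime uses its internal mmap wrapper; plain mmap with PROT_NONE is the moral equivalent. A hedged sketch (names are illustrative):

// Sketch only; error handling omitted. MAP_FIXED forcibly replaces
// existing mappings, which is why the runtime verifies the layout
// first (CheckAndProtect) before protecting the gaps.
#include <sys/mman.h>
#include <cstdint>

static bool ProtectRangeSketch(uintptr_t beg, uintptr_t end) {
  if (beg == end)
    return true;  // empty gap, nothing to do
  void *res = mmap((void *)beg, end - beg, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_NORESERVE,
                   -1, 0);
  return res == (void *)beg;  // the real runtime Die()s on failure
}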
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 40f6239f3..15b9f9d2c 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -50,7 +50,7 @@ void FlushShadowMemory() {
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void InitializeShadowMemory() {
uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
kShadowEnd - kShadowBeg);
@@ -73,7 +73,7 @@ void InitializePlatform() {
DisableCoreDumperIfNecessary();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
void *abstime), void *c, void *m, void *abstime,
void(*cleanup)(void *arg), void *arg) {
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 255793198..c22f12a1b 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -71,7 +71,7 @@ ReportDesc::~ReportDesc() {
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -344,7 +344,7 @@ void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
}
-#else // #ifndef TSAN_GO
+#else // #ifndef SANITIZER_GO
const int kMainThreadId = 1;
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 67b58bc9e..7cb7008e2 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -43,7 +43,7 @@ extern "C" void __tsan_resume() {
namespace __tsan {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
@@ -73,7 +73,7 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
return new(mem) ThreadContext(tid);
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -102,7 +102,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
// , ignore_reads_and_writes()
// , ignore_interceptors()
, clock(tid, reuse_count)
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -111,7 +111,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_size(stk_size)
, tls_addr(tls_addr)
, tls_size(tls_size)
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
, last_sleep_clock(tid)
#endif
{
@@ -127,7 +127,7 @@ static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
}
static void BackgroundThread(void *arg) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// This is a non-initialized non-user thread, nothing to see here.
// We don't use ScopedIgnoreInterceptors, because we want ignores to be
// enabled even when the thread function exits (e.g. during pthread thread
@@ -191,7 +191,7 @@ static void BackgroundThread(void *arg) {
if (mprof_fd != kInvalidFd)
MemoryProfiler(ctx, mprof_fd, i);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Flush symbolizer cache if requested.
if (flags()->flush_symbolizer_ms > 0) {
u64 last = atomic_load(&ctx->last_symbolize_time_ns,
@@ -211,7 +211,7 @@ static void StartBackgroundThread() {
ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void StopBackgroundThread() {
atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
internal_join_thread(ctx->background_thread);
@@ -308,7 +308,7 @@ void Initialize(ThreadState *thr) {
ctx = new(ctx_placeholder) Context;
const char *options = GetEnv(kTsanOptionsEnv);
InitializeFlags(&ctx->flags, options);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeAllocator();
#endif
InitializeInterceptors();
@@ -316,18 +316,18 @@ void Initialize(ThreadState *thr) {
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeShadowMemory();
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
InitializeLibIgnore();
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif
StartBackgroundThread();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
SetSandboxingCallback(StopBackgroundThread);
#endif
if (common_flags()->detect_deadlocks)
@@ -364,7 +364,7 @@ int Finalize(ThreadState *thr) {
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (common_flags()->verbosity)
AllocatorPrintStats();
#endif
@@ -373,7 +373,7 @@ int Finalize(ThreadState *thr) {
if (ctx->nreported) {
failed = true;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -388,7 +388,7 @@ int Finalize(ThreadState *thr) {
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
@@ -400,7 +400,7 @@ int Finalize(ThreadState *thr) {
return failed ? flags()->exitcode : 0;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
@@ -433,7 +433,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc) {
}
#endif
-#ifdef TSAN_GO
+#ifdef SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
@@ -452,7 +452,7 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (thr->shadow_stack_pos == 0) // May happen during bootstrap.
return 0;
if (pc != 0) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -498,7 +498,7 @@ uptr TraceParts() {
return TraceSize() / kTracePartSize;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -531,7 +531,7 @@ void HandleRace(ThreadState *thr, u64 *shadow_mem,
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
@@ -917,7 +917,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -937,7 +937,7 @@ void FuncExit(ThreadState *thr) {
}
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
@@ -948,7 +948,7 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -960,7 +960,7 @@ void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
thr->mop_ignore_set.Reset();
#endif
}
@@ -970,7 +970,7 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -980,7 +980,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
#endif
@@ -1014,7 +1014,7 @@ void build_consistency_shadow8() {}
} // namespace __tsan
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
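The FuncEntry/FuncExit hunks above maintain TSan's shadow call stack: instrumented code pushes the callee's pc on entry and pops on exit, so the runtime can reconstruct the current call stack at any racy access. In the C/C++ build the stack has fixed capacity (hence the DCHECKs); the Go build grows it on demand via GrowShadowStack. A minimal sketch with illustrative names:

// Sketch of shadow-stack maintenance, not the runtime's layout.
#include <cstdint>

typedef uintptr_t uptr;
const int kShadowStackSize = 64 * 1024;

struct ShadowStack {
  uptr stack[kShadowStackSize];
  uptr *pos = stack;  // next free slot

  void FuncEntry(uptr pc) {
    // Real runtime: DCHECK_LT(pos, stack + kShadowStackSize) in C++,
    // grow-on-demand in the Go build.
    *pos++ = pc;
  }
  void FuncExit() {
    --pos;  // matching pop
  }
};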
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index cce2e2d8b..8d8868751 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -52,7 +52,7 @@
namespace __tsan {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
struct MapUnmapCallback;
typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
@@ -332,7 +332,7 @@ struct ThreadState {
int ignore_reads_and_writes;
int ignore_sync;
// Go does not support ignores.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
@@ -345,7 +345,7 @@ struct ThreadState {
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
Vector<JmpBuf> jmp_bufs;
@@ -376,7 +376,7 @@ struct ThreadState {
DenseSlabAllocCache sync_cache;
DenseSlabAllocCache clock_cache;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -391,7 +391,7 @@ struct ThreadState {
uptr tls_addr, uptr tls_size);
};
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
@@ -481,13 +481,13 @@ extern Context *ctx; // The one and the only global runtime context.
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
cur_thread()->ignore_interceptors++;
#endif
}
~ScopedIgnoreInterceptors() {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
cur_thread()->ignore_interceptors--;
#endif
}
@@ -717,7 +717,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
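ScopedIgnoreInterceptors in the hunks above is a plain RAII guard: the constructor bumps a per-thread counter, the destructor restores it, so interceptors stay disabled for exactly the guard's scope, including early returns. A minimal sketch, with a thread_local standing in for cur_thread()->ignore_interceptors:

// Sketch of the RAII interceptor-ignore pattern.
thread_local int ignore_interceptors = 0;

struct ScopedIgnoreInterceptorsSketch {
  ScopedIgnoreInterceptorsSketch()  { ignore_interceptors++; }
  ~ScopedIgnoreInterceptorsSketch() { ignore_interceptors--; }
};

void InternalOperation() {
  ScopedIgnoreInterceptorsSketch ignore;  // interceptors off...
  // ...runtime-internal calls here are not intercepted...
}                                         // ...and back on here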
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 0807869d9..ddf2b69bd 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -88,7 +88,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Global mutexes not marked as LINKER_INITIALIZED
// cause tons of not interesting reports, so just ignore it.
if (IsGlobalVar(addr))
@@ -405,7 +405,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 889fe7bb6..e3335febb 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -66,7 +66,7 @@ static void StackStripMain(SymbolizedStack *frames) {
if (last_frame2 == 0)
return;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
const char *last = last_frame->info.function;
const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
@@ -111,7 +111,7 @@ static ReportStack *SymbolizeStack(StackTrace trace) {
SymbolizedStack *top = nullptr;
for (uptr si = 0; si < trace.size; si++) {
const uptr pc = trace.trace[si];
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// We obtain the return address, but we're interested in the previous
// instruction.
const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc);
@@ -204,7 +204,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
rt->stack->suppressable = suppressable;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static ThreadContext *FindThreadByUidLocked(int unique_id) {
ctx->thread_registry->CheckLocked();
for (unsigned i = 0; i < kMaxTid; i++) {
@@ -249,7 +249,7 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
#endif
void ScopedReport::AddThread(int unique_tid, bool suppressable) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AddThread(FindThreadByUidLocked(unique_tid), suppressable);
#endif
}
@@ -304,7 +304,7 @@ void ScopedReport::AddDeadMutex(u64 id) {
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
@@ -354,7 +354,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
#endif
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
@@ -644,7 +644,7 @@ void ReportRace(ThreadState *thr) {
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -665,7 +665,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
}
void PrintCurrentStackSlow(uptr pc) {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
BufferedStackTrace *ptrace =
new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
BufferedStackTrace();
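The SymbolizeStack hunk above notes a subtlety worth spelling out: a stack trace stores return addresses, which point at the instruction after the call, so symbolizing them verbatim can blame the wrong source line. Stepping back into the call instruction before lookup fixes that; StackTrace::GetPreviousInstructionPc does this per architecture. A sketch of the x86 approximation:

// Sketch: adjust a return address to land inside the call instruction.
#include <cstdint>

typedef uintptr_t uptr;

static uptr PreviousInstructionPcSketch(uptr return_pc) {
  // x86: any byte within the variable-length call instruction lets the
  // symbolizer resolve the call site. Fixed-width ISAs subtract the
  // instruction size (e.g. 4 on ARM64) instead.
  return return_pc - 1;
}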
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index 256a95cae..7b7b27c02 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -30,7 +30,7 @@ ThreadContext::ThreadContext(int tid)
, epoch1() {
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
@@ -92,7 +92,7 @@ void ThreadContext::OnStarted(void *arg) {
epoch1 = (u64)-1;
new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -104,7 +104,7 @@ void ThreadContext::OnStarted(void *arg) {
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorThreadStart(thr);
#endif
if (common_flags()->detect_deadlocks) {
@@ -140,7 +140,7 @@ void ThreadContext::OnFinished() {
}
ctx->clock_alloc.FlushCache(&thr->clock_cache);
ctx->metamap.OnThreadIdle(thr);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
AllocatorThreadFinish(thr);
#endif
thr->~ThreadState();
@@ -148,7 +148,7 @@ void ThreadContext::OnFinished() {
thr = 0;
}
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -170,7 +170,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == 0) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -202,7 +202,7 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
ThreadRegistryLock l(ctx->thread_registry);
@@ -267,7 +267,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
if (ctx->after_multithreaded_fork) {
thr->ignore_interceptors++;
ThreadIgnoreBegin(thr, 0);
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index 2fb089305..299fc80fd 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -33,7 +33,7 @@ static const char *const std_suppressions =
"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
// Can be overriden in frontend.
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
extern "C" const char *WEAK __tsan_default_suppressions() {
return 0;
}
@@ -46,7 +46,7 @@ static bool suppressions_inited = false;
void InitializeSuppressions() {
CHECK(!suppressions_inited);
SuppressionContext::InitIfNecessary();
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
SuppressionContext::Get()->Parse(__tsan_default_suppressions());
SuppressionContext::Get()->Parse(std_suppressions);
#endif
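The WEAK hook above is the override point the "Can be overriden in frontend" comment refers to: __tsan_default_suppressions is a weak symbol returning no suppressions, so a frontend or user binary can ship built-in suppressions by providing a strong definition, without needing a TSAN_OPTIONS suppressions file. A sketch with purely illustrative entries:

// Strong definition overrides the runtime's weak default.
extern "C" const char *__tsan_default_suppressions() {
  return "race:third_party/legacy_queue\n"        // illustrative,
         "deadlock:KnownLockOrderInversion\n";    // not real targets
}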
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 7fb5ae365..1da8752f6 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -42,7 +42,7 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
#else
VarSizeStackTrace stack0;
@@ -56,7 +56,7 @@ struct TraceHeader {
struct Trace {
TraceHeader headers[kTraceParts];
Mutex mtx;
-#ifndef TSAN_GO
+#ifndef SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];
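The "must be last to catch overflow as paging fault" comment above describes a deliberate layout trick: with the fixed-size shadow stack at the end of the Trace allocation, the first push past capacity walks off the mapping and faults, turning silent corruption into an immediate crash. The same effect can be built explicitly with a guard page — a hedged sketch, error handling omitted:

// Sketch: allocate a stack followed by one inaccessible guard page so
// overflow faults instead of corrupting adjacent memory.
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>

struct GuardedStack {
  uintptr_t *base;  // usable slots
  size_t slots;
};

static GuardedStack AllocGuardedStack(size_t bytes, size_t page) {
  char *p = (char *)mmap(nullptr, bytes + page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  mprotect(p + bytes, page, PROT_NONE);  // the guard page
  return GuardedStack{(uintptr_t *)p, bytes / sizeof(uintptr_t)};
}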