Diffstat (limited to 'lib')
-rw-r--r-- lib/sanitizer_common/sanitizer_allocator.cc | 12
-rw-r--r-- lib/sanitizer_common/sanitizer_common.cc | 4
-rw-r--r-- lib/sanitizer_common/sanitizer_internal_defs.h | 4
-rw-r--r-- lib/sanitizer_common/sanitizer_linux.cc | 2
-rw-r--r-- lib/sanitizer_common/sanitizer_mac.cc | 10
-rw-r--r-- lib/sanitizer_common/sanitizer_platform.h | 4
-rw-r--r-- lib/sanitizer_common/sanitizer_posix_libcdep.cc | 6
-rw-r--r-- lib/sanitizer_common/sanitizer_printf.cc | 2
-rw-r--r-- lib/sanitizer_common/sanitizer_thread_registry.cc | 2
-rw-r--r-- lib/sanitizer_common/sanitizer_win.cc | 8
-rw-r--r-- lib/tsan/go/build.bat | 2
-rwxr-xr-x lib/tsan/go/buildgo.sh | 2
-rw-r--r-- lib/tsan/rtl/tsan_clock.cc | 2
-rw-r--r-- lib/tsan/rtl/tsan_defs.h | 14
-rw-r--r-- lib/tsan/rtl/tsan_flags.cc | 2
-rw-r--r-- lib/tsan/rtl/tsan_flags.inc | 3
-rw-r--r-- lib/tsan/rtl/tsan_interface.h | 4
-rw-r--r-- lib/tsan/rtl/tsan_interface_atomic.cc | 18
-rw-r--r-- lib/tsan/rtl/tsan_mutexset.h | 4
-rw-r--r-- lib/tsan/rtl/tsan_platform.h | 22
-rw-r--r-- lib/tsan/rtl/tsan_platform_linux.cc | 18
-rw-r--r-- lib/tsan/rtl/tsan_platform_mac.cc | 8
-rw-r--r-- lib/tsan/rtl/tsan_platform_posix.cc | 2
-rw-r--r-- lib/tsan/rtl/tsan_report.cc | 4
-rw-r--r-- lib/tsan/rtl/tsan_rtl.cc | 54
-rw-r--r-- lib/tsan/rtl/tsan_rtl.h | 24
-rw-r--r-- lib/tsan/rtl/tsan_rtl_mutex.cc | 8
-rw-r--r-- lib/tsan/rtl/tsan_rtl_proc.cc | 4
-rw-r--r-- lib/tsan/rtl/tsan_rtl_report.cc | 16
-rw-r--r-- lib/tsan/rtl/tsan_rtl_thread.cc | 16
-rw-r--r-- lib/tsan/rtl/tsan_suppressions.cc | 4
-rw-r--r-- lib/tsan/rtl/tsan_sync.cc | 4
-rw-r--r-- lib/tsan/rtl/tsan_trace.h | 4
33 files changed, 144 insertions, 149 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index f1d1e2c25..5c10f69d6 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -21,10 +21,10 @@
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
-# ifndef SANITIZER_GO
+# if !SANITIZER_GO
extern "C" void *__libc_memalign(uptr alignment, uptr size);
# endif
extern "C" void *__libc_realloc(void *ptr, uptr size);
@@ -32,7 +32,7 @@ extern "C" void __libc_free(void *ptr);
# else
# include <stdlib.h>
# define __libc_malloc malloc
-# ifndef SANITIZER_GO
+# if !SANITIZER_GO
static void *__libc_memalign(uptr alignment, uptr size) {
void *p;
uptr error = posix_memalign(&p, alignment, size);
@@ -47,7 +47,7 @@ static void *__libc_memalign(uptr alignment, uptr size) {
static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
uptr alignment) {
(void)cache;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (alignment == 0)
return __libc_malloc(size);
else
@@ -78,7 +78,7 @@ InternalAllocator *internal_allocator() {
return 0;
}
-#else // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@@ -131,7 +131,7 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index 70ee481ca..554c0e3c5 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -114,7 +114,7 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Report("ERROR: %s failed to "
"%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, mmap_type, size, size, mem_type, err);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
@@ -207,7 +207,7 @@ void ReportErrorSummary(const char *error_message) {
__sanitizer_report_error_summary(buff.data());
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
if (!common_flags()->print_summary)
return;
diff --git a/lib/sanitizer_common/sanitizer_internal_defs.h b/lib/sanitizer_common/sanitizer_internal_defs.h
index a36ee6d8c..02a1e5273 100644
--- a/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -24,7 +24,7 @@
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
// FIXME find out what we need on Windows, if anything.
# define SANITIZER_WEAK_ATTRIBUTE
-#elif defined(SANITIZER_GO)
+#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
#else
@@ -32,7 +32,7 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
+#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
diff --git a/lib/sanitizer_common/sanitizer_linux.cc b/lib/sanitizer_common/sanitizer_linux.cc
index 5784fec9b..f08cf1daf 100644
--- a/lib/sanitizer_common/sanitizer_linux.cc
+++ b/lib/sanitizer_common/sanitizer_linux.cc
@@ -1230,7 +1230,7 @@ bool IsHandledDeadlySignal(int signum) {
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void *internal_start_thread(void(*func)(void *arg), void *arg) {
// Start the thread with signals blocked, otherwise it can steal user signals.
__sanitizer_sigset_t set, old;
diff --git a/lib/sanitizer_common/sanitizer_mac.cc b/lib/sanitizer_common/sanitizer_mac.cc
index ba785bdc3..4a299e934 100644
--- a/lib/sanitizer_common/sanitizer_mac.cc
+++ b/lib/sanitizer_common/sanitizer_mac.cc
@@ -373,7 +373,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
*stk_addr = stack_bottom;
@@ -471,12 +471,12 @@ void *internal_start_thread(void(*func)(void *arg), void *arg) {
void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static BlockingMutex syslog_lock(LINKER_INITIALIZED);
#endif
void WriteOneLineToSyslog(const char *s) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
syslog_lock.CheckLocked();
asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
#endif
@@ -489,7 +489,7 @@ void LogMessageOnPrintf(const char *str) {
}
void LogFullErrorReport(const char *buffer) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Log with os_trace. This will make it into the crash log.
#if SANITIZER_OS_TRACE
if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) {
@@ -562,7 +562,7 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;
diff --git a/lib/sanitizer_common/sanitizer_platform.h b/lib/sanitizer_common/sanitizer_platform.h
index 859e91273..d9a8e8df1 100644
--- a/lib/sanitizer_common/sanitizer_platform.h
+++ b/lib/sanitizer_common/sanitizer_platform.h
@@ -249,4 +249,8 @@
#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
#endif
+#ifndef SANITIZER_GO
+# define SANITIZER_GO 0
+#endif
+
#endif // SANITIZER_PLATFORM_H
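
The lines added above give SANITIZER_GO an explicit value in every configuration: the Go build scripts below now pass -DSANITIZER_GO=1, and all other builds fall back to 0 here. That guarantee is what lets the rest of the patch replace presence tests (#ifdef/#ifndef SANITIZER_GO) with value tests (#if/#if !SANITIZER_GO). A minimal sketch of the resulting convention (standalone illustration, not part of the patch):

    // sanitizer_platform.h guarantees a 0/1 value:
    #ifndef SANITIZER_GO
    # define SANITIZER_GO 0
    #endif

    // Consumers then test the value rather than the definition:
    #if SANITIZER_GO
    // Go race-detector runtime only
    #else
    // C/C++ ThreadSanitizer runtime only
    #endif
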
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index 1a3e9602e..71eda4e80 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -128,7 +128,7 @@ void SleepForMillis(int millis) {
}
void Abort() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// If we are handling SIGABRT, unhandle it first.
if (IsHandledDeadlySignal(SIGABRT)) {
struct sigaction sigact;
@@ -142,7 +142,7 @@ void Abort() {
}
int Atexit(void (*function)(void)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return atexit(function);
#else
return 0;
@@ -153,7 +153,7 @@ bool SupportsColoredOutput(fd_t fd) {
return isatty(fd) != 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
diff --git a/lib/sanitizer_common/sanitizer_printf.cc b/lib/sanitizer_common/sanitizer_printf.cc
index 434ebb93d..f394e75b0 100644
--- a/lib/sanitizer_common/sanitizer_printf.cc
+++ b/lib/sanitizer_common/sanitizer_printf.cc
@@ -213,7 +213,7 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void OnPrint(const char *str) {
(void)str;
}
-#elif defined(SANITIZER_GO) && defined(TSAN_EXTERNAL_HOOKS)
+#elif SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
void OnPrint(const char *str);
#else
void OnPrint(const char *str) {
diff --git a/lib/sanitizer_common/sanitizer_thread_registry.cc b/lib/sanitizer_common/sanitizer_thread_registry.cc
index 6e7ddfa64..c2b75e652 100644
--- a/lib/sanitizer_common/sanitizer_thread_registry.cc
+++ b/lib/sanitizer_common/sanitizer_thread_registry.cc
@@ -131,7 +131,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
tctx = context_factory_(tid);
threads_[tid] = tctx;
} else {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
SanitizerToolName, max_threads_);
#else
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index 36dc32250..e02995301 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -334,7 +334,7 @@ struct ModuleInfo {
uptr end_address;
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
if (l->base_address < r->base_address)
@@ -344,7 +344,7 @@ int CompareModulesBase(const void *pl, const void *pr) {
#endif
} // namespace
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void DumpProcessMap() {
Report("Dumping process modules:\n");
ListOfModules modules;
@@ -430,7 +430,7 @@ void Abort() {
internal__exit(3);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
@@ -723,7 +723,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
diff --git a/lib/tsan/go/build.bat b/lib/tsan/go/build.bat
index 3ada9ab11..3a64a2413 100644
--- a/lib/tsan/go/build.bat
+++ b/lib/tsan/go/build.bat
@@ -1,4 +1,4 @@
type tsan_go.cc ..\rtl\tsan_interface_atomic.cc ..\rtl\tsan_clock.cc ..\rtl\tsan_flags.cc ..\rtl\tsan_md5.cc ..\rtl\tsan_mutex.cc ..\rtl\tsan_report.cc ..\rtl\tsan_rtl.cc ..\rtl\tsan_rtl_mutex.cc ..\rtl\tsan_rtl_report.cc ..\rtl\tsan_rtl_thread.cc ..\rtl\tsan_rtl_proc.cc ..\rtl\tsan_stat.cc ..\rtl\tsan_suppressions.cc ..\rtl\tsan_sync.cc ..\rtl\tsan_stack_trace.cc ..\..\sanitizer_common\sanitizer_allocator.cc ..\..\sanitizer_common\sanitizer_common.cc ..\..\sanitizer_common\sanitizer_flags.cc ..\..\sanitizer_common\sanitizer_stacktrace.cc ..\..\sanitizer_common\sanitizer_libc.cc ..\..\sanitizer_common\sanitizer_printf.cc ..\..\sanitizer_common\sanitizer_suppressions.cc ..\..\sanitizer_common\sanitizer_thread_registry.cc ..\rtl\tsan_platform_windows.cc ..\..\sanitizer_common\sanitizer_win.cc ..\..\sanitizer_common\sanitizer_deadlock_detector1.cc ..\..\sanitizer_common\sanitizer_stackdepot.cc ..\..\sanitizer_common\sanitizer_persistent_allocator.cc ..\..\sanitizer_common\sanitizer_flag_parser.cc ..\..\sanitizer_common\sanitizer_symbolizer.cc ..\..\sanitizer_common\sanitizer_termination.cc > gotsan.cc
-gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
+gcc -c -o race_windows_amd64.syso gotsan.cc -I..\rtl -I..\.. -I..\..\sanitizer_common -I..\..\..\include -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -Wno-error=attributes -Wno-attributes -Wno-format -Wno-maybe-uninitialized -DSANITIZER_DEBUG=0 -O3 -fomit-frame-pointer -std=c++11
diff --git a/lib/tsan/go/buildgo.sh b/lib/tsan/go/buildgo.sh
index 834e325bc..42d479064 100755
--- a/lib/tsan/go/buildgo.sh
+++ b/lib/tsan/go/buildgo.sh
@@ -113,7 +113,7 @@ for F in $SRCS; do
cat $F >> $DIR/gotsan.cc
done
-FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
+FLAGS=" -I../rtl -I../.. -I../../sanitizer_common -I../../../include -std=c++11 -m64 -Wall -fno-exceptions -fno-rtti -DSANITIZER_GO=1 -DSANITIZER_DEADLOCK_DETECTOR_VERSION=2 $OSCFLAGS"
if [ "$DEBUG" = "" ]; then
FLAGS="$FLAGS -DSANITIZER_DEBUG=0 -O3 -msse3 -fomit-frame-pointer"
else
diff --git a/lib/tsan/rtl/tsan_clock.cc b/lib/tsan/rtl/tsan_clock.cc
index 1e2050d1f..32435adfd 100644
--- a/lib/tsan/rtl/tsan_clock.cc
+++ b/lib/tsan/rtl/tsan_clock.cc
@@ -82,7 +82,7 @@
// We don't have ThreadState in these methods, so this is an ugly hack that
// works only in C++.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
#else
# define CPP_STAT_INC(typ) (void)0
diff --git a/lib/tsan/rtl/tsan_defs.h b/lib/tsan/rtl/tsan_defs.h
index cdc23d0a7..55580a5c4 100644
--- a/lib/tsan/rtl/tsan_defs.h
+++ b/lib/tsan/rtl/tsan_defs.h
@@ -29,7 +29,7 @@
#endif
#ifndef TSAN_CONTAINS_UBSAN
-# if CAN_SANITIZE_UB && !defined(SANITIZER_GO)
+# if CAN_SANITIZE_UB && !SANITIZER_GO
# define TSAN_CONTAINS_UBSAN 1
# else
# define TSAN_CONTAINS_UBSAN 0
@@ -38,19 +38,9 @@
namespace __tsan {
-#ifdef SANITIZER_GO
-const bool kGoMode = true;
-const bool kCppMode = false;
-const char *const kTsanOptionsEnv = "GORACE";
-#else
-const bool kGoMode = false;
-const bool kCppMode = true;
-const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
-#endif
-
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
#else
const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
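
The tsan_defs.h hunk above removes the kGoMode/kCppMode constants and the kTsanOptionsEnv string: because SANITIZER_GO now always expands to 0 or 1, later hunks use the macro directly both in preprocessor conditionals and in ordinary C++ expressions (for example GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS") in tsan_rtl.cc). A hypothetical standalone demo of that idiom, not taken from the patch (OptionsEnvVar is an illustrative helper):

    #include <cstdio>

    #ifndef SANITIZER_GO
    # define SANITIZER_GO 0
    #endif

    // Stands in for the removed kTsanOptionsEnv constant.
    static const char *OptionsEnvVar() {
      return SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
    }

    int main() {
      // Unlike #ifdef-style exclusion, the untaken branch of a value test
      // still has to parse and typecheck; the constant condition is then
      // trivially folded away by the compiler.
      if (!SANITIZER_GO)
        std::printf("C/C++ mode, options read from %s\n", OptionsEnvVar());
      else
        std::printf("Go mode, options read from %s\n", OptionsEnvVar());
      return 0;
    }
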
diff --git a/lib/tsan/rtl/tsan_flags.cc b/lib/tsan/rtl/tsan_flags.cc
index 93f598616..d8d4746ab 100644
--- a/lib/tsan/rtl/tsan_flags.cc
+++ b/lib/tsan/rtl/tsan_flags.cc
@@ -61,7 +61,7 @@ void InitializeFlags(Flags *f, const char *env) {
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.allow_addr2line = true;
- if (kGoMode) {
+ if (SANITIZER_GO) {
// Does not work as expected for Go: runtime handles SIGABRT and crashes.
cf.abort_on_error = false;
// Go does not have mutexes.
diff --git a/lib/tsan/rtl/tsan_flags.inc b/lib/tsan/rtl/tsan_flags.inc
index 4fb443612..071cf427d 100644
--- a/lib/tsan/rtl/tsan_flags.inc
+++ b/lib/tsan/rtl/tsan_flags.inc
@@ -61,8 +61,9 @@ TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
+// There are a lot of goroutines in Go, so we use smaller history.
TSAN_FLAG(
- int, history_size, kGoMode ? 1 : 3, // There are a lot of goroutines in Go.
+ int, history_size, SANITIZER_GO ? 1 : 3,
"Per-thread history size, controls how many previous memory accesses "
"are remembered per thread. Possible values are [0..7]. "
"history_size=0 amounts to 32K memory accesses. Each next value doubles "
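
With these defaults, and the 32K-access baseline that doubles with each step, a Go goroutine keeps roughly 64K recent memory accesses (history_size=1) while a C/C++ thread keeps roughly 256K (history_size=3).
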
diff --git a/lib/tsan/rtl/tsan_interface.h b/lib/tsan/rtl/tsan_interface.h
index a228746bb..9e234e958 100644
--- a/lib/tsan/rtl/tsan_interface.h
+++ b/lib/tsan/rtl/tsan_interface.h
@@ -26,7 +26,7 @@ using __sanitizer::uptr;
extern "C" {
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
@@ -146,7 +146,7 @@ typedef unsigned char a8;
typedef unsigned short a16; // NOLINT
typedef unsigned int a32;
typedef unsigned long long a64; // NOLINT
-#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
+#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
|| (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
__extension__ typedef __int128 a128;
# define __TSAN_HAS_INT128 1
diff --git a/lib/tsan/rtl/tsan_interface_atomic.cc b/lib/tsan/rtl/tsan_interface_atomic.cc
index dc0873f79..c577bea98 100644
--- a/lib/tsan/rtl/tsan_interface_atomic.cc
+++ b/lib/tsan/rtl/tsan_interface_atomic.cc
@@ -28,7 +28,7 @@
using namespace __tsan; // NOLINT
-#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
+#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
@@ -102,7 +102,7 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
&& __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
@@ -176,7 +176,7 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}
@@ -212,7 +212,7 @@ static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
return atomic_load(to_atomic(a), to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
@@ -242,7 +242,7 @@ static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
atomic_store(to_atomic(a), v, to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
@@ -434,7 +434,7 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
__sync_synchronize();
}
@@ -446,7 +446,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
#endif
// Interface functions follow.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// C/C++
@@ -845,7 +845,7 @@ void __tsan_atomic_signal_fence(morder mo) {
}
} // extern "C"
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
// Go
@@ -928,4 +928,4 @@ void __tsan_go_atomic64_compare_exchange(
*(bool*)(a+24) = (cur == cmp);
}
} // extern "C"
-#endif // #ifndef SANITIZER_GO
+#endif // #if !SANITIZER_GO
diff --git a/lib/tsan/rtl/tsan_mutexset.h b/lib/tsan/rtl/tsan_mutexset.h
index 68f0ec26f..605c21a9c 100644
--- a/lib/tsan/rtl/tsan_mutexset.h
+++ b/lib/tsan/rtl/tsan_mutexset.h
@@ -43,7 +43,7 @@ class MutexSet {
}
private:
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
@@ -55,7 +55,7 @@ class MutexSet {
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in different goroutine).
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index e6c261539..1dd9d91d4 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -24,7 +24,7 @@
namespace __tsan {
-#if !defined(SANITIZER_GO)
+#if !SANITIZER_GO
#if defined(__x86_64__)
/*
@@ -276,7 +276,7 @@ struct Mapping46 {
#define TSAN_RUNTIME_VMA 1
#endif
-#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
+#elif SANITIZER_GO && !SANITIZER_WINDOWS
/* Go on linux, darwin and freebsd
0000 0000 1000 - 0000 1000 0000: executable
@@ -302,7 +302,7 @@ struct Mapping {
static const uptr kAppMemEnd = 0x00e000000000ull;
};
-#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
+#elif SANITIZER_GO && SANITIZER_WINDOWS
/* Go on windows
0000 0000 1000 - 0000 1000 0000: executable
@@ -362,7 +362,7 @@ enum MappingType {
template<typename Mapping, int Type>
uptr MappingImpl(void) {
switch (Type) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
# ifdef TSAN_MID_APP_RANGE
@@ -408,7 +408,7 @@ uptr MappingArchImpl(void) {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
ALWAYS_INLINE
uptr LoAppMemBeg(void) {
return MappingArchImpl<MAPPING_LO_APP_BEG>();
@@ -470,7 +470,7 @@ bool GetUserRegion(int i, uptr *start, uptr *end) {
switch (i) {
default:
return false;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
case 0:
*start = LoAppMemBeg();
*end = LoAppMemEnd();
@@ -528,7 +528,7 @@ uptr TraceMemEnd(void) {
template<typename Mapping>
bool IsAppMemImpl(uptr mem) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
# ifdef TSAN_MID_APP_RANGE
(mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
@@ -619,7 +619,7 @@ bool IsMetaMem(uptr mem) {
template<typename Mapping>
uptr MemToShadowImpl(uptr x) {
DCHECK(IsAppMem(x));
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
^ Mapping::kAppMemXor) * kShadowCnt;
#else
@@ -656,7 +656,7 @@ uptr MemToShadow(uptr x) {
template<typename Mapping>
u32 *MemToMetaImpl(uptr x) {
DCHECK(IsAppMem(x));
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
#else
@@ -695,7 +695,7 @@ u32 *MemToMeta(uptr x) {
template<typename Mapping>
uptr ShadowToMemImpl(uptr s) {
DCHECK(IsShadowMem(s));
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// The shadow mapping is non-linear and we've lost some bits, so we don't have
// an easy way to restore the original app address. But the mapping is a
// bijection, so we try to restore the address as belonging to low/mid/high
@@ -713,7 +713,7 @@ uptr ShadowToMemImpl(uptr s) {
return p;
# endif
return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
# ifndef SANITIZER_WINDOWS
return (s & ~Mapping::kShadowBeg) / kShadowCnt;
# else
diff --git a/lib/tsan/rtl/tsan_platform_linux.cc b/lib/tsan/rtl/tsan_platform_linux.cc
index 6c78a3b08..233de103f 100644
--- a/lib/tsan/rtl/tsan_platform_linux.cc
+++ b/lib/tsan/rtl/tsan_platform_linux.cc
@@ -98,7 +98,7 @@ void FillProfileCallback(uptr p, uptr rss, bool file,
mem[MemShadow] += rss;
else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
mem[MemMeta] += rss;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
@@ -144,7 +144,7 @@ void FlushShadowMemory() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -206,7 +206,7 @@ void InitializeShadowMemoryPlatform() {
MapRodata();
}
-#endif // #ifndef SANITIZER_GO
+#endif // #if !SANITIZER_GO
void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
@@ -234,7 +234,7 @@ void InitializePlatform() {
// Go maps shadow memory lazily and works fine with limited address space.
// Unlimited stack is not a problem as well, because the executable
// is not compiled with -pie.
- if (kCppMode) {
+ if (!SANITIZER_GO) {
bool reexec = false;
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
@@ -276,13 +276,13 @@ void InitializePlatform() {
ReExec();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
InitTlsSize();
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
@@ -335,11 +335,11 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
#if SANITIZER_ANDROID
#if defined(__aarch64__)
@@ -400,7 +400,7 @@ void cur_thread_finalize() {
CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif // SANITIZER_ANDROID
-#endif // ifndef SANITIZER_GO
+#endif // if !SANITIZER_GO
} // namespace __tsan
diff --git a/lib/tsan/rtl/tsan_platform_mac.cc b/lib/tsan/rtl/tsan_platform_mac.cc
index 0cc02ab87..d720fe43f 100644
--- a/lib/tsan/rtl/tsan_platform_mac.cc
+++ b/lib/tsan/rtl/tsan_platform_mac.cc
@@ -42,7 +42,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
void *val = (void *)atomic_load_relaxed(a);
@@ -110,7 +110,7 @@ void FlushShadowMemory() {
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }
// On OS X, GCD worker threads are created without a call to pthread_create. We
@@ -160,7 +160,7 @@ void InitializePlatformEarly() {
void InitializePlatform() {
DisableCoreDumperIfNecessary();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
CHECK_EQ(main_thread_identity, 0);
@@ -171,7 +171,7 @@ void InitializePlatform() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
diff --git a/lib/tsan/rtl/tsan_platform_posix.cc b/lib/tsan/rtl/tsan_platform_posix.cc
index 805ce1be4..0732c83d6 100644
--- a/lib/tsan/rtl/tsan_platform_posix.cc
+++ b/lib/tsan/rtl/tsan_platform_posix.cc
@@ -23,7 +23,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
uptr shadow =
diff --git a/lib/tsan/rtl/tsan_report.cc b/lib/tsan/rtl/tsan_report.cc
index 93604946a..156876e5c 100644
--- a/lib/tsan/rtl/tsan_report.cc
+++ b/lib/tsan/rtl/tsan_report.cc
@@ -71,7 +71,7 @@ ReportDesc::~ReportDesc() {
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -361,7 +361,7 @@ void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
}
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
const int kMainThreadId = 1;
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index 6bc76c780..bd3f8e7bd 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -44,7 +44,7 @@ extern "C" void __tsan_resume() {
namespace __tsan {
-#if !defined(SANITIZER_GO) && !SANITIZER_MAC
+#if !SANITIZER_GO && !SANITIZER_MAC
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
@@ -86,7 +86,7 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
return new(mem) ThreadContext(tid);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -117,7 +117,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
// , ignore_reads_and_writes()
// , ignore_interceptors()
, clock(tid, reuse_count)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -126,13 +126,13 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_size(stk_size)
, tls_addr(tls_addr)
, tls_size(tls_size)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, last_sleep_clock(tid)
#endif
{
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_threads;
uptr n_running_threads;
@@ -333,12 +333,12 @@ void Initialize(ThreadState *thr) {
SetCheckFailedCallback(TsanCheckFailed);
ctx = new(ctx_placeholder) Context;
- const char *options = GetEnv(kTsanOptionsEnv);
+ const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS");
CacheBinaryName();
InitializeFlags(&ctx->flags, options);
AvoidCVE_2016_2143();
InitializePlatformEarly();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Re-exec ourselves if we need to set additional env or command line args.
MaybeReexec();
@@ -354,14 +354,14 @@ void Initialize(ThreadState *thr) {
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeShadowMemory();
InitializeAllocatorLate();
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeLibIgnore();
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
// On MIPS, TSan initialization is run before
@@ -385,7 +385,7 @@ void Initialize(ThreadState *thr) {
#endif
ctx->initialized = true;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Symbolizer::LateInitialize();
#endif
@@ -411,7 +411,7 @@ int Finalize(ThreadState *thr) {
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (Verbosity()) AllocatorPrintStats();
#endif
@@ -419,7 +419,7 @@ int Finalize(ThreadState *thr) {
if (ctx->nreported) {
failed = true;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -434,7 +434,7 @@ int Finalize(ThreadState *thr) {
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
@@ -449,7 +449,7 @@ int Finalize(ThreadState *thr) {
return failed ? common_flags()->exitcode : 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
@@ -482,7 +482,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc) {
}
#endif
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
@@ -501,7 +501,7 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (!thr->is_inited) // May happen during bootstrap.
return 0;
if (pc != 0) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -547,7 +547,7 @@ uptr TraceParts() {
return TraceSize() / kTracePartSize;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -580,7 +580,7 @@ void HandleRace(ThreadState *thr, u64 *shadow_mem,
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
@@ -777,7 +777,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- if (kCppMode && *shadow_mem == kShadowRodata) {
+ if (!SANITIZER_GO && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
@@ -864,7 +864,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
+ if (SANITIZER_GO || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
@@ -950,7 +950,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -970,7 +970,7 @@ void FuncExit(ThreadState *thr) {
}
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
@@ -981,7 +981,7 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -993,7 +993,7 @@ void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->mop_ignore_set.Reset();
#endif
}
@@ -1003,7 +1003,7 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -1013,7 +1013,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
#endif
@@ -1037,7 +1037,7 @@ void build_consistency_nostats() {}
} // namespace __tsan
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
diff --git a/lib/tsan/rtl/tsan_rtl.h b/lib/tsan/rtl/tsan_rtl.h
index c2bddd8d7..8fdb6a9af 100644
--- a/lib/tsan/rtl/tsan_rtl.h
+++ b/lib/tsan/rtl/tsan_rtl.h
@@ -52,7 +52,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
static const uptr kAllocatorSpace = 0;
@@ -341,7 +341,7 @@ struct JmpBuf {
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
ThreadState *thr; // currently wired thread, or nullptr
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorCache alloc_cache;
InternalAllocatorCache internal_alloc_cache;
#endif
@@ -351,7 +351,7 @@ struct Processor {
DDPhysicalThread *dd_pt;
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// ScopedGlobalProcessor temporary setups a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very thread end, when we already destroyed the thread processor.
@@ -382,7 +382,7 @@ struct ThreadState {
int ignore_reads_and_writes;
int ignore_sync;
// Go does not support ignores.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
@@ -395,7 +395,7 @@ struct ThreadState {
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
@@ -423,7 +423,7 @@ struct ThreadState {
// Current wired Processor, or nullptr. Required to handle any events.
Processor *proc1;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Processor *proc() { return proc1; }
#else
Processor *proc();
@@ -432,7 +432,7 @@ struct ThreadState {
atomic_uintptr_t in_signal_handler;
ThreadSignalContext *signal_ctx;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -449,7 +449,7 @@ struct ThreadState {
uptr tls_addr, uptr tls_size);
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void cur_thread_finalize();
@@ -547,13 +547,13 @@ extern Context *ctx; // The one and the only global runtime context.
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors++;
#endif
}
~ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors--;
#endif
}
@@ -793,7 +793,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
@@ -805,7 +805,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
*evp = ev;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
diff --git a/lib/tsan/rtl/tsan_rtl_mutex.cc b/lib/tsan/rtl/tsan_rtl_mutex.cc
index 1806acf06..f3b51c30f 100644
--- a/lib/tsan/rtl/tsan_rtl_mutex.cc
+++ b/lib/tsan/rtl/tsan_rtl_mutex.cc
@@ -50,7 +50,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
uptr addr, u64 mid) {
// In Go, these misuses are either impossible, or detected by std lib,
// or false positives (e.g. unlock in a different thread).
- if (kGoMode)
+ if (SANITIZER_GO)
return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
@@ -76,7 +76,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
- if (kCppMode && s->creation_stack_id == 0)
+ if (!SANITIZER_GO && s->creation_stack_id == 0)
s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
@@ -195,7 +195,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
bool report_bad_unlock = false;
- if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
report_bad_unlock = true;
@@ -412,7 +412,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
diff --git a/lib/tsan/rtl/tsan_rtl_proc.cc b/lib/tsan/rtl/tsan_rtl_proc.cc
index 0c838a138..efccdb590 100644
--- a/lib/tsan/rtl/tsan_rtl_proc.cc
+++ b/lib/tsan/rtl/tsan_rtl_proc.cc
@@ -23,7 +23,7 @@ Processor *ProcCreate() {
internal_memset(mem, 0, sizeof(Processor));
Processor *proc = new(mem) Processor;
proc->thr = nullptr;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorProcStart(proc);
#endif
if (common_flags()->detect_deadlocks)
@@ -33,7 +33,7 @@ Processor *ProcCreate() {
void ProcDestroy(Processor *proc) {
CHECK_EQ(proc->thr, nullptr);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
AllocatorProcFinish(proc);
#endif
ctx->clock_alloc.FlushCache(&proc->clock_cache);
diff --git a/lib/tsan/rtl/tsan_rtl_report.cc b/lib/tsan/rtl/tsan_rtl_report.cc
index 7869570cc..bc8944fbf 100644
--- a/lib/tsan/rtl/tsan_rtl_report.cc
+++ b/lib/tsan/rtl/tsan_rtl_report.cc
@@ -38,7 +38,7 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
// on the other hand there is no sense in processing interceptors
// since we are going to die soon.
ScopedIgnoreInterceptors ignore;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_sync++;
cur_thread()->ignore_reads_and_writes++;
#endif
@@ -75,7 +75,7 @@ static void StackStripMain(SymbolizedStack *frames) {
if (last_frame2 == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const char *last = last_frame->info.function;
const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
@@ -208,7 +208,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
rt->stack->suppressable = suppressable;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
int unique_id = *(int *)arg;
return tctx->unique_id == (u32)unique_id;
@@ -253,7 +253,7 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
#endif
void ScopedReport::AddThread(int unique_tid, bool suppressable) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
AddThread(tctx, suppressable);
#endif
@@ -309,7 +309,7 @@ void ScopedReport::AddDeadMutex(u64 id) {
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
@@ -359,7 +359,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
}
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
@@ -664,7 +664,7 @@ void ReportRace(ThreadState *thr) {
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -693,7 +693,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace *ptrace =
new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
BufferedStackTrace();
diff --git a/lib/tsan/rtl/tsan_rtl_thread.cc b/lib/tsan/rtl/tsan_rtl_thread.cc
index fbe6111f6..40c3b3127 100644
--- a/lib/tsan/rtl/tsan_rtl_thread.cc
+++ b/lib/tsan/rtl/tsan_rtl_thread.cc
@@ -30,7 +30,7 @@ ThreadContext::ThreadContext(int tid)
, epoch1() {
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
@@ -94,7 +94,7 @@ void ThreadContext::OnStarted(void *arg) {
epoch1 = (u64)-1;
new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -125,7 +125,7 @@ void ThreadContext::OnStarted(void *arg) {
}
void ThreadContext::OnFinished() {
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
internal_free(thr->shadow_stack);
thr->shadow_stack = nullptr;
thr->shadow_stack_pos = nullptr;
@@ -148,7 +148,7 @@ void ThreadContext::OnFinished() {
thr = 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -170,7 +170,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == 0) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -202,7 +202,7 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
ThreadRegistryLock l(ctx->thread_registry);
@@ -240,7 +240,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
uptr stk_size = 0;
uptr tls_addr = 0;
uptr tls_size = 0;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
if (tid) {
@@ -271,7 +271,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
thr->ignore_interceptors++;
ThreadIgnoreBegin(thr, 0);
diff --git a/lib/tsan/rtl/tsan_suppressions.cc b/lib/tsan/rtl/tsan_suppressions.cc
index aea3cb978..bfb64e001 100644
--- a/lib/tsan/rtl/tsan_suppressions.cc
+++ b/lib/tsan/rtl/tsan_suppressions.cc
@@ -21,7 +21,7 @@
#include "tsan_mman.h"
#include "tsan_platform.h"
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
static const char *const std_suppressions =
// Libstdc++ 4.4 has data races in std::string.
@@ -54,7 +54,7 @@ void InitializeSuppressions() {
suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
suppression_ctx->Parse(__tsan_default_suppressions());
suppression_ctx->Parse(std_suppressions);
#endif
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 58b26802b..44c6a26a1 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -30,7 +30,7 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
this->next = 0;
creation_stack_id = 0;
- if (kCppMode) // Go does not use them
+ if (!SANITIZER_GO) // Go does not use them
creation_stack_id = CurrentStackId(thr, pc);
if (common_flags()->detect_deadlocks)
DDMutexInit(thr, pc, this);
@@ -120,7 +120,7 @@ bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
// without meta objects, at this point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from end as well.
void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
- if (kGoMode) {
+ if (SANITIZER_GO) {
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do the optimization only for C/C++.
FreeRange(proc, p, sz);
diff --git a/lib/tsan/rtl/tsan_trace.h b/lib/tsan/rtl/tsan_trace.h
index 2569c7e42..96a18ac41 100644
--- a/lib/tsan/rtl/tsan_trace.h
+++ b/lib/tsan/rtl/tsan_trace.h
@@ -42,7 +42,7 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
#else
VarSizeStackTrace stack0;
@@ -55,7 +55,7 @@ struct TraceHeader {
struct Trace {
Mutex mtx;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];