path: root/lib/tsan/rtl/tsan_platform.h
author    Bill Schmidt <wschmidt@linux.vnet.ibm.com>    2015-12-08 21:54:39 +0000
committer Bill Schmidt <wschmidt@linux.vnet.ibm.com>    2015-12-08 21:54:39 +0000
commit    686cdf076959ee468a6777e9bed157291dc8445a (patch)
tree      5e429770241a69d0b39d84e46e67ad1ff1ae1518 /lib/tsan/rtl/tsan_platform.h
parent    1f3b4537c406d7dca8b1c43b6dee5e0eaa9148d8 (diff)
[PPC64, TSAN] LLVM basic enablement of thread sanitizer for PPC64 (BE and LE)
This patch is by Simone Atzeni with portions by Adhemerval Zanella.

This contains the LLVM patches to enable the thread sanitizer for PPC64, both
big- and little-endian.  Two different virtual memory sizes are supported:
old kernels use a 44-bit address space, while newer kernels require a 46-bit
address space.

There are two companion patches that will be added shortly.  There is a Clang
patch to actually turn on the use of the thread sanitizer for PPC64.  There is
also a patch that I wrote to provide interceptor support for setjmp/longjmp on
PPC64.

Patch discussion at reviews.llvm.org/D12841.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@255057 91177308-0d34-0410-b5e6-96231b3b80d8
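The two layouts in the diff below are chosen at run time from a single probe of
the process address space.  Here is a minimal standalone sketch of that idea,
not the runtime's actual detection code: ProbeVmaSize, the local-variable probe,
and the printed message are illustrative assumptions.  Only the fact that the
main-thread stack sits just below 2^44 (old kernels) or 2^46 (newer kernels)
comes from the layouts described below.

    #include <cstdint>
    #include <cstdio>

    typedef uint64_t uptr;

    // Estimate the usable VMA size by probing an address near the top of the
    // address space (the stack) and finding its most significant set bit.
    // On a 44-bit kernel the main stack sits just below 2^44, on a 46-bit
    // kernel just below 2^46, so the bit index distinguishes the two layouts.
    static int ProbeVmaSize() {
      volatile int local = 0;
      uptr frame = (uptr)&local;
      int msb = 63 - __builtin_clzll(frame);  // index of the highest set bit
      return msb + 1;                         // number of address bits in use
    }

    int main() {
      int vmaSize = ProbeVmaSize();
      // The runtime would pick Mapping44 or Mapping46 based on this value.
      printf("detected VMA size: %d-bit\n", vmaSize);
      return 0;
    }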
Diffstat (limited to 'lib/tsan/rtl/tsan_platform.h')
-rw-r--r--    lib/tsan/rtl/tsan_platform.h    127
1 file changed, 127 insertions(+), 0 deletions(-)
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index 254ea7985..b6bd8b0f5 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -161,6 +161,79 @@ struct Mapping42 {
// Indicates the runtime will define the memory regions at runtime.
#define TSAN_RUNTIME_VMA 1
+#elif defined(__powerpc64__)
+// PPC64 supports multiple VMA sizes, which lead to multiple address
+// transformation functions.  To support these VMA transformations and
+// mappings, the TSAN runtime for PPC64 uses an external memory read
+// (vmaSize) to select which mapping to use.  Although slower, this lets
+// the same instrumented binary run on multiple kernels.
+
+/*
+C/C++ on linux/powerpc64 (44-bit VMA)
+0000 0000 0100 - 0001 0000 0000: main binary
+0001 0000 0000 - 0001 0000 0000: -
+0001 0000 0000 - 0b00 0000 0000: shadow
+0b00 0000 0000 - 0b00 0000 0000: -
+0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
+0d00 0000 0000 - 0d00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: traces
+0f00 0000 0000 - 0f00 0000 0000: -
+0f00 0000 0000 - 0f50 0000 0000: heap
+0f50 0000 0000 - 0f60 0000 0000: -
+0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
+*/
+struct Mapping44 {
+ static const uptr kMetaShadowBeg = 0x0b0000000000ull;
+ static const uptr kMetaShadowEnd = 0x0d0000000000ull;
+ static const uptr kTraceMemBeg = 0x0d0000000000ull;
+ static const uptr kTraceMemEnd = 0x0f0000000000ull;
+ static const uptr kShadowBeg = 0x000100000000ull;
+ static const uptr kShadowEnd = 0x0b0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000000100ull;
+ static const uptr kLoAppMemEnd = 0x000100000000ull;
+ static const uptr kHeapMemBeg = 0x0f0000000000ull;
+ static const uptr kHeapMemEnd = 0x0f5000000000ull;
+ static const uptr kHiAppMemBeg = 0x0f6000000000ull;
+ static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits
+ static const uptr kAppMemMsk = 0x0f0000000000ull;
+ static const uptr kAppMemXor = 0x002100000000ull;
+ static const uptr kVdsoBeg = 0x3c0000000000000ull;
+};
+
+/*
+C/C++ on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 3d00 0000 0000: -
+3d00 0000 0000 - 3e00 0000 0000: heap
+3e00 0000 0000 - 3e80 0000 0000: -
+3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
+*/
+struct Mapping46 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x3d0000000000ull;
+ static const uptr kHeapMemEnd = 0x3e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x3e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits
+ static const uptr kAppMemMsk = 0x3c0000000000ull;
+ static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+};
+
+// Indicates the runtime will define the memory regions at runtime.
+#define TSAN_RUNTIME_VMA 1
#endif
#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
@@ -274,6 +347,12 @@ uptr MappingArchImpl(void) {
else
return MappingImpl<Mapping42, Type>();
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return MappingImpl<Mapping44, Type>();
+ else
+ return MappingImpl<Mapping46, Type>();
+ DCHECK(0);
#else
return MappingImpl<Mapping, Type>();
#endif
@@ -399,6 +478,12 @@ bool IsAppMem(uptr mem) {
else
return IsAppMemImpl<Mapping42>(mem);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return IsAppMemImpl<Mapping44>(mem);
+ else
+ return IsAppMemImpl<Mapping46>(mem);
+ DCHECK(0);
#else
return IsAppMemImpl<Mapping>(mem);
#endif
@@ -418,6 +503,12 @@ bool IsShadowMem(uptr mem) {
else
return IsShadowMemImpl<Mapping42>(mem);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return IsShadowMemImpl<Mapping44>(mem);
+ else
+ return IsShadowMemImpl<Mapping46>(mem);
+ DCHECK(0);
#else
return IsShadowMemImpl<Mapping>(mem);
#endif
@@ -437,6 +528,12 @@ bool IsMetaMem(uptr mem) {
else
return IsMetaMemImpl<Mapping42>(mem);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return IsMetaMemImpl<Mapping44>(mem);
+ else
+ return IsMetaMemImpl<Mapping46>(mem);
+ DCHECK(0);
#else
return IsMetaMemImpl<Mapping>(mem);
#endif
@@ -462,6 +559,12 @@ uptr MemToShadow(uptr x) {
else
return MemToShadowImpl<Mapping42>(x);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return MemToShadowImpl<Mapping44>(x);
+ else
+ return MemToShadowImpl<Mapping46>(x);
+ DCHECK(0);
#else
return MemToShadowImpl<Mapping>(x);
#endif
@@ -489,6 +592,12 @@ u32 *MemToMeta(uptr x) {
else
return MemToMetaImpl<Mapping42>(x);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return MemToMetaImpl<Mapping44>(x);
+ else
+ return MemToMetaImpl<Mapping46>(x);
+ DCHECK(0);
#else
return MemToMetaImpl<Mapping>(x);
#endif
@@ -522,6 +631,12 @@ uptr ShadowToMem(uptr s) {
else
return ShadowToMemImpl<Mapping42>(s);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return ShadowToMemImpl<Mapping44>(s);
+ else
+ return ShadowToMemImpl<Mapping46>(s);
+ DCHECK(0);
#else
return ShadowToMemImpl<Mapping>(s);
#endif
@@ -549,6 +664,12 @@ uptr GetThreadTrace(int tid) {
else
return GetThreadTraceImpl<Mapping42>(tid);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return GetThreadTraceImpl<Mapping44>(tid);
+ else
+ return GetThreadTraceImpl<Mapping46>(tid);
+ DCHECK(0);
#else
return GetThreadTraceImpl<Mapping>(tid);
#endif
@@ -571,6 +692,12 @@ uptr GetThreadTraceHeader(int tid) {
else
return GetThreadTraceHeaderImpl<Mapping42>(tid);
DCHECK(0);
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return GetThreadTraceHeaderImpl<Mapping44>(tid);
+ else
+ return GetThreadTraceHeaderImpl<Mapping46>(tid);
+ DCHECK(0);
#else
return GetThreadTraceHeaderImpl<Mapping>(tid);
#endif
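
Each Mapping above carries kAppMemMsk and kAppMemXor, which the MemToShadow
dispatch selected at run time uses to translate an application address into
its shadow address.  The following is a minimal sketch of one plausible
mask/xor translation using the Mapping46 constants from this diff; kShadowCell,
kShadowCnt, and the exact formula are illustrative assumptions, not necessarily
what the TSAN runtime computes.

    #include <cassert>
    #include <cstdint>

    typedef uint64_t uptr;

    // Constants copied from Mapping46 in the diff above.
    static const uptr kShadowBeg = 0x010000000000ull;
    static const uptr kShadowEnd = 0x100000000000ull;
    static const uptr kAppMemMsk = 0x3c0000000000ull;
    static const uptr kAppMemXor = 0x020000000000ull;

    // Assumed granule parameters: one 8-byte application cell maps to
    // kShadowCnt shadow cells.  These values are for illustration only.
    static const uptr kShadowCell = 8;
    static const uptr kShadowCnt = 4;

    // Clear the high "region selector" bits and the low in-cell bits, fold
    // in the xor constant, then scale so every application cell gets its
    // own run of shadow cells.
    uptr MemToShadowSketch(uptr x) {
      return ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) * kShadowCnt;
    }

    int main() {
      // A heap address from the 46-bit layout (heap: 3d00... - 3e00...).
      uptr app = 0x3d0000001230ull;
      uptr sh = MemToShadowSketch(app);
      assert(sh >= kShadowBeg && sh < kShadowEnd);  // lands inside the shadow range
      return 0;
    }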