path: root/lib/tsan/rtl/tsan_platform.h
author     Dmitry Vyukov <dvyukov@google.com>  2016-09-22 13:42:02 +0000
committer  Dmitry Vyukov <dvyukov@google.com>  2016-09-22 13:42:02 +0000
commit     16e13a58c700f4944ef908964d51e79699dbbd87 (patch)
tree       afa9f7bd468f9a5cb359a2d0f12f4ec204302762 /lib/tsan/rtl/tsan_platform.h
parent     5042ecdfcbc09e87491222df05507dff67ad07ae (diff)
tsan: support pie binaries on newer kernels
4.1+ Linux kernels map pie binaries at 0x55: https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=d1fd836dcf00d2028c700c7e44d2c23404062c90
Currently tsan does not support app memory at 0x55 (https://github.com/google/sanitizers/issues/503). Older kernels also map pie binaries at 0x55 when ASLR is disabled (most notably under gdb).

This change extends the tsan mapping for linux/x86_64 to cover the 0x554-0x568 app range, which fixes both 4.1+ kernels and gdb. This required slightly shrinking the low and high app ranges and moving the heap. The mapping becomes even more non-linear, since we now xor the lower bits, so even a contiguous app range maps to split, intermixed shadow ranges.

This breaks ShadowToMemImpl, which assumes a linear mapping at least within a contiguous app range (though it turned out to be already broken at least on arm64/42-bit vma, as uncovered by r281970). So ShadowToMemImpl is also changed to a hopefully more robust implementation that does not assume a linear mapping.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@282152 91177308-0d34-0410-b5e6-96231b3b80d8
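To see why the mapping is non-linear, here is a minimal standalone sketch (not the runtime itself): the constants are the new values from this patch, kShadowCell = 8 and kShadowCnt = 4 are assumed from the rest of the tsan runtime, and MemToShadow is a simplified form of tsan's MemToShadowImpl for linux/x86_64.

#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;

static const uptr kShadowCell = 8;                  // app bytes per shadow cell (assumed)
static const uptr kShadowCnt  = 4;                  // shadow slots per cell (assumed)
static const uptr kAppMemMsk  = 0x7c0000000000ull;  // new Mapping::kAppMemMsk
static const uptr kAppMemXor  = 0x028000000000ull;  // new Mapping::kAppMemXor

// Drop the high "range selector" bits, xor in kAppMemXor, scale to shadow.
static uptr MemToShadow(uptr x) {
  return ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) * kShadowCnt;
}

int main() {
  // Two app addresses 8 bytes apart inside the new mid (pie) range
  // 0x554000000000-0x568000000000: the xor flips bit 39 across this
  // boundary, so their shadow addresses end up far apart.
  const uptr a = 0x557ffffffff8ull, b = 0x558000000000ull;
  printf("%llx -> %llx\n", (unsigned long long)a, (unsigned long long)MemToShadow(a));
  printf("%llx -> %llx\n", (unsigned long long)b, (unsigned long long)MemToShadow(b));
  return 0;
}

In this sketch, two addresses eight bytes apart in the mid range map to shadow addresses gigabytes apart, which is the sense in which a contiguous app range now maps to split, intermixed shadow ranges and why ShadowToMemImpl can no longer rely on a simple per-range linear inverse.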
Diffstat (limited to 'lib/tsan/rtl/tsan_platform.h')
-rw-r--r--  lib/tsan/rtl/tsan_platform.h | 57
1 file changed, 35 insertions(+), 22 deletions(-)
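The new ShadowToMemImpl in the second hunk below recovers the app address by trying each candidate range and verifying the round trip. A self-contained sketch of the same idea, with the new Mapping constants copied from the diff, kShadowCnt = 4 assumed, and MemToShadow simplified as in the sketch above:

#include <cstdint>

typedef uint64_t uptr;

static const uptr kShadowCnt    = 4;                    // assumed
static const uptr kAppMemMsk    = 0x7c0000000000ull;
static const uptr kAppMemXor    = 0x028000000000ull;
static const uptr kLoAppMemBeg  = 0x000000001000ull;
static const uptr kLoAppMemEnd  = 0x004000000000ull;
static const uptr kMidAppMemBeg = 0x554000000000ull;
static const uptr kMidAppMemEnd = 0x568000000000ull;
static const uptr kMidShadowOff = 0x540000000000ull;

static uptr MemToShadow(uptr x) {
  return ((x & ~(kAppMemMsk | 7ull)) ^ kAppMemXor) * kShadowCnt;
}

// Undo the scale and the xor, then try each app range in turn: a candidate is
// accepted only if it lies in that range and maps back to the same shadow
// address, since the non-linear mapping has no single closed-form inverse.
static uptr ShadowToMem(uptr s) {
  uptr p = (s / kShadowCnt) ^ kAppMemXor;                // low range: masked high bits were zero
  if (MemToShadow(p) == s && p >= kLoAppMemBeg && p < kLoAppMemEnd)
    return p;
  p = ((s / kShadowCnt) ^ kAppMemXor) + kMidShadowOff;   // mid range: re-add the masked-off base
  if (MemToShadow(p) == s && p >= kMidAppMemBeg && p < kMidAppMemEnd)
    return p;
  return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;   // otherwise high range: restore mask bits
}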
diff --git a/lib/tsan/rtl/tsan_platform.h b/lib/tsan/rtl/tsan_platform.h
index 6ab26476d..903ff5be3 100644
--- a/lib/tsan/rtl/tsan_platform.h
+++ b/lib/tsan/rtl/tsan_platform.h
@@ -29,35 +29,42 @@ namespace __tsan {
#if defined(__x86_64__)
/*
C/C++ on linux/x86_64 and freebsd/x86_64
-0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
-0100 0000 0000 - 0200 0000 0000: -
-0200 0000 0000 - 1000 0000 0000: shadow
+0000 0000 1000 - 0040 0000 0000: main binary and/or MAP_32BIT mappings (256GB)
+0040 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
1000 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
+4000 0000 0000 - 5540 0000 0000: -
+5540 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5680 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7e80 0000 0000: -
-7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+7c40 0000 0000 - 7d40 0000 0000: heap
+7d40 0000 0000 - 7ec0 0000 0000: -
+7ec0 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
struct Mapping {
static const uptr kMetaShadowBeg = 0x300000000000ull;
static const uptr kMetaShadowEnd = 0x400000000000ull;
static const uptr kTraceMemBeg = 0x600000000000ull;
static const uptr kTraceMemEnd = 0x620000000000ull;
- static const uptr kShadowBeg = 0x020000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
static const uptr kShadowEnd = 0x100000000000ull;
- static const uptr kHeapMemBeg = 0x7d0000000000ull;
- static const uptr kHeapMemEnd = 0x7e0000000000ull;
+ static const uptr kHeapMemBeg = 0x7c4000000000ull;
+ static const uptr kHeapMemEnd = 0x7d4000000000ull;
static const uptr kLoAppMemBeg = 0x000000001000ull;
- static const uptr kLoAppMemEnd = 0x010000000000ull;
- static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kLoAppMemEnd = 0x004000000000ull;
+ static const uptr kMidAppMemBeg = 0x554000000000ull;
+ static const uptr kMidAppMemEnd = 0x568000000000ull;
+ static const uptr kMidShadowOff = 0x540000000000ull;
+ static const uptr kHiAppMemBeg = 0x7ec000000000ull;
static const uptr kHiAppMemEnd = 0x800000000000ull;
static const uptr kAppMemMsk = 0x7c0000000000ull;
- static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kAppMemXor = 0x028000000000ull;
static const uptr kVdsoBeg = 0xf000000000000000ull;
};
+
+#define TSAN_MID_APP_RANGE 1
#elif defined(__mips64)
/*
C/C++ on linux/mips64
@@ -690,17 +697,23 @@ template<typename Mapping>
uptr ShadowToMemImpl(uptr s) {
DCHECK(IsShadowMem(s));
#ifndef SANITIZER_GO
- if (s >= MemToShadow(Mapping::kLoAppMemBeg)
- && s <= MemToShadow(Mapping::kLoAppMemEnd - 1))
- return (s / kShadowCnt) ^ Mapping::kAppMemXor;
+ // The shadow mapping is non-linear and we've lost some bits, so we don't have
+ // an easy way to restore the original app address. But the mapping is a
+ // bijection, so we try to restore the address as belonging to low/mid/high
+ // range consecutively and see if shadow->app->shadow mapping gives us the
+ // same address.
+ uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
+ if (MemToShadow(p) == s &&
+ p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd)
+ return p;
# ifdef TSAN_MID_APP_RANGE
- if (s >= MemToShadow(Mapping::kMidAppMemBeg)
- && s <= MemToShadow(Mapping::kMidAppMemEnd - 1))
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) + Mapping::kMidShadowOff;
+ p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) + Mapping::kMidShadowOff;
+ if (MemToShadow(p) == s &&
+ p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd)
+ return p;
# endif
- else
- return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
-#else
+ return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
+#else // #ifndef SANITIZER_GO
# ifndef SANITIZER_WINDOWS
return (s & ~Mapping::kShadowBeg) / kShadowCnt;
# else