author    Maxim Ostapenko <chefmax7@gmail.com> 2017-05-31 07:28:09 +0000
committer Maxim Ostapenko <chefmax7@gmail.com> 2017-05-31 07:28:09 +0000
commit    e1a8ef98132e737d3e7c4045a3e6e6e7ae20242e (patch)
tree      db489cef503c1af92ec3a9d7651d282575f9ff29 /lib/msan/msan_allocator.h
parent    97fc005f646a7ce2ee15b97bb7e015a812eb2421 (diff)
[sanitizer] Avoid possible deadlock in child process after fork
This patch addresses https://github.com/google/sanitizers/issues/774. When we fork a multi-threaded process, it is possible to deadlock if some thread acquired the StackDepot or an allocator-internal lock just before the fork. In that case the lock is never released in the child process, causing a deadlock in the next memory allocation or deallocation routine. While calling allocation/deallocation routines after a multi-threaded fork is formally not allowed, most modern allocators (glibc, tcmalloc, jemalloc) are fork-safe in practice. Let's do the same for the sanitizers, except TSan, which has complex locking rules.

Differential Revision: https://reviews.llvm.org/D33325

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@304285 91177308-0d34-0410-b5e6-96231b3b80d8
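The general recipe behind this kind of fix can be shown with a plain pthread_atfork handler: acquire the allocator's internal locks in the prepare hook, then release them in both the parent and child hooks, so the child never inherits a lock held by a thread that no longer exists there. A minimal self-contained sketch of the technique (g_allocator_lock and the handler names are illustrative stand-ins, not compiler-rt's actual internals):

#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>
#include <cstdlib>

static pthread_mutex_t g_allocator_lock = PTHREAD_MUTEX_INITIALIZER;

// Runs in the parent just before fork(): once we hold the lock, no other
// thread can be inside the allocator at the moment the child is created.
static void BeforeFork() { pthread_mutex_lock(&g_allocator_lock); }

// Runs after fork() in both parent and child; releasing the lock in the
// child is what prevents the deadlock described above.
static void AfterFork() { pthread_mutex_unlock(&g_allocator_lock); }

int main() {
  pthread_atfork(BeforeFork, AfterFork, AfterFork);
  pid_t pid = fork();
  if (pid == 0) {
    void *p = malloc(64);  // safe: the child released the lock in AfterFork()
    free(p);
    _exit(0);
  }
  waitpid(pid, nullptr, 0);
  return 0;
}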
Diffstat (limited to 'lib/msan/msan_allocator.h')
-rw-r--r--  lib/msan/msan_allocator.h  97
1 file changed, 97 insertions(+), 0 deletions(-)
diff --git a/lib/msan/msan_allocator.h b/lib/msan/msan_allocator.h
index 407942e54..abd4ea678 100644
--- a/lib/msan/msan_allocator.h
+++ b/lib/msan/msan_allocator.h
@@ -15,9 +15,106 @@
#define MSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
namespace __msan {
+struct Metadata {
+ uptr requested_size;
+};
+
+struct MsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnUnmap(uptr p, uptr size) const {
+ __msan_unpoison((void *)p, size);
+
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ uptr shadow_p = MEM_TO_SHADOW(p);
+ ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
+ if (__msan_get_track_origins()) {
+ uptr origin_p = MEM_TO_ORIGIN(p);
+ ReleaseMemoryPagesToOS(origin_p, origin_p + size);
+ }
+ }
+};
+
+#if defined(__mips64)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30;
+ static const uptr kRegionSizeLog = 20;
+ static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+ typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+ struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
+ typedef __msan::ByteMap ByteMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+ typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#elif defined(__x86_64__)
+#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
+ static const uptr kAllocatorSpace = 0x700000000000ULL;
+#else
+ static const uptr kAllocatorSpace = 0x600000000000ULL;
+#endif
+ static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
+#elif defined(__powerpc64__)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x300000000000;
+ static const uptr kSpaceSize = 0x020000000000; // 2T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__aarch64__)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+ static const uptr kRegionSizeLog = 20;
+ static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+ typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+ struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
+ typedef __msan::ByteMap ByteMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+ typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+
+Allocator &get_allocator();
+
struct MsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
// Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
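
Beyond this excerpt, the typedef chain at the end of the hunk stacks a size-class primary allocator, a per-thread SizeClassAllocatorLocalCache, and an mmap-backed LargeMmapAllocator into one CombinedAllocator exposed through get_allocator(). A rough, self-contained sketch of that dispatch structure, using hypothetical Demo types rather than the real sanitizer_common templates:

#include <cstddef>
#include <cstdlib>

// Hypothetical stand-ins for the sanitizer_common classes; the real ones
// manage size classes, per-thread caches, metadata, and shadow memory.
struct PrimaryDemo {                    // size-class allocator, small blocks
  static bool CanAllocate(size_t size) { return size <= (64 << 10); }
  void *Allocate(size_t size) { return malloc(size); }  // placeholder backing
  void Deallocate(void *p) { free(p); }
};

struct SecondaryDemo {                  // mmap-backed allocator, large blocks
  void *Allocate(size_t size) { return malloc(size); }  // placeholder backing
  void Deallocate(void *p) { free(p); }
};

// Mirrors the dispatch idea of CombinedAllocator: small requests go to the
// primary (through a per-thread cache in the real code), everything else
// falls through to the secondary. Deallocation in the real implementation
// routes by an address-range check (PointerIsMine) rather than by size.
struct CombinedDemo {
  PrimaryDemo primary;
  SecondaryDemo secondary;
  void *Allocate(size_t size) {
    return PrimaryDemo::CanAllocate(size) ? primary.Allocate(size)
                                          : secondary.Allocate(size);
  }
};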