author     Maxim Ostapenko <chefmax7@gmail.com>   2017-05-31 07:28:09 +0000
committer  Maxim Ostapenko <chefmax7@gmail.com>   2017-05-31 07:28:09 +0000
commit     e1a8ef98132e737d3e7c4045a3e6e6e7ae20242e (patch)
tree       db489cef503c1af92ec3a9d7651d282575f9ff29
parent     97fc005f646a7ce2ee15b97bb7e015a812eb2421 (diff)
[sanitizer] Avoid possible deadlock in child process after fork
This patch addresses https://github.com/google/sanitizers/issues/774. When we fork a multi-threaded process, it is possible to deadlock if some thread acquired the StackDepot or an allocator-internal lock just before the fork: that lock is never released in the child process, so the child deadlocks on its next memory alloc/dealloc routine. While calling alloc/dealloc routines after a multi-threaded fork is not allowed (POSIX permits only async-signal-safe calls in the child until exec), most modern allocators (glibc, tcmalloc, jemalloc) are fork-safe in practice. Let's do the same for the sanitizers, except for TSan, which has complex locking rules.

Differential Revision: https://reviews.llvm.org/D33325

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@304285 91177308-0d34-0410-b5e6-96231b3b80d8
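The failure mode is easy to reproduce with an ordinary mutex. A minimal stand-alone sketch, illustrative only and not part of this commit (the mutex stands in for the StackDepot/allocator lock):

    // A thread owns a lock while another thread forks. The child inherits
    // the locked mutex but not the owning thread, so any attempt to take
    // the lock in the child blocks forever.
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void *holder(void *) {
      pthread_mutex_lock(&lock);  // taken "just before fork"
      sleep(10);                  // still held while the main thread forks
      pthread_mutex_unlock(&lock);
      return nullptr;
    }

    int main() {
      pthread_t t;
      pthread_create(&t, nullptr, holder, nullptr);
      usleep(100 * 1000);  // let the holder acquire the lock
      if (fork() == 0) {
        pthread_mutex_lock(&lock);  // deadlocks: the owner does not exist here
        printf("unreachable\n");
        _exit(0);
      }
      pthread_join(t, nullptr);
      return 0;
    }

Built with g++ -pthread, the child hangs on the inherited lock, which mirrors what could happen inside the sanitizer runtime before this patch.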
-rw-r--r--  lib/asan/asan_allocator.cc                                      |   4
-rw-r--r--  lib/asan/asan_allocator.h                                       |   2
-rw-r--r--  lib/asan/asan_interceptors.cc                                   |  13
-rw-r--r--  lib/lsan/lsan_interceptors.cc                                   |  20
-rw-r--r--  lib/msan/msan_allocator.cc                                      |  96
-rw-r--r--  lib/msan/msan_allocator.h                                       |  97
-rw-r--r--  lib/msan/msan_interceptors.cc                                   |   2
-rw-r--r--  test/sanitizer_common/TestCases/Linux/allocator_fork_no_hang.cc | 118
8 files changed, 255 insertions(+), 97 deletions(-)
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index 7010b6023..db5a683e2 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -47,8 +47,6 @@ static u32 RZSize2Log(u32 rz_size) {
return res;
}
-static AsanAllocator &get_allocator();
-
// The memory chunk allocated from the underlying allocator looks like this:
// L L L L L L H H U U U U U U R R
// L -- left redzone words (0 or more bytes)
@@ -719,7 +717,7 @@ struct Allocator {
static Allocator instance(LINKER_INITIALIZED);
-static AsanAllocator &get_allocator() {
+AsanAllocator &get_allocator() {
return instance.allocator;
}
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index ad1aeb58a..ce3e25dc5 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -213,5 +213,7 @@ void asan_mz_force_unlock();
void PrintInternalAllocatorStats();
void AsanSoftRssLimitExceededCallback(bool exceeded);
+AsanAllocator &get_allocator();
+
} // namespace __asan
#endif // ASAN_ALLOCATOR_H
diff --git a/lib/asan/asan_interceptors.cc b/lib/asan/asan_interceptors.cc
index cb2214f96..1f0dc9e2d 100644
--- a/lib/asan/asan_interceptors.cc
+++ b/lib/asan/asan_interceptors.cc
@@ -22,6 +22,7 @@
#include "asan_stats.h"
#include "asan_suppressions.h"
#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_libc.h"
#if SANITIZER_POSIX
@@ -705,11 +706,23 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
#endif // ASAN_INTERCEPT___CXA_ATEXIT
#if ASAN_INTERCEPT_FORK
+static void BeforeFork() {
+ get_allocator().ForceLock();
+ StackDepotLockAll();
+}
+
+static void AfterFork() {
+ StackDepotUnlockAll();
+ get_allocator().ForceUnlock();
+}
+
INTERCEPTOR(int, fork, void) {
ENSURE_ASAN_INITED();
+ BeforeFork();
if (common_flags()->coverage) CovBeforeFork();
int pid = REAL(fork)();
if (common_flags()->coverage) CovAfterFork(pid);
+ AfterFork();
return pid;
}
#endif // ASAN_INTERCEPT_FORK
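The BeforeFork/AfterFork pair around REAL(fork) is the interceptor form of the classic pthread_atfork(3) idiom: take the locks before forking, release them in both resulting processes. Since fork returns in the parent and in the child, AfterFork() runs, and therefore unlocks, in both. A sketch of the equivalent standard idiom with a single illustrative lock (the patch itself uses the interceptor, not pthread_atfork):

    #include <pthread.h>

    // Illustrative stand-in for the allocator/StackDepot locks.
    static pthread_mutex_t allocator_mu = PTHREAD_MUTEX_INITIALIZER;

    static void prepare()   { pthread_mutex_lock(&allocator_mu); }   // before fork
    static void in_parent() { pthread_mutex_unlock(&allocator_mu); } // after, parent
    static void in_child()  { pthread_mutex_unlock(&allocator_mu); } // after, child

    // Once installed, every fork() happens with allocator_mu held by the
    // forking thread, and both processes release their copy right after,
    // leaving the child's lock in a usable state.
    void install_fork_hooks() {
      pthread_atfork(prepare, in_parent, in_child);
    }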
diff --git a/lib/lsan/lsan_interceptors.cc b/lib/lsan/lsan_interceptors.cc
index 9e39a7d19..b8cf6ae07 100644
--- a/lib/lsan/lsan_interceptors.cc
+++ b/lib/lsan/lsan_interceptors.cc
@@ -22,6 +22,7 @@
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
@@ -97,6 +98,24 @@ INTERCEPTOR(void*, valloc, uptr size) {
}
#endif
+static void BeforeFork() {
+ LockAllocator();
+ StackDepotLockAll();
+}
+
+static void AfterFork() {
+ StackDepotUnlockAll();
+ UnlockAllocator();
+}
+
+INTERCEPTOR(int, fork, void) {
+ ENSURE_LSAN_INITED;
+ BeforeFork();
+ int pid = REAL(fork)();
+ AfterFork();
+ return pid;
+}
+
#if SANITIZER_INTERCEPT_MEMALIGN
INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
@@ -336,6 +355,7 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(fork);
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 1be573faa..f76b01de0 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -12,8 +12,6 @@
// MemorySanitizer allocator.
//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
@@ -22,102 +20,12 @@
namespace __msan {
-struct Metadata {
- uptr requested_size;
-};
-
-struct MsanMapUnmapCallback {
- void OnMap(uptr p, uptr size) const {}
- void OnUnmap(uptr p, uptr size) const {
- __msan_unpoison((void *)p, size);
-
- // We are about to unmap a chunk of user memory.
- // Mark the corresponding shadow memory as not needed.
- uptr shadow_p = MEM_TO_SHADOW(p);
- ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
- if (__msan_get_track_origins()) {
- uptr origin_p = MEM_TO_ORIGIN(p);
- ReleaseMemoryPagesToOS(origin_p, origin_p + size);
- }
- }
-};
-
-#if defined(__mips64)
- static const uptr kMaxAllowedMallocSize = 2UL << 30;
- static const uptr kRegionSizeLog = 20;
- static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
- typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-
- struct AP32 {
- static const uptr kSpaceBeg = 0;
- static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
- static const uptr kMetadataSize = sizeof(Metadata);
- typedef __sanitizer::CompactSizeClassMap SizeClassMap;
- static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
- typedef __msan::ByteMap ByteMap;
- typedef MsanMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
- };
- typedef SizeClassAllocator32<AP32> PrimaryAllocator;
-#elif defined(__x86_64__)
-#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
- static const uptr kAllocatorSpace = 0x700000000000ULL;
-#else
- static const uptr kAllocatorSpace = 0x600000000000ULL;
-#endif
- static const uptr kMaxAllowedMallocSize = 8UL << 30;
-
- struct AP64 { // Allocator64 parameters. Deliberately using a short name.
- static const uptr kSpaceBeg = kAllocatorSpace;
- static const uptr kSpaceSize = 0x40000000000; // 4T.
- static const uptr kMetadataSize = sizeof(Metadata);
- typedef DefaultSizeClassMap SizeClassMap;
- typedef MsanMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
- };
-
- typedef SizeClassAllocator64<AP64> PrimaryAllocator;
-
-#elif defined(__powerpc64__)
- static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
-
- struct AP64 { // Allocator64 parameters. Deliberately using a short name.
- static const uptr kSpaceBeg = 0x300000000000;
- static const uptr kSpaceSize = 0x020000000000; // 2T.
- static const uptr kMetadataSize = sizeof(Metadata);
- typedef DefaultSizeClassMap SizeClassMap;
- typedef MsanMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
- };
-
- typedef SizeClassAllocator64<AP64> PrimaryAllocator;
-#elif defined(__aarch64__)
- static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
- static const uptr kRegionSizeLog = 20;
- static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
- typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-
- struct AP32 {
- static const uptr kSpaceBeg = 0;
- static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
- static const uptr kMetadataSize = sizeof(Metadata);
- typedef __sanitizer::CompactSizeClassMap SizeClassMap;
- static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
- typedef __msan::ByteMap ByteMap;
- typedef MsanMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
- };
- typedef SizeClassAllocator32<AP32> PrimaryAllocator;
-#endif
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
- SecondaryAllocator> Allocator;
-
static Allocator allocator;
static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
+Allocator &get_allocator() { return allocator; }
+
void MsanAllocatorInit() {
allocator.Init(
common_flags()->allocator_may_return_null,
diff --git a/lib/msan/msan_allocator.h b/lib/msan/msan_allocator.h
index 407942e54..abd4ea678 100644
--- a/lib/msan/msan_allocator.h
+++ b/lib/msan/msan_allocator.h
@@ -15,9 +15,106 @@
#define MSAN_ALLOCATOR_H
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
namespace __msan {
+struct Metadata {
+ uptr requested_size;
+};
+
+struct MsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnUnmap(uptr p, uptr size) const {
+ __msan_unpoison((void *)p, size);
+
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ uptr shadow_p = MEM_TO_SHADOW(p);
+ ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
+ if (__msan_get_track_origins()) {
+ uptr origin_p = MEM_TO_ORIGIN(p);
+ ReleaseMemoryPagesToOS(origin_p, origin_p + size);
+ }
+ }
+};
+
+#if defined(__mips64)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30;
+ static const uptr kRegionSizeLog = 20;
+ static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+ typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+ struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
+ typedef __msan::ByteMap ByteMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+ typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#elif defined(__x86_64__)
+#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
+ static const uptr kAllocatorSpace = 0x700000000000ULL;
+#else
+ static const uptr kAllocatorSpace = 0x600000000000ULL;
+#endif
+ static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
+#elif defined(__powerpc64__)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+
+ struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x300000000000;
+ static const uptr kSpaceSize = 0x020000000000; // 2T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+
+ typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__aarch64__)
+ static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+ static const uptr kRegionSizeLog = 20;
+ static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+ typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+ struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
+ typedef __msan::ByteMap ByteMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ };
+ typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+ SecondaryAllocator> Allocator;
+
+
+Allocator &get_allocator();
+
struct MsanThreadLocalMallocStorage {
uptr quarantine_cache[16];
// Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index 15543bd91..ab08744a7 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -1228,6 +1228,7 @@ INTERCEPTOR(void *, shmat, int shmid, const void *shmaddr, int shmflg) {
}
static void BeforeFork() {
+ get_allocator().ForceLock();
StackDepotLockAll();
ChainedOriginDepotLockAll();
}
@@ -1235,6 +1236,7 @@ static void BeforeFork() {
static void AfterFork() {
ChainedOriginDepotUnlockAll();
StackDepotUnlockAll();
+ get_allocator().ForceUnlock();
}
INTERCEPTOR(int, fork, void) {
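MSan already locked the StackDepot and the ChainedOriginDepot around fork; this patch adds the allocator lock, taken first in BeforeFork() and released last in AfterFork(). Releasing in reverse acquisition order keeps the lock hierarchy consistent across the fork. A small illustrative sketch of that discipline (std::mutex stands in for the sanitizer-internal locks):

    #include <mutex>

    std::mutex allocator_mu;    // taken first
    std::mutex stack_depot_mu;  // then the StackDepot
    std::mutex origin_depot_mu; // then the ChainedOriginDepot

    void BeforeForkSketch() {
      allocator_mu.lock();
      stack_depot_mu.lock();
      origin_depot_mu.lock();
    }

    void AfterForkSketch() {     // runs after fork in parent and child
      origin_depot_mu.unlock();  // reverse order of acquisition
      stack_depot_mu.unlock();
      allocator_mu.unlock();
    }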
diff --git a/test/sanitizer_common/TestCases/Linux/allocator_fork_no_hang.cc b/test/sanitizer_common/TestCases/Linux/allocator_fork_no_hang.cc
new file mode 100644
index 000000000..d159d85ee
--- /dev/null
+++ b/test/sanitizer_common/TestCases/Linux/allocator_fork_no_hang.cc
@@ -0,0 +1,118 @@
+// https://github.com/google/sanitizers/issues/774
+// Test that sanitizer allocator is fork-safe.
+// Run a number of threads that perform memory allocation/deallocation, then fork
+// and verify that malloc/free do not deadlock in the child process.
+
+// RUN: %clangxx -std=c++11 -O0 %s -o %t
+// RUN: ASAN_OPTIONS=detect_leaks=0 %run %t 2>&1 | FileCheck %s
+
+// Fun fact: if test output is redirected to a file (as opposed to
+// being piped directly to FileCheck), we may lose some "done"s due to
+// a kernel bug:
+// https://lkml.org/lkml/2014/2/17/324
+
+// UNSUPPORTED: tsan
+
+// Flaky on PPC64.
+// UNSUPPORTED: powerpc64-target-arch
+// UNSUPPORTED: powerpc64le-target-arch
+
+#include <pthread.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/time.h>
+#include <signal.h>
+#include <errno.h>
+
+int done;
+
+void *worker(void *arg) {
+ while (true) {
+ void *p = malloc(4);
+ if (__atomic_load_n(&done, __ATOMIC_RELAXED))
+ return 0;
+ }
+ return 0;
+}
+
+// Run through malloc/free in the child process.
+// This can deadlock on allocator cache refilling.
+void child() {
+ for (int i = 0; i < 10000; ++i) {
+ void *p = malloc(4);
+ }
+ write(2, "done\n", 5);
+}
+
+void test() {
+ const int kThreads = 10;
+ pthread_t t[kThreads];
+ for (int i = 0; i < kThreads; ++i)
+ pthread_create(&t[i], NULL, worker, (void*)(long)i);
+ usleep(100000);
+ pid_t pid = fork();
+ if (pid) {
+ // parent
+ __atomic_store_n(&done, 1, __ATOMIC_RELAXED);
+ pid_t p;
+ while ((p = wait(NULL)) == -1) { }
+ } else {
+ // child
+ child();
+ }
+}
+
+int main() {
+ const int kChildren = 30;
+ for (int i = 0; i < kChildren; ++i) {
+ pid_t pid = fork();
+ if (pid) {
+ // parent
+ } else {
+ test();
+ exit(0);
+ }
+ }
+
+ for (int i = 0; i < kChildren; ++i) {
+ pid_t p;
+ while ((p = wait(NULL)) == -1) { }
+ }
+
+ return 0;
+}
+
+// Expect 30 (== kChildren) "done" messages.
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done