author     Jakub Jelinek <jakub@redhat.com>   2017-10-19 13:23:59 +0200
committer  Jakub Jelinek <jakub@gcc.gnu.org>  2017-10-19 13:23:59 +0200
commit     5d3805fca3e9a199fbaa18aee3c05ecb30ebca61 (patch)
tree       22f091462fe7932142888d43abb6ff39f610e7f7 /libsanitizer/lsan
parent     93659712d9807d7ef01d472dc58119f4ac15cbad (diff)
ubsan.c (ubsan_expand_null_ifn): Use _v1 suffixed type mismatch builtins...
	* ubsan.c (ubsan_expand_null_ifn): Use _v1 suffixed type mismatch
	builtins, store max (log2 (align), 0) into uchar field instead of
	align into uptr field.
	(ubsan_expand_objsize_ifn): Use _v1 suffixed type mismatch builtins,
	store uchar 0 field instead of uptr 0 field.
	(instrument_nonnull_return): Use _v1 suffixed nonnull return builtin,
	instead of passing one address of struct with 2 locations pass
	two addresses of structs with 1 location each.
	* sanitizer.def (BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH,
	BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_ABORT,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_ABORT): Removed.
	(BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1,
	BUILT_IN_UBSAN_HANDLE_TYPE_MISMATCH_V1_ABORT,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1,
	BUILT_IN_UBSAN_HANDLE_NONNULL_RETURN_V1_ABORT): New builtins.
	* c-c++-common/ubsan/float-cast-overflow-1.c: Drop value keyword
	from expected output regexps.
	* c-c++-common/ubsan/float-cast-overflow-2.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-3.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-4.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-5.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-6.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-8.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-9.c: Likewise.
	* c-c++-common/ubsan/float-cast-overflow-10.c: Likewise.
	* g++.dg/ubsan/float-cast-overflow-bf.C: Likewise.
	* gcc.dg/ubsan/float-cast-overflow-bf.c: Likewise.
	* g++.dg/asan/default-options-1.C (__asan_default_options): Add
	used attribute.
	* g++.dg/asan/asan_test.C: Run with ASAN_OPTIONS=handle_segv=2
	in the environment.
	* All source files: Merge from upstream 315899.
	* asan/Makefile.am (nodist_saninclude_HEADERS): Add
	include/sanitizer/tsan_interface.h.
	* asan/libtool-version: Bump the libasan SONAME.
	* lsan/Makefile.am (sanitizer_lsan_files): Add lsan_common_mac.cc.
	(lsan_files): Add lsan_linux.cc, lsan_mac.cc and lsan_malloc_mac.cc.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add
	sancov_flags.cc, sanitizer_allocator_checks.cc,
	sanitizer_coverage_libcdep_new.cc, sanitizer_errno.cc,
	sanitizer_file.cc, sanitizer_mac_libcdep.cc and
	sanitizer_stoptheworld_mac.cc.  Remove sanitizer_coverage_libcdep.cc
	and sanitizer_coverage_mapping_libcdep.cc.
	* tsan/Makefile.am (tsan_files): Add tsan_external.cc.
	* ubsan/Makefile.am (DEFS): Add -DUBSAN_CAN_USE_CXXABI=1.
	(ubsan_files): Add ubsan_init_standalone.cc and
	ubsan_signals_standalone.cc.
	* ubsan/libtool-version: Bump the libubsan SONAME.
	* asan/Makefile.in: Regenerate.
	* lsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.in: Regenerate.
	* tsan/Makefile.in: Regenerate.
	* ubsan/Makefile.in: Regenerate.

From-SVN: r253887
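As background for the first ChangeLog entry above: the _v1 type mismatch descriptors shrink the alignment field from a pointer-sized value to a single byte by recording max(log2(align), 0) rather than the alignment itself. A byte suffices because an alignment is a power of two, so only its exponent needs to be stored. A minimal sketch of that encoding (illustrative only, not GCC's implementation):

    #include <cstdint>
    // Encode a power-of-two alignment as its base-2 exponent; 0 and 1 both
    // encode as 0, matching max(log2(align), 0).
    static uint8_t encode_alignment(uint64_t align) {
      uint8_t log = 0;
      while (align > 1) { align >>= 1; ++log; }
      return log;
    }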
Diffstat (limited to 'libsanitizer/lsan')
-rw-r--r--  libsanitizer/lsan/Makefile.am             6
-rw-r--r--  libsanitizer/lsan/Makefile.in            17
-rw-r--r--  libsanitizer/lsan/lsan.cc                17
-rw-r--r--  libsanitizer/lsan/lsan.h                 49
-rw-r--r--  libsanitizer/lsan/lsan_allocator.cc     109
-rw-r--r--  libsanitizer/lsan/lsan_allocator.h       57
-rw-r--r--  libsanitizer/lsan/lsan_common.cc        253
-rw-r--r--  libsanitizer/lsan/lsan_common.h          83
-rw-r--r--  libsanitizer/lsan/lsan_common_linux.cc  104
-rw-r--r--  libsanitizer/lsan/lsan_common_mac.cc    197
-rw-r--r--  libsanitizer/lsan/lsan_interceptors.cc  236
-rw-r--r--  libsanitizer/lsan/lsan_linux.cc          31
-rw-r--r--  libsanitizer/lsan/lsan_mac.cc           190
-rw-r--r--  libsanitizer/lsan/lsan_malloc_mac.cc     53
-rw-r--r--  libsanitizer/lsan/lsan_thread.cc         21
-rw-r--r--  libsanitizer/lsan/lsan_thread.h           2
16 files changed, 1144 insertions, 281 deletions
diff --git a/libsanitizer/lsan/Makefile.am b/libsanitizer/lsan/Makefile.am
index 294342b1926..9638f30da1a 100644
--- a/libsanitizer/lsan/Makefile.am
+++ b/libsanitizer/lsan/Makefile.am
@@ -16,11 +16,15 @@ endif
sanitizer_lsan_files = \
lsan_common.cc \
- lsan_common_linux.cc
+ lsan_common_linux.cc \
+ lsan_common_mac.cc
lsan_files = \
$(sanitizer_lsan_files) \
lsan.cc \
+ lsan_linux.cc \
+ lsan_mac.cc \
+ lsan_malloc_mac.cc \
lsan_allocator.cc \
lsan_interceptors.cc \
lsan_preinit.cc \
diff --git a/libsanitizer/lsan/Makefile.in b/libsanitizer/lsan/Makefile.in
index d951741c300..f0d1e0f37da 100644
--- a/libsanitizer/lsan/Makefile.in
+++ b/libsanitizer/lsan/Makefile.in
@@ -107,9 +107,10 @@ liblsan_la_DEPENDENCIES = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/interception/libinterception.la \
$(am__append_1) $(am__DEPENDENCIES_1)
-am__objects_1 = lsan_common.lo lsan_common_linux.lo
-am__objects_2 = $(am__objects_1) lsan.lo lsan_allocator.lo \
- lsan_interceptors.lo lsan_preinit.lo lsan_thread.lo
+am__objects_1 = lsan_common.lo lsan_common_linux.lo lsan_common_mac.lo
+am__objects_2 = $(am__objects_1) lsan.lo lsan_linux.lo lsan_mac.lo \
+ lsan_malloc_mac.lo lsan_allocator.lo lsan_interceptors.lo \
+ lsan_preinit.lo lsan_thread.lo
am_liblsan_la_OBJECTS = $(am__objects_2)
liblsan_la_OBJECTS = $(am_liblsan_la_OBJECTS)
liblsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -299,11 +300,15 @@ noinst_LTLIBRARIES = libsanitizer_lsan.la
@LSAN_SUPPORTED_TRUE@toolexeclib_LTLIBRARIES = liblsan.la
sanitizer_lsan_files = \
lsan_common.cc \
- lsan_common_linux.cc
+ lsan_common_linux.cc \
+ lsan_common_mac.cc
lsan_files = \
$(sanitizer_lsan_files) \
lsan.cc \
+ lsan_linux.cc \
+ lsan_mac.cc \
+ lsan_malloc_mac.cc \
lsan_allocator.cc \
lsan_interceptors.cc \
lsan_preinit.cc \
@@ -446,7 +451,11 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_common_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_interceptors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_linux.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_mac.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_malloc_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_preinit.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/lsan_thread.Plo@am__quote@
diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc
index 2ded5544c71..7540aeb327b 100644
--- a/libsanitizer/lsan/lsan.cc
+++ b/libsanitizer/lsan/lsan.cc
@@ -54,6 +54,9 @@ static void InitializeFlags() {
RegisterLsanFlags(&parser, f);
RegisterCommonFlags(&parser);
+ // Override from user-specified string.
+ const char *lsan_default_options = MaybeCallLsanDefaultOptions();
+ parser.ParseString(lsan_default_options);
parser.ParseString(GetEnv("LSAN_OPTIONS"));
SetVerbosity(common_flags()->verbosity);
@@ -63,6 +66,18 @@ static void InitializeFlags() {
if (common_flags()->help) parser.PrintFlagDescriptions();
}
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ GetStackTraceWithPcBpAndContext(stack, kStackTraceMax, sig.pc, sig.bp,
+ sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
+ nullptr);
+}
+
extern "C" void __lsan_init() {
CHECK(!lsan_init_is_running);
if (lsan_inited)
@@ -74,9 +89,11 @@ extern "C" void __lsan_init() {
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
+ ReplaceSystemMalloc();
InitTlsSize();
InitializeInterceptors();
InitializeThreadRegistry();
+ InstallDeadlySignalHandlers(LsanOnDeadlySignal);
u32 tid = ThreadCreate(0, 0, true);
CHECK_EQ(tid, 0);
ThreadStart(tid, GetTid());
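The InitializeFlags() hunk above makes __lsan_init() parse MaybeCallLsanDefaultOptions() before LSAN_OPTIONS, so a program can bake in default flags while the environment still overrides them. A minimal usage sketch (user code, not part of this patch; the suppression file name is hypothetical):

    // Override the weak hook that MaybeCallLsanDefaultOptions() resolves to.
    // LSAN_OPTIONS from the environment is parsed afterwards and therefore
    // takes precedence over these baked-in defaults.
    extern "C" const char *__lsan_default_options() {
      return "print_suppressions=0:suppressions=my_lsan.supp";
    }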
diff --git a/libsanitizer/lsan/lsan.h b/libsanitizer/lsan/lsan.h
index 6d2d427b27d..1cd1c36dbc4 100644
--- a/libsanitizer/lsan/lsan.h
+++ b/libsanitizer/lsan/lsan.h
@@ -10,24 +10,15 @@
//
//===----------------------------------------------------------------------===//
+#include "lsan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#define GET_STACK_TRACE(max_size, fast) \
- BufferedStackTrace stack; \
- { \
- uptr stack_top = 0, stack_bottom = 0; \
- ThreadContext *t; \
- if (fast && (t = CurrentThreadContext())) { \
- stack_top = t->stack_end(); \
- stack_bottom = t->stack_begin(); \
- } \
- if (!SANITIZER_MIPS || \
- IsValidFrame(GET_CURRENT_FRAME(), stack_top, stack_bottom)) { \
- stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
- /* context */ 0, stack_top, stack_bottom, fast); \
- } \
- }
+#define GET_STACK_TRACE(max_size, fast) \
+ __sanitizer::BufferedStackTrace stack; \
+ GetStackTraceWithPcBpAndContext(&stack, max_size, \
+ StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), nullptr, fast);
#define GET_STACK_TRACE_FATAL \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
@@ -36,9 +27,37 @@
GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
common_flags()->fast_unwind_on_malloc)
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
namespace __lsan {
void InitializeInterceptors();
+void ReplaceSystemMalloc();
+
+#define ENSURE_LSAN_INITED do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+} while (0)
+
+// Get the stack trace with the given pc and bp.
+// The pc will be in the position 0 of the resulting stack trace.
+// The bp may refer to the current frame or to the caller's frame.
+ALWAYS_INLINE
+void GetStackTraceWithPcBpAndContext(__sanitizer::BufferedStackTrace *stack,
+ __sanitizer::uptr max_depth,
+ __sanitizer::uptr pc, __sanitizer::uptr bp,
+ void *context, bool fast) {
+ uptr stack_top = 0, stack_bottom = 0;
+ ThreadContext *t;
+ if (fast && (t = CurrentThreadContext())) {
+ stack_top = t->stack_end();
+ stack_bottom = t->stack_begin();
+ }
+ if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
+ stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+ }
+}
} // namespace __lsan
diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc
index 0d2fceac71b..9e166807791 100644
--- a/libsanitizer/lsan/lsan_allocator.cc
+++ b/libsanitizer/lsan/lsan_allocator.cc
@@ -13,7 +13,9 @@
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
@@ -22,51 +24,27 @@
extern "C" void *memset(void *ptr, int value, uptr num);
namespace __lsan {
-
-struct ChunkMetadata {
- u8 allocated : 8; // Must be first.
- ChunkTag tag : 2;
- uptr requested_size : 54;
- u32 stack_trace_id;
-};
-
-#if defined(__mips64) || defined(__aarch64__)
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1UL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
-static const uptr kRegionSizeLog = 20;
-static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-typedef CompactSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
- sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
- PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
-
-struct AP64 { // Allocator64 parameters. Deliberately using a short name.
- static const uptr kSpaceBeg = 0x600000000000ULL;
- static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
- static const uptr kMetadataSize = sizeof(ChunkMetadata);
- typedef DefaultSizeClassMap SizeClassMap;
- typedef NoOpMapUnmapCallback MapUnmapCallback;
- static const uptr kFlags = 0;
-};
-
-typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
-static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {
- allocator.SwallowCache(&cache);
+ allocator.SwallowCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
@@ -96,9 +74,9 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
size = 1;
if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
- return nullptr;
+ return Allocator::FailureHandler::OnBadRequest();
}
- void *p = allocator.Allocate(&cache, size, alignment, false);
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
@@ -108,11 +86,18 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
return p;
}
+static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+ return Allocator::FailureHandler::OnBadRequest();
+ size *= nmemb;
+ return Allocate(stack, size, 1, true);
+}
+
void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
@@ -120,17 +105,17 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
- allocator.Deallocate(&cache, p);
- return nullptr;
+ allocator.Deallocate(GetAllocatorCache(), p);
+ return Allocator::FailureHandler::OnBadRequest();
}
- p = allocator.Reallocate(&cache, p, new_size, alignment);
+ p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
- *begin = (uptr)&cache;
- *end = *begin + sizeof(cache);
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
}
uptr GetMallocUsableSize(const void *p) {
@@ -139,6 +124,39 @@ uptr GetMallocUsableSize(const void *p) {
return m->requested_size;
}
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
+}
+
+void lsan_free(void *p) {
+ Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Reallocate(stack, p, size, 1));
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Calloc(nmemb, size, stack));
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(
+ Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
+}
+
+uptr lsan_mz_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
///// Interface to the common LSan module. /////
void LockAllocator() {
@@ -254,4 +272,17 @@ SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
return GetMallocUsableSize(p);
}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+#endif
} // extern "C"
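The new Calloc() helper above guards the nmemb * size multiplication with CheckForCallocOverflow() before handing the product to Allocate(). A minimal sketch of the condition being guarded against (assuming a 64-bit size type; this is not the sanitizer's implementation):

    #include <cstdint>
    // If nmemb * size does not fit in 64 bits the product wraps, and an
    // unchecked calloc would "succeed" with a much-too-small allocation.
    static bool calloc_would_overflow(uint64_t nmemb, uint64_t size) {
      return size != 0 && nmemb > UINT64_MAX / size;
    }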
diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h
index aae0d28dc4a..b0c0ec241d9 100644
--- a/libsanitizer/lsan/lsan_allocator.h
+++ b/libsanitizer/lsan/lsan_allocator.h
@@ -13,8 +13,10 @@
#ifndef LSAN_ALLOCATOR_H
#define LSAN_ALLOCATOR_H
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
namespace __lsan {
@@ -32,6 +34,61 @@ void GetAllocatorCacheRange(uptr *begin, uptr *end);
void AllocatorThreadFinish();
void InitializeAllocator();
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+ u8 allocated : 8; // Must be first.
+ ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+ uptr requested_size : 54;
+#else
+ uptr requested_size : 32;
+ uptr padding : 22;
+#endif
+ u32 stack_trace_id;
+};
+
+#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__arm__)
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = __lsan::kRegionSizeLog;
+ typedef __lsan::ByteMap ByteMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#elif defined(__x86_64__) || defined(__powerpc64__)
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x600000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+
+AllocatorCache *GetAllocatorCache();
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
} // namespace __lsan
#endif // LSAN_ALLOCATOR_H
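The ChunkMetadata layout moved into this header now has an explicit 32-bit variant; the bit-field widths are chosen so the fields ahead of stack_trace_id always fill exactly 64 bits. A quick check of that arithmetic (illustrative; the final object size still depends on the ABI's bit-field packing rules):

    // 64-bit targets: 8 (allocated) + 2 (tag) + 54 (requested_size)            = 64
    // 32-bit targets: 8 (allocated) + 2 (tag) + 32 (requested_size) + 22 (pad) = 64
    static_assert(8 + 2 + 54 == 64, "64-bit fields fill one word before stack_trace_id");
    static_assert(8 + 2 + 32 + 22 == 64, "32-bit fields fill 64 bits before stack_trace_id");

Either way requested_size can represent far more than the kMaxAllowedMallocSize limits declared in lsan_allocator.cc, so the cap there is a policy choice rather than a field-width constraint.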
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index 41024e11873..a3274d5c1c3 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -30,20 +30,15 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
-__attribute__((tls_model("initial-exec")))
-THREADLOCAL int disable_counter;
-bool DisabledInThisThread() { return disable_counter > 0; }
-void DisableInThisThread() { disable_counter++; }
-void EnableInThisThread() {
- if (!disable_counter && common_flags()->detect_leaks) {
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+ if (common_flags()->detect_leaks) {
Report("Unmatched call to __lsan_enable().\n");
Die();
}
- disable_counter--;
}
-Flags lsan_flags;
-
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
@@ -71,6 +66,19 @@ ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char kStdSuppressions[] =
+#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // definition.
+ "leak:*pthread_exit*\n"
+#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+#if SANITIZER_MAC
+ // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+ "leak:*_os_trace*\n"
+#endif
+ // TLS leak in some glibc versions, described in
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+ "leak:*tls_get_addr*\n";
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
@@ -79,6 +87,7 @@ void InitializeSuppressions() {
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
suppression_ctx->Parse(__lsan_default_suppressions());
+ suppression_ctx->Parse(kStdSuppressions);
}
static SuppressionContext *GetSuppressionContext() {
@@ -86,12 +95,9 @@ static SuppressionContext *GetSuppressionContext() {
return suppression_ctx;
}
-struct RootRegion {
- const void *begin;
- uptr size;
-};
+static InternalMmapVector<RootRegion> *root_regions;
-InternalMmapVector<RootRegion> *root_regions;
+InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
void InitializeRootRegions() {
CHECK(!root_regions);
@@ -99,6 +105,10 @@ void InitializeRootRegions() {
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
+const char *MaybeCallLsanDefaultOptions() {
+ return (&__lsan_default_options) ? __lsan_default_options() : "";
+}
+
void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
@@ -114,7 +124,6 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
Decorator() : SanitizerCommonDecorator() { }
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
- const char *End() { return Default(); }
};
static inline bool CanBeAHeapPointer(uptr p) {
@@ -178,6 +187,23 @@ void ScanRangeForPointers(uptr begin, uptr end,
}
}
+// Scans a global range for pointers
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
@@ -186,11 +212,11 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
- InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
+ InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end = registers_begin + registers.size();
- for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
- uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+ tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
@@ -204,11 +230,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
continue;
}
uptr sp;
- bool have_registers =
- (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
- if (!have_registers) {
- Report("Unable to get registers from thread %d.\n");
- // If unable to get SP, consider the entire stack to be reachable.
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %d.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
sp = stack_begin;
}
@@ -242,21 +270,23 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_tls) {
- LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
- if (cache_begin == cache_end) {
- ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
- } else {
- // Because LSan should not be loaded with dlopen(), we can assume
- // that allocator cache will be part of static TLS image.
- CHECK_LE(tls_begin, cache_begin);
- CHECK_GE(tls_end, cache_end);
- if (tls_begin < cache_begin)
- ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
- kReachable);
- if (tls_end > cache_end)
- ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
+ if (tls_begin) {
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
+ // If the tls and cache ranges don't overlap, scan full tls range,
+ // otherwise, only scan the non-overlapping portions
+ if (cache_begin == cache_end || tls_end < cache_begin ||
+ tls_begin > cache_end) {
+ ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+ } else {
+ if (tls_begin < cache_begin)
+ ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+ kReachable);
+ if (tls_end > cache_end)
+ ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+ kReachable);
+ }
}
- if (dtls) {
+ if (dtls && !DTLSInDestruction(dtls)) {
for (uptr j = 0; j < dtls->dtv_size; ++j) {
uptr dtls_beg = dtls->dtv[j].beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size;
@@ -266,28 +296,36 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
+ } else {
+ // We are handling a thread with DTLS under destruction. Log about
+ // this and continue.
+ LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
}
}
}
}
-static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
- uptr root_end) {
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr begin, end, prot;
- while (proc_maps.Next(&begin, &end,
- /*offset*/ nullptr, /*filename*/ nullptr,
- /*filename_size*/ 0, &prot)) {
- uptr intersection_begin = Max(root_begin, begin);
- uptr intersection_end = Min(end, root_end);
- if (intersection_begin >= intersection_end) continue;
- bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
- LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
- root_begin, root_end, begin, end,
- is_readable ? "readable" : "unreadable");
- if (is_readable)
- ScanRangeForPointers(intersection_begin, intersection_end, frontier,
- "ROOT", kReachable);
+void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
+ uptr region_begin, uptr region_end, bool is_readable) {
+ uptr intersection_begin = Max(root_region.begin, region_begin);
+ uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
+ if (intersection_begin >= intersection_end) return;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_region.begin, root_region.begin + root_region.size,
+ region_begin, region_end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
+ kReachable);
+}
+
+static void ProcessRootRegion(Frontier *frontier,
+ const RootRegion &root_region) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ ScanRootRegion(frontier, root_region, segment.start, segment.end,
+ segment.IsReadable());
}
}
@@ -296,9 +334,7 @@ static void ProcessRootRegions(Frontier *frontier) {
if (!flags()->use_root_regions) return;
CHECK(root_regions);
for (uptr i = 0; i < root_regions->size(); i++) {
- RootRegion region = (*root_regions)[i];
- uptr begin_addr = reinterpret_cast<uptr>(region.begin);
- ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+ ProcessRootRegion(frontier, (*root_regions)[i]);
}
}
@@ -336,6 +372,72 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
}
}
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
+ CHECK(stack_id);
+ StackTrace stack = map->Get(stack_id);
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+struct InvalidPCParam {
+ Frontier *frontier;
+ StackDepotReverseMap *stack_depot_reverse_map;
+ bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+ u32 stack_id = m.stack_trace_id();
+ uptr caller_pc = 0;
+ if (stack_id > 0)
+ caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ if (caller_pc == 0 || (param->skip_linker_allocations &&
+ GetLinker()->containsAddress(caller_pc))) {
+ m.set_tag(kReachable);
+ param->frontier->push_back(chunk);
+ }
+ }
+}
+
+// On Linux, handles dynamically allocated TLS blocks by treating all chunks
+// allocated from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+void ProcessPC(Frontier *frontier) {
+ StackDepotReverseMap stack_depot_reverse_map;
+ InvalidPCParam arg;
+ arg.frontier = frontier;
+ arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+ arg.skip_linker_allocations =
+ flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+ ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
@@ -347,11 +449,13 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
+ CHECK_EQ(0, frontier.size());
+ ProcessPC(&frontier);
+
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
LOG_POINTERS("Processing platform-specific allocations.\n");
- CHECK_EQ(0, frontier.size());
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
@@ -461,7 +565,7 @@ static bool CheckForLeaks() {
"\n");
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
- Printf("%s", d.End());
+ Printf("%s", d.Default());
param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
if (common_flags()->print_suppressions)
@@ -473,18 +577,16 @@ static bool CheckForLeaks() {
return false;
}
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
void DoLeakCheck() {
BlockingMutexLock l(&global_mutex);
static bool already_done;
if (already_done) return;
already_done = true;
- bool have_leaks = CheckForLeaks();
- if (!have_leaks) {
- return;
- }
- if (common_flags()->exitcode) {
- Die();
- }
+ has_reported_leaks = CheckForLeaks();
+ if (has_reported_leaks) HandleLeaks();
}
static int DoRecoverableLeakCheck() {
@@ -493,6 +595,8 @@ static int DoRecoverableLeakCheck() {
return have_leaks ? 1 : 0;
}
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
static Suppression *GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr;
@@ -597,7 +701,7 @@ void LeakReport::PrintReportForLeak(uptr index) {
Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
leaks_[index].total_size, leaks_[index].hit_count);
- Printf("%s", d.End());
+ Printf("%s", d.Default());
PrintStackTraceById(leaks_[index].stack_trace_id);
@@ -655,6 +759,7 @@ uptr LeakReport::UnsuppressedLeakCount() {
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
+void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
@@ -687,7 +792,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
BlockingMutexLock l(&global_mutex);
CHECK(root_regions);
- RootRegion region = {begin, size};
+ RootRegion region = {reinterpret_cast<uptr>(begin), size};
root_regions->push_back(region);
VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
@@ -701,7 +806,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
bool removed = false;
for (uptr i = 0; i < root_regions->size(); i++) {
RootRegion region = (*root_regions)[i];
- if (region.begin == begin && region.size == size) {
+ if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
removed = true;
uptr last_index = root_regions->size() - 1;
(*root_regions)[i] = (*root_regions)[last_index];
@@ -753,8 +858,18 @@ int __lsan_do_recoverable_leak_check() {
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char * __lsan_default_options() {
+ return "";
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
return 0;
}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions() {
+ return "";
+}
#endif
} // extern "C"
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index 1091b84f108..e99cd9e1b52 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -20,8 +20,24 @@
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID) && (SANITIZER_WORDSIZE == 64) \
- && (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
+// LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) thus
+// supported for Linux only. Also, LSan doesn't like 32 bit architectures
+// because of "small" (4 bytes) pointer size that leads to high false negative
+// ratio on large leaks. But we still want to have it for some 32 bit arches
+// (e.g. x86), see https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement
+// internal_clone function as well as (probably) adjust TLS machinery for
+// new architecture inside sanitizer library.
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+ (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+ defined(__powerpc64__))
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && \
+ (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && \
+ SANITIZER_LINUX && !SANITIZER_ANDROID
#define CAN_SANITIZE_LEAKS 1
#else
#define CAN_SANITIZE_LEAKS 0
@@ -42,6 +58,8 @@ enum ChunkTag {
kIgnored = 3
};
+const u32 kInvalidTid = (u32) -1;
+
struct Flags {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
#include "lsan_flags.inc"
@@ -99,12 +117,22 @@ typedef InternalMmapVector<uptr> Frontier;
void InitializePlatformSpecificModules();
void ProcessGlobalRegions(Frontier *frontier);
void ProcessPlatformSpecificAllocations(Frontier *frontier);
+
+struct RootRegion {
+ uptr begin;
+ uptr size;
+};
+
+InternalMmapVector<RootRegion> const *GetRootRegions();
+void ScanRootRegion(Frontier *frontier, RootRegion const &region,
+ uptr region_begin, uptr region_end, bool is_readable);
// Run stoptheworld while holding any platform-specific locks.
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
void ScanRangeForPointers(uptr begin, uptr end,
Frontier *frontier,
const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
enum IgnoreObjectResult {
kIgnoreObjectSuccess,
@@ -113,8 +141,11 @@ enum IgnoreObjectResult {
};
// Functions called from the parent tool.
+const char *MaybeCallLsanDefaultOptions();
void InitCommonLsan();
void DoLeakCheck();
+void DoRecoverableLeakCheckVoid();
+void DisableCounterUnderflow();
bool DisabledInThisThread();
// Used to implement __lsan::ScopedDisabler.
@@ -127,13 +158,36 @@ struct ScopedInterceptorDisabler {
~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
+// According to Itanium C++ ABI array cookie is a one word containing
+// size of allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to ARM C++ ABI array cookie consists of two words:
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
// Special case for "new T[0]" where T is a type with DTOR.
-// new T[0] will allocate one word for the array size (0) and store a pointer
-// to the end of allocated chunk.
+// new T[0] will allocate a cookie (one or two words) for the array size (0)
+// and store a pointer to the end of allocated chunk. The actual cookie layout
+// varies between platforms according to their C++ ABI implementation.
inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
uptr addr) {
- return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
- *reinterpret_cast<uptr *>(chunk_beg) == 0;
+#if defined(__arm__)
+ return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+ return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
}
// The following must be implemented in the parent tool.
@@ -149,10 +203,10 @@ bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
void LockThreadRegistry();
void UnlockThreadRegistry();
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls);
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread
// registry. We need this to handle processes that fork() without a subsequent
@@ -168,6 +222,16 @@ uptr PointsIntoChunk(void *p);
uptr GetUserBegin(uptr chunk);
// Helper for __lsan_ignore_object().
IgnoreObjectResult IgnoreObjectLocked(const void *p);
+
+// Return the linker module, if valid for the platform.
+LoadedModule *GetLinker();
+
+// Return true if LSan has finished leak checking and reported leaks.
+bool HasReportedLeaks();
+
+// Run platform-specific leak handlers.
+void HandleLeaks();
+
// Wrapper for chunk metadata operations.
class LsanMetadata {
public:
@@ -186,6 +250,9 @@ class LsanMetadata {
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off();
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
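The array-cookie helpers added in this header (IsItaniumABIArrayCookie and IsARMABIArrayCookie) are easier to follow with the chunk layout in mind. A sketch of the Itanium C++ ABI case for new T[0] when T has a non-trivial destructor (illustrative only, not part of the patch):

    //   chunk_beg                       addr handed back by new T[0]
    //   |                               |
    //   v                               v
    //   +-------------------------------+
    //   | uptr cookie = 0 (elem count)  |   ...no elements follow...
    //   +-------------------------------+
    //
    // The chunk is exactly one word (chunk_size == sizeof(uptr)), the pointer
    // the program holds is chunk_beg + sizeof(uptr), and that word stores 0;
    // these are exactly the three conditions IsItaniumABIArrayCookie() tests.
    // The ARM variant prepends an element_size word, hence the two-word check
    // in IsARMABIArrayCookie().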
diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc
index abbb61f07c9..677727229b1 100644
--- a/libsanitizer/lsan/lsan_common_linux.cc
+++ b/libsanitizer/lsan/lsan_common_linux.cc
@@ -32,6 +32,17 @@ static bool IsLinker(const char* full_name) {
return LibraryNameIs(full_name, kLinkerName);
}
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
void InitializePlatformSpecificModules() {
ListOfModules modules;
modules.init();
@@ -49,8 +60,10 @@ void InitializePlatformSpecificModules() {
return;
}
}
- VReport(1, "LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
+ if (linker == nullptr) {
+ VReport(1, "LeakSanitizer: Dynamic linker not found. "
+ "TLS will not be handled correctly.\n");
+ }
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
@@ -65,20 +78,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
continue;
uptr begin = info->dlpi_addr + phdr->p_vaddr;
uptr end = begin + phdr->p_memsz;
- uptr allocator_begin = 0, allocator_end = 0;
- GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
- if (begin <= allocator_begin && allocator_begin < end) {
- CHECK_LE(allocator_begin, allocator_end);
- CHECK_LE(allocator_end, end);
- if (begin < allocator_begin)
- ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
- kReachable);
- if (allocator_end < end)
- ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
- kReachable);
- } else {
- ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
- }
+ ScanGlobalRange(begin, end, frontier);
}
return 0;
}
@@ -89,76 +89,22 @@ void ProcessGlobalRegions(Frontier *frontier) {
dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}
-static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
- CHECK(stack_id);
- StackTrace stack = map->Get(stack_id);
- // The top frame is our malloc/calloc/etc. The next frame is the caller.
- if (stack.size >= 2)
- return stack.trace[1];
- return 0;
-}
+LoadedModule *GetLinker() { return linker; }
-struct ProcessPlatformAllocParam {
- Frontier *frontier;
- StackDepotReverseMap *stack_depot_reverse_map;
- bool skip_linker_allocations;
-};
-
-// ForEachChunk callback. Identifies unreachable chunks which must be treated as
-// reachable. Marks them as reachable and adds them to the frontier.
-static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
- CHECK(arg);
- ProcessPlatformAllocParam *param =
- reinterpret_cast<ProcessPlatformAllocParam *>(arg);
- chunk = GetUserBegin(chunk);
- LsanMetadata m(chunk);
- if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
- u32 stack_id = m.stack_trace_id();
- uptr caller_pc = 0;
- if (stack_id > 0)
- caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
- // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
- // it as reachable, as we can't properly report its allocation stack anyway.
- if (caller_pc == 0 || (param->skip_linker_allocations &&
- linker->containsAddress(caller_pc))) {
- m.set_tag(kReachable);
- param->frontier->push_back(chunk);
- }
- }
-}
-
-// Handles dynamically allocated TLS blocks by treating all chunks allocated
-// from ld-linux.so as reachable.
-// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
-// They are allocated with a __libc_memalign() call in allocate_and_init()
-// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
-// blocks, but we can make sure they come from our own allocator by intercepting
-// __libc_memalign(). On top of that, there is no easy way to reach them. Their
-// addresses are stored in a dynamically allocated array (the DTV) which is
-// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
-// being reachable from the static TLS, and the dynamic TLS being reachable from
-// the DTV. This is because the initial DTV is allocated before our interception
-// mechanism kicks in, and thus we don't recognize it as allocated memory. We
-// can't special-case it either, since we don't know its size.
-// Our solution is to include in the root set all allocations made from
-// ld-linux.so (which is where allocate_and_init() is implemented). This is
-// guaranteed to include all dynamic TLS blocks (and possibly other allocations
-// which we don't care about).
-void ProcessPlatformSpecificAllocations(Frontier *frontier) {
- StackDepotReverseMap stack_depot_reverse_map;
- ProcessPlatformAllocParam arg;
- arg.frontier = frontier;
- arg.stack_depot_reverse_map = &stack_depot_reverse_map;
- arg.skip_linker_allocations =
- flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
- ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
-}
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
struct DoStopTheWorldParam {
StopTheWorldCallback callback;
void *argument;
};
+// While calling Die() here is undefined behavior and can potentially
+// cause race conditions, it isn't possible to intercept exit on linux,
+// so we have no choice but to call Die() from the atexit handler.
+void HandleLeaks() {
+ if (common_flags()->exitcode) Die();
+}
+
static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
void *data) {
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
diff --git a/libsanitizer/lsan/lsan_common_mac.cc b/libsanitizer/lsan/lsan_common_mac.cc
new file mode 100644
index 00000000000..e60b3d0f195
--- /dev/null
+++ b/libsanitizer/lsan/lsan_common_mac.cc
@@ -0,0 +1,197 @@
+//=-- lsan_common_mac.cc --------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "lsan_allocator.h"
+
+#include <pthread.h>
+
+#include <mach/mach.h>
+
+namespace __lsan {
+
+typedef struct {
+ int disable_counter;
+ u32 current_thread_id;
+ AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread destructor requires the current thread id,
+// so we can't destroy it until it's been used and reset to invalid tid
+void restore_tid_data(void *ptr) {
+ thread_local_data_t *data = (thread_local_data_t *)ptr;
+ if (data->current_thread_id != kInvalidTid)
+ pthread_setspecific(key, data);
+}
+
+static void make_tls_key() {
+ CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+ pthread_once(&key_once, make_tls_key);
+
+ thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+ if (ptr == NULL && alloc) {
+ ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+ ptr->disable_counter = 0;
+ ptr->current_thread_id = kInvalidTid;
+ ptr->cache = AllocatorCache();
+ pthread_setspecific(key, ptr);
+ }
+
+ return ptr;
+}
+
+bool DisabledInThisThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+ int *disable_counter = &get_tls_val(true)->disable_counter;
+ if (*disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ --*disable_counter;
+}
+
+u32 GetCurrentThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->current_thread_id : kInvalidTid;
+}
+
+void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+LoadedModule *GetLinker() { return nullptr; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {}
+
+// Sections which can't contain global pointers. This list errs on the
+// side of caution to avoid false positives, at the expense of performance.
+//
+// Other potentially safe sections include:
+// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
+//
+// Sections which definitely cannot be included here are:
+// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
+// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
+static const char *kSkippedSecNames[] = {
+ "__cfstring", "__la_symbol_ptr", "__mod_init_func",
+ "__mod_term_func", "__nl_symbol_ptr", "__objc_classlist",
+ "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
+ "__objc_protolist", "__objc_selrefs", "__objc_superrefs"};
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ for (auto name : kSkippedSecNames) CHECK(ARRAY_SIZE(name) < kMaxSegName);
+
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules(/*initial_capacity*/ 128);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ // Even when global scanning is disabled, we still need to scan
+ // system libraries for stashed pointers
+ if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+ for (const __sanitizer::LoadedModule::AddressRange &range :
+ modules[i].ranges()) {
+ // Sections storing global variables are writable and non-executable
+ if (range.executable || !range.writable) continue;
+
+ for (auto name : kSkippedSecNames) {
+ if (!internal_strcmp(range.name, name)) continue;
+ }
+
+ ScanGlobalRange(range.beg, range.end, frontier);
+ }
+ }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+ mach_port_name_t port;
+ if (task_for_pid(mach_task_self(), internal_getpid(), &port)
+ != KERN_SUCCESS) {
+ return;
+ }
+
+ unsigned depth = 1;
+ vm_size_t size = 0;
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
+
+ while (err == KERN_SUCCESS) {
+ struct vm_region_submap_info_64 info;
+ err = vm_region_recurse_64(port, &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+
+ uptr end_address = address + size;
+
+ // libxpc stashes some pointers in the Kernel Alloc Once page,
+ // make sure not to report those as leaks.
+ if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+
+ // Recursing over the full memory map is very slow, break out
+ // early if we don't need the full iteration.
+ if (!flags()->use_root_regions || !root_regions->size())
+ break;
+ }
+
+ // This additional root region scan is required on Darwin in order to
+ // detect root regions contained within mmap'd memory regions, because
+ // the Darwin implementation of sanitizer_procmaps traverses images
+ // as loaded by dyld, and not the complete set of all memory regions.
+ //
+ // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
+ // behavior as sanitizer_procmaps_linux and traverses all memory regions
+ if (flags()->use_root_regions) {
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
+ info.protection & kProtectionRead);
+ }
+ }
+
+ address = end_address;
+ }
+}
+
+// On darwin, we can intercept _exit gracefully, and return a failing exit code
+// if required at that point. Calling Die() here is undefined behavior and
+// causes rare race conditions.
+void HandleLeaks() {}
+
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ StopTheWorld(callback, argument);
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC
diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc
index 160ed5979c4..c9279aad676 100644
--- a/libsanitizer/lsan/lsan_interceptors.cc
+++ b/libsanitizer/lsan/lsan_interceptors.cc
@@ -17,13 +17,18 @@
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
+#include <stddef.h>
+
using namespace __lsan;
extern "C" {
@@ -34,29 +39,23 @@ int pthread_key_create(unsigned *key, void (*destructor)(void* v));
int pthread_setspecific(unsigned key, const void *v);
}
-#define ENSURE_LSAN_INITED do { \
- CHECK(!lsan_init_is_running); \
- if (!lsan_inited) \
- __lsan_init(); \
-} while (0)
-
///// Malloc/free interceptors. /////
-const bool kAlwaysClearMemory = true;
-
namespace std {
struct nothrow_t;
+ enum class align_val_t: size_t;
}
+#if !SANITIZER_MAC
INTERCEPTOR(void*, malloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, 1, kAlwaysClearMemory);
+ return lsan_malloc(size, stack);
}
INTERCEPTOR(void, free, void *p) {
ENSURE_LSAN_INITED;
- Deallocate(p);
+ lsan_free(p);
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
@@ -71,60 +70,76 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
CHECK(allocated < kCallocPoolSize);
return mem;
}
- if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr;
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- size *= nmemb;
- return Allocate(stack, size, 1, true);
+ return lsan_calloc(nmemb, size, stack);
}
INTERCEPTOR(void*, realloc, void *q, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Reallocate(stack, q, size, 1);
+ return lsan_realloc(q, size, stack);
}
-INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
+ *memptr = lsan_memalign(alignment, size, stack);
+ // FIXME: Return ENOMEM if user requested more than max alloc size.
+ return 0;
}
-INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- return Allocate(stack, size, alignment, kAlwaysClearMemory);
+ return lsan_valloc(size, stack);
}
+#endif
-INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- *memptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
- // FIXME: Return ENOMEM if user requested more than max alloc size.
- return 0;
+ return lsan_memalign(alignment, size, stack);
}
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ void *res = lsan_memalign(alignment, size, stack);
DTLS_on_libc_memalign(res, size);
return res;
}
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
-INTERCEPTOR(void*, valloc, uptr size) {
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
- if (size == 0)
- size = GetPageSizeCached();
- return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
+ return lsan_memalign(alignment, size, stack);
}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
ENSURE_LSAN_INITED;
return GetMallocUsableSize(ptr);
}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
struct fake_mallinfo {
int x[10];
};
@@ -134,11 +149,18 @@ INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
internal_memset(&res, 0, sizeof(res));
return res;
}
+#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
INTERCEPTOR(int, mallopt, int cmd, int value) {
return -1;
}
+#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLINFO
+#define LSAN_MAYBE_INTERCEPT_MALLOPT
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+#if SANITIZER_INTERCEPT_PVALLOC
INTERCEPTOR(void*, pvalloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
@@ -150,26 +172,81 @@ INTERCEPTOR(void*, pvalloc, uptr size) {
}
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
}
+#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_PVALLOC
+#endif // SANITIZER_INTERCEPT_PVALLOC
+#if SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
+#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define LSAN_MAYBE_INTERCEPT_CFREE
+#endif // SANITIZER_INTERCEPT_CFREE
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
-#define OPERATOR_NEW_BODY \
- ENSURE_LSAN_INITED; \
- GET_STACK_TRACE_MALLOC; \
- return Allocate(stack, size, 1, kAlwaysClearMemory);
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow) \
+ ENSURE_LSAN_INITED; \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = lsan_malloc(size, stack); \
+ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
+ ENSURE_LSAN_INITED; \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = lsan_memalign((uptr)align, size, stack); \
+ if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM(); \
+ return res;
+
+#define OPERATOR_DELETE_BODY \
+ ENSURE_LSAN_INITED; \
+ lsan_free(ptr);
+
+// On OS X it's not enough to just provide our own 'operator new' and
+// 'operator delete' implementations, because they're going to be in the runtime
+// dylib, and the main executable will depend on both the runtime dylib and
+// libstdc++, each of which has its own implementation of new and delete.
+// To make sure that C++ allocation/deallocation operators are overridden on
+// OS X we need to intercept them using their mangled names.
+#if !SANITIZER_MAC
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size) { OPERATOR_NEW_BODY; }
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
-void *operator new(uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
-void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
-
-#define OPERATOR_DELETE_BODY \
- ENSURE_LSAN_INITED; \
- Deallocate(ptr);
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
@@ -178,9 +255,55 @@ void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
INTERCEPTOR_ATTRIBUTE
-void operator delete[](void *ptr, std::nothrow_t const &) {
- OPERATOR_DELETE_BODY;
-}
+void operator delete[](void *ptr, std::nothrow_t const &)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+
+#else // SANITIZER_MAC
+
+INTERCEPTOR(void *, _Znwm, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _Znam, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+
+INTERCEPTOR(void, _ZdlPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+
+#endif // !SANITIZER_MAC
+
///// Thread initialization and finalization. /////
@@ -250,7 +373,8 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
}
if (res == 0) {
- int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
+ int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
+ IsStateDetached(detached));
CHECK_NE(tid, 0);
atomic_store(&p.tid, tid, memory_order_release);
while (atomic_load(&p.tid, memory_order_acquire) != 0)
@@ -270,24 +394,36 @@ INTERCEPTOR(int, pthread_join, void *th, void **ret) {
return res;
}
+INTERCEPTOR(void, _exit, int status) {
+ if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
+ REAL(_exit)(status);
+}
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
namespace __lsan {
void InitializeInterceptors() {
+ InitializeSignalInterceptors();
+
INTERCEPT_FUNCTION(malloc);
INTERCEPT_FUNCTION(free);
- INTERCEPT_FUNCTION(cfree);
+ LSAN_MAYBE_INTERCEPT_CFREE;
INTERCEPT_FUNCTION(calloc);
INTERCEPT_FUNCTION(realloc);
- INTERCEPT_FUNCTION(memalign);
+ LSAN_MAYBE_INTERCEPT_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
INTERCEPT_FUNCTION(posix_memalign);
- INTERCEPT_FUNCTION(__libc_memalign);
INTERCEPT_FUNCTION(valloc);
- INTERCEPT_FUNCTION(pvalloc);
- INTERCEPT_FUNCTION(malloc_usable_size);
- INTERCEPT_FUNCTION(mallinfo);
- INTERCEPT_FUNCTION(mallopt);
+ LSAN_MAYBE_INTERCEPT_PVALLOC;
+ LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
+ LSAN_MAYBE_INTERCEPT_MALLINFO;
+ LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(_exit);
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");
diff --git a/libsanitizer/lsan/lsan_linux.cc b/libsanitizer/lsan/lsan_linux.cc
new file mode 100644
index 00000000000..aa6445a9877
--- /dev/null
+++ b/libsanitizer/lsan/lsan_linux.cc
@@ -0,0 +1,31 @@
+//=-- lsan_linux.cc -------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer. Linux-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX
+
+#include "lsan_allocator.h"
+
+namespace __lsan {
+
+static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+u32 GetCurrentThread() { return current_thread_tid; }
+void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
+
+void ReplaceSystemMalloc() {}
+
+} // namespace __lsan
+
+#endif // SANITIZER_LINUX
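Editorial note, not part of the patch: the two THREADLOCAL definitions above are the point of this new file; each thread gets its own tid slot and allocator cache, so the hot paths need no locking. A generic, self-contained sketch of that pattern follows, with plain C++ thread_local standing in for the sanitizer's THREADLOCAL macro and the types and names below invented purely for illustration.

#include <cstdio>
#include <thread>

struct AllocatorCache { int allocations = 0; };          // stand-in for the real cache

static thread_local unsigned current_thread_tid = ~0u;  // analogue of kInvalidTid
static thread_local AllocatorCache allocator_cache;

unsigned GetCurrentThread() { return current_thread_tid; }
void SetCurrentThread(unsigned tid) { current_thread_tid = tid; }
AllocatorCache *GetAllocatorCache() { return &allocator_cache; }

int main() {
  SetCurrentThread(1);
  std::thread worker([] {
    // A new thread starts with its own, untouched copies of the variables.
    std::printf("worker sees tid %u before registering\n", GetCurrentThread());
    SetCurrentThread(2);
    GetAllocatorCache()->allocations++;
  });
  worker.join();
  std::printf("main thread still has tid %u\n", GetCurrentThread());
  return 0;
}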
diff --git a/libsanitizer/lsan/lsan_mac.cc b/libsanitizer/lsan/lsan_mac.cc
new file mode 100644
index 00000000000..ca38c1c6f8a
--- /dev/null
+++ b/libsanitizer/lsan/lsan_mac.cc
@@ -0,0 +1,190 @@
+//===-- lsan_mac.cc -------------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer, a memory leak checker.
+//
+// Mac-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+// Support for the following functions from libdispatch on Mac OS:
+// dispatch_async_f()
+// dispatch_async()
+// dispatch_sync_f()
+// dispatch_sync()
+// dispatch_after_f()
+// dispatch_after()
+// dispatch_group_async_f()
+// dispatch_group_async()
+// TODO(glider): the libdispatch API contains other functions that we don't
+// support yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although they may
+// cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+// dispatch_source_set_cancel_handler_f()
+// dispatch_source_set_cancel_handler()
+// dispatch_source_set_event_handler_f()
+// dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void *dispatch_group_t;
+typedef void *dispatch_queue_t;
+typedef void *dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void *(*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+ void *block;
+ dispatch_function_t func;
+ u32 parent_tid;
+} lsan_block_context_t;
+
+ALWAYS_INLINE
+void lsan_register_worker_thread(int parent_tid) {
+ if (GetCurrentThread() == kInvalidTid) {
+ u32 tid = ThreadCreate(parent_tid, 0, true);
+ ThreadStart(tid, GetTid());
+ SetCurrentThread(tid);
+ }
+}
+
+// For use only by those functions that allocated the context via
+// alloc_lsan_context().
+extern "C" void lsan_dispatch_call_block_and_release(void *block) {
+ lsan_block_context_t *context = (lsan_block_context_t *)block;
+ VReport(2,
+ "lsan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
+ lsan_register_worker_thread(context->parent_tid);
+ // Call the original dispatcher for the block.
+ context->func(context->block);
+ lsan_free(context);
+}
+
+} // namespace __lsan
+
+using namespace __lsan; // NOLINT
+
+// Wrap |ctxt| and |func| into an lsan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_THREAD;
+ lsan_block_context_t *lsan_ctxt =
+ (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
+ lsan_ctxt->block = ctxt;
+ lsan_ctxt->func = func;
+ lsan_ctxt->parent_tid = GetCurrentThread();
+ return lsan_ctxt;
+}
+
+// Define an interceptor for a dispatch_*_f function with the three most
+// common parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
+ INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
+ dispatch_function_t func) { \
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
+ return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
+ lsan_dispatch_call_block_and_release); \
+ }
+
+INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
+
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
+ void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
+ lsan_dispatch_call_block_and_release);
+}
+
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ REAL(dispatch_group_async_f)
+ (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
+}
+
+#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" {
+void dispatch_async(dispatch_queue_t dq, void (^work)(void));
+void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
+ void (^work)(void));
+void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void));
+void dispatch_source_set_cancel_handler(dispatch_source_t ds,
+ void (^work)(void));
+void dispatch_source_set_event_handler(dispatch_source_t ds,
+ void (^work)(void));
+}
+
+#define GET_LSAN_BLOCK(work) \
+ void (^lsan_block)(void); \
+ int parent_tid = GetCurrentThread(); \
+ lsan_block = ^(void) { \
+ lsan_register_worker_thread(parent_tid); \
+ work(); \
+ }
+
+INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_async)(dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
+ dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_group_async)(dg, dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_after)(when, queue, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_event_handler)(ds, lsan_block);
+}
+#endif
+
+#endif // SANITIZER_MAC
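Editorial note, not part of the patch: the dispatch interceptors above exist so that GCD worker threads are registered with LSan (via lsan_register_worker_thread()) before user code runs on them. Purely as an illustration of the scenario they cover, a toy Darwin program along these lines would have its leak attributed to a properly registered worker thread; the compile flags and exact report output are assumptions here.

#include <dispatch/dispatch.h>
#include <stdlib.h>

int main() {
  dispatch_group_t group = dispatch_group_create();
  dispatch_queue_t queue =
      dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
  // dispatch_group_async() is one of the intercepted entry points; the block
  // runs on a GCD worker thread that LSan registers before invoking it.
  dispatch_group_async(group, queue, ^{
    void *lost = malloc(64);   // intentionally leaked
    (void)lost;
  });
  dispatch_group_wait(group, DISPATCH_TIME_FOREVER);
  return 0;   // the leak report is produced when the process exits
}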
diff --git a/libsanitizer/lsan/lsan_malloc_mac.cc b/libsanitizer/lsan/lsan_malloc_mac.cc
new file mode 100644
index 00000000000..2d810af841f
--- /dev/null
+++ b/libsanitizer/lsan/lsan_malloc_mac.cc
@@ -0,0 +1,53 @@
+//===-- lsan_malloc_mac.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer (LSan), a memory leak detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+using namespace __lsan;
+#define COMMON_MALLOC_ZONE_NAME "lsan"
+#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
+#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_memalign(alignment, size, stack)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_malloc(size, stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_realloc(ptr, size, stack)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_valloc(size, stack)
+#define COMMON_MALLOC_FREE(ptr) \
+ lsan_free(ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = lsan_mz_size(ptr)
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __lsan
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif // SANITIZER_MAC
diff --git a/libsanitizer/lsan/lsan_thread.cc b/libsanitizer/lsan/lsan_thread.cc
index af5ad47913f..e03e8766ae1 100644
--- a/libsanitizer/lsan/lsan_thread.cc
+++ b/libsanitizer/lsan/lsan_thread.cc
@@ -17,13 +17,11 @@
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
+#include "lsan_common.h"
namespace __lsan {
-const u32 kInvalidTid = (u32) -1;
-
static ThreadRegistry *thread_registry;
-static THREADLOCAL u32 current_thread_tid = kInvalidTid;
static ThreadContextBase *CreateThreadContext(u32 tid) {
void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
@@ -39,14 +37,6 @@ void InitializeThreadRegistry() {
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
-u32 GetCurrentThread() {
- return current_thread_tid;
-}
-
-void SetCurrentThread(u32 tid) {
- current_thread_tid = tid;
-}
-
ThreadContext::ThreadContext(int tid)
: ThreadContextBase(tid),
stack_begin_(0),
@@ -85,7 +75,7 @@ u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
/* arg */ nullptr);
}
-void ThreadStart(u32 tid, uptr os_id) {
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread) {
OnStartedArgs args;
uptr stack_size = 0;
uptr tls_size = 0;
@@ -95,11 +85,12 @@ void ThreadStart(u32 tid, uptr os_id) {
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
args.dtls = DTLS_Get();
- thread_registry->StartThread(tid, os_id, &args);
+ thread_registry->StartThread(tid, os_id, workerthread, &args);
}
void ThreadFinish() {
thread_registry->FinishThread(GetCurrentThread());
+ SetCurrentThread(kInvalidTid);
}
ThreadContext *CurrentThreadContext() {
@@ -134,7 +125,7 @@ void EnsureMainThreadIDIsCorrect() {
///// Interface to the common LSan module. /////
-bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
@@ -150,7 +141,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
return true;
}
-void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
void *arg) {
}
diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h
index dafd8af0a29..86758347432 100644
--- a/libsanitizer/lsan/lsan_thread.h
+++ b/libsanitizer/lsan/lsan_thread.h
@@ -43,7 +43,7 @@ class ThreadContext : public ThreadContextBase {
void InitializeThreadRegistry();
-void ThreadStart(u32 tid, uptr os_id);
+void ThreadStart(u32 tid, tid_t os_id, bool workerthread = false);
void ThreadFinish();
u32 ThreadCreate(u32 tid, uptr uid, bool detached);
void ThreadJoin(u32 tid);