author     Alexey Samsonov <vonosmas@gmail.com>  2014-12-12 20:07:35 +0000
committer  Alexey Samsonov <vonosmas@gmail.com>  2014-12-12 20:07:35 +0000
commit     615a96827869f87d4e163d2c3e6786ca5cdb18ff (patch)
tree       fc15017b0af4393b5905f0ec0215fb2bb1b96e86
parent     42bcd70673d86f6bb07affff1b42b4647efbe99d (diff)
[Sanitizer] Introduce Allocator::may_return_null bool flag.
Summary:
Turn "allocator_may_return_null" common flag into an
Allocator::may_return_null bool flag. We want to make sure that common
flags are immutable after initialization. There are cases when we want
to change this flag in the allocator at runtime: e.g. in unit tests and
during ASan activation on Android.

Test Plan: regression test suite, real-life applications

Reviewers: kcc, eugenis

Subscribers: llvm-commits

Differential Revision: http://reviews.llvm.org/D6623

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@224148 91177308-0d34-0410-b5e6-96231b3b80d8
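At a glance, the pattern this patch introduces: each allocator reads the
common flag exactly once at Init() time and owns the value afterwards;
later changes go through the allocator itself. A minimal sketch distilled
from the diffs below (ToolInit/ToolActivate/ToolAllocateHuge are
hypothetical call sites, not code from this patch; Allocator stands for a
CombinedAllocator<...> instantiation as in asan/msan/tsan):

// Sketch only: how a sanitizer runtime threads the flag through.
static Allocator allocator;

void ToolInit() {
  // Read the common flag once at startup; after this it stays immutable.
  allocator.Init(common_flags()->allocator_may_return_null);
}

void ToolActivate(bool may_return_null) {
  // Later changes (unit tests, ASan activation on Android) go through
  // the allocator instead of mutating common_flags().
  allocator.SetMayReturnNull(may_return_null);
}

void *ToolAllocateHuge() {
  // Failure paths now ask the allocator, which returns 0 or dies,
  // depending on the flag it carries.
  return allocator.ReturnNullOrDie();
}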
-rw-r--r--  lib/asan/asan_activation.cc                             | 21
-rw-r--r--  lib/asan/asan_allocator.h                               |  4
-rw-r--r--  lib/asan/asan_allocator2.cc                             | 15
-rw-r--r--  lib/asan/asan_rtl.cc                                    |  3
-rw-r--r--  lib/lsan/lsan_allocator.cc                              |  2
-rw-r--r--  lib/msan/msan.h                                         |  1
-rw-r--r--  lib/msan/msan_allocator.cc                              | 11
-rw-r--r--  lib/msan/msan_interceptors.cc                           |  6
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.cc             |  9
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h              | 41
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc  | 18
-rw-r--r--  lib/tsan/rtl/tsan_interceptors.cc                       |  6
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc                               | 13
-rw-r--r--  lib/tsan/rtl/tsan_mman.h                                |  1
14 files changed, 91 insertions(+), 60 deletions(-)
diff --git a/lib/asan/asan_activation.cc b/lib/asan/asan_activation.cc
index eb4a6db0b..c4733a337 100644
--- a/lib/asan/asan_activation.cc
+++ b/lib/asan/asan_activation.cc
@@ -60,29 +60,26 @@ void AsanActivate() {
// Restore flag values.
// FIXME: this is not atomic, and there may be other threads alive.
- flags()->quarantine_size = asan_deactivated_flags.quarantine_size;
flags()->max_redzone = asan_deactivated_flags.max_redzone;
flags()->poison_heap = asan_deactivated_flags.poison_heap;
common_flags()->malloc_context_size =
asan_deactivated_flags.malloc_context_size;
flags()->alloc_dealloc_mismatch =
asan_deactivated_flags.alloc_dealloc_mismatch;
- common_flags()->allocator_may_return_null =
- asan_deactivated_flags.allocator_may_return_null;
ParseExtraActivationFlags();
- ReInitializeAllocator();
+ ReInitializeAllocator(asan_deactivated_flags.allocator_may_return_null,
+ asan_deactivated_flags.quarantine_size);
asan_is_deactivated = false;
- VReport(
- 1,
- "quarantine_size %d, max_redzone %d, poison_heap %d, "
- "malloc_context_size %d, alloc_dealloc_mismatch %d, "
- "allocator_may_return_null %d\n",
- flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap,
- common_flags()->malloc_context_size, flags()->alloc_dealloc_mismatch,
- common_flags()->allocator_may_return_null);
+ VReport(1, "quarantine_size %d, max_redzone %d, poison_heap %d, "
+ "malloc_context_size %d, alloc_dealloc_mismatch %d, "
+ "allocator_may_return_null %d\n",
+ asan_deactivated_flags.quarantine_size, flags()->max_redzone,
+ flags()->poison_heap, common_flags()->malloc_context_size,
+ flags()->alloc_dealloc_mismatch,
+ asan_deactivated_flags.allocator_may_return_null);
}
} // namespace __asan
diff --git a/lib/asan/asan_allocator.h b/lib/asan/asan_allocator.h
index 6d3a99282..a82e33ee1 100644
--- a/lib/asan/asan_allocator.h
+++ b/lib/asan/asan_allocator.h
@@ -31,8 +31,8 @@ enum AllocType {
static const uptr kNumberOfSizeClasses = 255;
struct AsanChunk;
-void InitializeAllocator();
-void ReInitializeAllocator();
+void InitializeAllocator(bool may_return_null, uptr quarantine_size);
+void ReInitializeAllocator(bool may_return_null, uptr quarantine_size);
class AsanChunkView {
public:
diff --git a/lib/asan/asan_allocator2.cc b/lib/asan/asan_allocator2.cc
index 52bdcf607..8900c9746 100644
--- a/lib/asan/asan_allocator2.cc
+++ b/lib/asan/asan_allocator2.cc
@@ -253,13 +253,14 @@ struct QuarantineCallback {
AllocatorCache *cache_;
};
-void InitializeAllocator() {
- allocator.Init();
- quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
+void InitializeAllocator(bool may_return_null, uptr quarantine_size) {
+ allocator.Init(may_return_null);
+ quarantine.Init(quarantine_size, kMaxThreadLocalQuarantine);
}
-void ReInitializeAllocator() {
- quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine);
+void ReInitializeAllocator(bool may_return_null, uptr quarantine_size) {
+ allocator.SetMayReturnNull(may_return_null);
+ quarantine.Init(quarantine_size, kMaxThreadLocalQuarantine);
}
static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
@@ -297,7 +298,7 @@ static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate %p bytes\n",
(void*)size);
- return AllocatorReturnNull();
+ return allocator.ReturnNullOrDie();
}
AsanThread *t = GetCurrentThread();
@@ -598,7 +599,7 @@ void *asan_malloc(uptr size, BufferedStackTrace *stack) {
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return AllocatorReturnNull();
+ return allocator.ReturnNullOrDie();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
diff --git a/lib/asan/asan_rtl.cc b/lib/asan/asan_rtl.cc
index 0f608f13c..3c36bf636 100644
--- a/lib/asan/asan_rtl.cc
+++ b/lib/asan/asan_rtl.cc
@@ -658,7 +658,8 @@ static void AsanInitInternal() {
AsanTSDInit(PlatformTSDDtor);
InstallDeadlySignalHandlers(AsanOnSIGSEGV);
- InitializeAllocator();
+ InitializeAllocator(common_flags()->allocator_may_return_null,
+ flags()->quarantine_size);
// On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited
// should be set to 1 prior to initializing the threads.
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 8be2a2ad9..4d0fbabfa 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -47,7 +47,7 @@ static Allocator allocator;
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.Init();
+ allocator.Init(common_flags()->allocator_may_return_null);
}
void AllocatorThreadFinish() {
diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index 9bc1e4c6a..acfed2c44 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -125,6 +125,7 @@ char *GetProcSelfMaps();
void InitializeInterceptors();
void MsanAllocatorThreadFinish();
+void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size);
void *MsanReallocate(StackTrace *stack, void *oldp, uptr size,
uptr alignment, bool zeroise);
void MsanDeallocate(StackTrace *stack, void *ptr);
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index f21d71409..958dae823 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -73,7 +73,7 @@ static inline void Init() {
if (inited) return;
__msan_init();
inited = true; // this must happen before any threads are created.
- allocator.Init();
+ allocator.Init(common_flags()->allocator_may_return_null);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -92,7 +92,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
- return AllocatorReturnNull();
+ return allocator.ReturnNullOrDie();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@@ -147,6 +147,13 @@ void MsanDeallocate(StackTrace *stack, void *p) {
}
}
+void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+ Init();
+ if (CallocShouldReturnNullDueToOverflow(size, nmemb))
+ return allocator.ReturnNullOrDie();
+ return MsanReallocate(stack, 0, nmemb * size, sizeof(u64), true);
+}
+
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
uptr alignment, bool zeroise) {
if (!old_p)
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index bbdf18e16..608fb95bd 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -925,10 +925,8 @@ INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags,
}
INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
- if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return AllocatorReturnNull();
GET_MALLOC_STACK_TRACE;
- if (!msan_inited) {
+ if (UNLIKELY(!msan_inited)) {
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
const SIZE_T kCallocPoolSize = 1024;
static uptr calloc_memory_for_dlsym[kCallocPoolSize];
@@ -939,7 +937,7 @@ INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
CHECK(allocated < kCallocPoolSize);
return mem;
}
- return MsanReallocate(&stack, 0, nmemb * size, sizeof(u64), true);
+ return MsanCalloc(&stack, nmemb, size);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 47509f836..03b3e8315 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -14,7 +14,6 @@
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_common.h"
-#include "sanitizer_flags.h"
namespace __sanitizer {
@@ -61,7 +60,7 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init();
+ internal_allocator_instance->Init(/* may_return_null*/ false);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -140,14 +139,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
-void *AllocatorReturnNull() {
- if (common_flags()->allocator_may_return_null)
- return 0;
+void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
CHECK(0);
- return 0;
+ Die();
}
} // namespace __sanitizer
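For context, CallocShouldReturnNullDueToOverflow() — whose tail is visible
as context in the hunk above — guards the size * n multiplication before
any allocation happens. A self-contained restatement of the check in
standard C++ (an illustration, not the runtime's exact code):

#include <cstdint>

// true iff size * n does not fit in the pointer-sized unsigned type,
// i.e. calloc(size, n) cannot even represent the requested byte count.
bool CallocOverflows(uintptr_t size, uintptr_t n) {
  if (size == 0) return false;          // 0 * n never overflows
  const uintptr_t max = ~(uintptr_t)0;  // largest representable value
  return (max / size) < n;              // equivalent to size * n > max
}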
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 23218016b..d0ae90b01 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -23,8 +23,8 @@
namespace __sanitizer {
-// Depending on allocator_may_return_null either return 0 or crash.
-void *AllocatorReturnNull();
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull();
// SizeClassMap maps allocation sizes into size classes and back.
// Class 0 corresponds to size 0.
@@ -1002,9 +1002,10 @@ struct SizeClassAllocatorLocalCache {
template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
public:
- void Init() {
+ void Init(bool may_return_null) {
internal_memset(this, 0, sizeof(*this));
page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -1012,7 +1013,9 @@ class LargeMmapAllocator {
uptr map_size = RoundUpMapSize(size);
if (alignment > page_size_)
map_size += alignment;
- if (map_size < size) return AllocatorReturnNull(); // Overflow.
+ // Overflow.
+ if (map_size < size)
+ return ReturnNullOrDie();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
CHECK(IsAligned(map_beg, page_size_));
@@ -1048,6 +1051,16 @@ class LargeMmapAllocator {
return reinterpret_cast<void*>(res);
}
+ void *ReturnNullOrDie() {
+ if (atomic_load(&may_return_null_, memory_order_acquire))
+ return 0;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
@@ -1226,6 +1239,7 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
+ atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
@@ -1239,10 +1253,11 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void Init() {
+ void Init(bool may_return_null) {
primary_.Init();
- secondary_.Init();
+ secondary_.Init(may_return_null);
stats_.Init();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
@@ -1251,7 +1266,7 @@ class CombinedAllocator {
if (size == 0)
size = 1;
if (size + alignment < size)
- return AllocatorReturnNull();
+ return ReturnNullOrDie();
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
@@ -1267,6 +1282,17 @@ class CombinedAllocator {
return res;
}
+ void *ReturnNullOrDie() {
+ if (atomic_load(&may_return_null_, memory_order_acquire))
+ return 0;
+ ReportAllocatorCannotReturnNull();
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
@@ -1379,6 +1405,7 @@ class CombinedAllocator {
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
};
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
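A design note on the hunks above: may_return_null_ is an atomic_uint8_t,
stored with relaxed ordering in Init() (startup is single-threaded),
published with release ordering in SetMayReturnNull(), and read with
acquire ordering in ReturnNullOrDie(), so a toggle during ASan activation
becomes visible to threads that are already allocating. The same pattern
in standard C++ (a sketch using std::atomic rather than the sanitizer's
internal atomics):

#include <atomic>
#include <cstdint>

struct MayReturnNullFlag {
  std::atomic<std::uint8_t> v_;

  void Init(bool b) {  // single-threaded startup: relaxed is sufficient
    v_.store(b, std::memory_order_relaxed);
  }
  void Set(bool b) {   // runtime toggle: publish with release
    v_.store(b, std::memory_order_release);
  }
  bool Get() {         // allocation failure path: read with acquire
    return v_.load(std::memory_order_acquire) != 0;
  }
};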
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index a571f3674..2a9c5e102 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -14,7 +14,6 @@
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"
@@ -299,7 +298,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
LargeMmapAllocator<TestMapUnmapCallback> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
void *x = a.Allocate(&stats, 1 << 20, 1);
@@ -333,7 +332,7 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
#if !defined(_WIN32) // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, LargeMmapAllocator) {
LargeMmapAllocator<> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
@@ -415,25 +414,22 @@ void TestCombinedAllocator() {
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
Allocator;
Allocator *a = new Allocator;
- a->Init();
+ a->Init(/* may_return_null */ true);
AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
a->InitCache(&cache);
- bool allocator_may_return_null = common_flags()->allocator_may_return_null;
- common_flags()->allocator_may_return_null = true;
EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
- common_flags()->allocator_may_return_null = false;
+ // Set to false
+ a->SetMayReturnNull(false);
EXPECT_DEATH(a->Allocate(&cache, -1, 1),
"allocator is terminating the process");
- // Restore the original value.
- common_flags()->allocator_may_return_null = allocator_may_return_null;
const uptr kNumAllocs = 100000;
const uptr kNumIter = 10;
@@ -708,7 +704,7 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
LargeMmapAllocator<> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
@@ -735,7 +731,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
LargeMmapAllocator<> a;
- a.Init();
+ a.Init(/* may_return_null */ false);
AllocatorStats stats;
stats.Init();
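The user-visible behavior the updated test pins down: with
allocator_may_return_null=1 an impossible allocation returns 0, and with
the default setting the process dies with the "allocator is terminating
the process" message. For example, under ASan with
ASAN_OPTIONS=allocator_may_return_null=1 (a usage illustration; msan/tsan
behave the same way via their option strings):

#include <stdio.h>
#include <stdlib.h>

int main() {
  void *p = calloc((size_t)-1, 2);     // size * n overflows
  printf("calloc returned %p\n", p);   // prints a null pointer instead
  return 0;                            // of aborting the process
}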
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index 5bede0ec7..3ac3f13af 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -505,14 +505,10 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
if (cur_thread()->in_symbolizer)
return __libc_calloc(size, n);
- if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n))
- return AllocatorReturnNull();
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(calloc, size, n);
- p = user_alloc(thr, pc, n * size);
- if (p)
- internal_memset(p, 0, n * size);
+ p = user_calloc(thr, pc, size, n);
}
invoke_malloc_hook(p, n * size);
return p;
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 285bdb34d..ebb3f77fb 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -45,7 +45,7 @@ Allocator *allocator() {
}
void InitializeAllocator() {
- allocator()->Init();
+ allocator()->Init(common_flags()->allocator_may_return_null);
}
void AllocatorThreadStart(ThreadState *thr) {
@@ -78,7 +78,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return AllocatorReturnNull();
+ return allocator()->ReturnNullOrDie();
void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
if (p == 0)
return 0;
@@ -89,6 +89,15 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
return p;
}
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (CallocShouldReturnNullDueToOverflow(size, n))
+ return allocator()->ReturnNullOrDie();
+ void *p = user_alloc(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return p;
+}
+
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index 7d41fa864..5ff956d82 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -27,6 +27,7 @@ void AllocatorPrintStats();
// For user allocations.
void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
uptr align = kDefaultAlignment, bool signal = true);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
// Does not accept NULL.
void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);