 lib/asan/asan_allocator.cc                                 | 17
 lib/lsan/lsan_allocator.cc                                 |  2
 lib/msan/msan_allocator.cc                                 |  9
 lib/sanitizer_common/sanitizer_allocator.cc                | 47
 lib/sanitizer_common/sanitizer_allocator.h                 | 26
 lib/sanitizer_common/sanitizer_allocator_combined.h        | 49
 lib/sanitizer_common/sanitizer_allocator_internal.h        |  3
 lib/sanitizer_common/sanitizer_allocator_secondary.h       | 35
 lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc |  2
 lib/sanitizer_common/tests/sanitizer_allocator_test.cc     | 21
 lib/scudo/scudo_allocator.cpp                              | 15
 lib/scudo/scudo_allocator_combined.h                       | 18
 lib/scudo/scudo_allocator_secondary.h                      | 12
 lib/tsan/rtl/tsan_mman.cc                                  |  9
 14 files changed, 127 insertions(+), 138 deletions(-)
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index a93a196cb..57651f49c 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -266,7 +266,8 @@ struct Allocator {
}
void Initialize(const AllocatorOptions &options) {
- allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
+ SetAllocatorMayReturnNull(options.may_return_null);
+ allocator.Init(options.release_to_os_interval_ms);
SharedInitCode(options);
}
@@ -302,7 +303,7 @@ struct Allocator {
}
void ReInitialize(const AllocatorOptions &options) {
- allocator.SetMayReturnNull(options.may_return_null);
+ SetAllocatorMayReturnNull(options.may_return_null);
allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
@@ -323,7 +324,7 @@ struct Allocator {
options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
- options->may_return_null = allocator.MayReturnNull();
+ options->may_return_null = AllocatorMayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
@@ -374,7 +375,7 @@ struct Allocator {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
if (RssLimitExceeded())
- return allocator.ReturnNullOrDieOnOOM();
+ return AsanAllocator::FailureHandler::OnOOM();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
@@ -407,7 +408,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
- return allocator.ReturnNullOrDieOnBadRequest();
+ return AsanAllocator::FailureHandler::OnBadRequest();
}
AsanThread *t = GetCurrentThread();
@@ -420,8 +421,8 @@ struct Allocator {
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8);
}
-
- if (!allocated) return allocator.ReturnNullOrDieOnOOM();
+ if (!allocated)
+ return nullptr;
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -632,7 +633,7 @@ struct Allocator {
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDieOnBadRequest();
+ return AsanAllocator::FailureHandler::OnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index acff1f5ae..f54e95373 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -38,8 +38,8 @@ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
static Allocator allocator;
void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
allocator.InitLinkerInitialized(
- common_flags()->allocator_may_return_null,
common_flags()->allocator_release_to_os_interval_ms);
}
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 8d6cc69a4..d0f478afc 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -119,9 +119,8 @@ static AllocatorCache fallback_allocator_cache;
static SpinMutex fallback_mutex;
void MsanAllocatorInit() {
- allocator.Init(
- common_flags()->allocator_may_return_null,
- common_flags()->allocator_release_to_os_interval_ms);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
}
AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
@@ -139,7 +138,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
- return allocator.ReturnNullOrDieOnBadRequest();
+ return Allocator::FailureHandler::OnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@@ -197,7 +196,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDieOnBadRequest();
+ return Allocator::FailureHandler::OnBadRequest();
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 15d01de86..db3ebb033 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -94,8 +94,7 @@ InternalAllocator *internal_allocator() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init(
- /* may_return_null */ false, kReleaseToOSIntervalNever);
+ internal_allocator_instance->Init(kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
@@ -162,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CallocShouldReturnNullDueToOverflow(count, size))
- return internal_allocator()->ReturnNullOrDieOnBadRequest();
+ return InternalAllocator::FailureHandler::OnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
return p;
@@ -209,12 +208,15 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
-static atomic_uint8_t reporting_out_of_memory = {0};
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
-bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+bool IsAllocatorOutOfMemory() {
+ return atomic_load_relaxed(&allocator_out_of_memory);
+}
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
- if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
+// Prints error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull() {
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
@@ -222,4 +224,35 @@ void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
Die();
}
+bool AllocatorMayReturnNull() {
+ return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+ atomic_store(&allocator_may_return_null, may_return_null,
+ memory_order_relaxed);
+}
+
+void *ReturnNullOrDieOnFailure::OnBadRequest() {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+}
+
+void *ReturnNullOrDieOnFailure::OnOOM() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnBadRequest() {
+ ReportAllocatorCannotReturnNull();
+}
+
+void *DieOnFailure::OnOOM() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+ ReportAllocatorCannotReturnNull();
+}
+
} // namespace __sanitizer
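
Taken together, the additions to sanitizer_allocator.cc amount to a process-wide cached flag plus two failure policies. The standalone sketch below mirrors that control flow; std::atomic, fprintf and abort() are stand-ins for the runtime's own atomic_uint8_t, Report() and Die(), so it illustrates the pattern rather than reproducing the __sanitizer implementation.

// Standalone sketch of the failure-handling pattern introduced above.
// std::atomic, fprintf and abort() stand in for the runtime's atomic_uint8_t,
// Report() and Die(); only the control flow mirrors the real code.
#include <atomic>
#include <cstdio>
#include <cstdlib>

static std::atomic<bool> allocator_may_return_null{false};
static std::atomic<bool> allocator_out_of_memory{false};

static void SetAllocatorMayReturnNull(bool may_return_null) {
  allocator_may_return_null.store(may_return_null, std::memory_order_relaxed);
}

static bool AllocatorMayReturnNull() {
  return allocator_may_return_null.load(std::memory_order_relaxed);
}

[[noreturn]] static void ReportAllocatorCannotReturnNull() {
  std::fprintf(stderr, "allocator is terminating the process instead of "
                       "returning 0 (set allocator_may_return_null=1)\n");
  std::abort();
}

// Returns null when the cached flag allows it, dies otherwise.
struct ReturnNullOrDieOnFailure {
  static void *OnBadRequest() {
    if (AllocatorMayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull();
  }
  static void *OnOOM() {
    allocator_out_of_memory.store(true, std::memory_order_relaxed);
    if (AllocatorMayReturnNull()) return nullptr;
    ReportAllocatorCannotReturnNull();
  }
};

// Always dies, regardless of the flag (the internal allocator's choice).
struct DieOnFailure {
  static void *OnBadRequest() { ReportAllocatorCannotReturnNull(); }
  static void *OnOOM() {
    allocator_out_of_memory.store(true, std::memory_order_relaxed);
    ReportAllocatorCannotReturnNull();
  }
};

int main() {
  SetAllocatorMayReturnNull(true);
  // With the flag set, a bad request yields nullptr instead of aborting.
  std::printf("OnBadRequest -> %p\n", ReturnNullOrDieOnFailure::OnBadRequest());
  return 0;
}
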
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 9a37a2f21..f59c13d1c 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -24,12 +24,28 @@
namespace __sanitizer {
-// Returns true if ReportAllocatorCannotReturnNull(true) was called.
-// Can be use to avoid memory hungry operations.
-bool IsReportingOOM();
+// Since flags are immutable and allocator behavior can be changed at runtime
+// (unit tests or ASan on Android are some examples), allocator_may_return_null
+// flag value is cached here and can be altered later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
+// Allocator failure handling policies:
+// Implements AllocatorMayReturnNull policy, returns null when the flag is set,
+// dies otherwise.
+struct ReturnNullOrDieOnFailure {
+ static void *OnBadRequest();
+ static void *OnOOM();
+};
+// Always dies on failure.
+struct DieOnFailure {
+ static void *OnBadRequest();
+ static void *OnOOM();
+};
+
+// Returns true if allocator detected OOM condition. Can be used to avoid memory
+// hungry operations. Set when the OnOOM() failure handler is called.
+bool IsAllocatorOutOfMemory();
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index 90ccb1771..efd25cadf 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,22 +24,18 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
- primary_.Init(release_to_os_interval_ms);
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
+ typedef typename SecondaryAllocator::FailureHandler FailureHandler;
- void InitLinkerInitialized(
- bool may_return_null, s32 release_to_os_interval_ms) {
- secondary_.InitLinkerInitialized(may_return_null);
+ void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.InitLinkerInitialized();
stats_.InitLinkerInitialized();
- InitCommon(may_return_null, release_to_os_interval_ms);
}
- void Init(bool may_return_null, s32 release_to_os_interval_ms) {
- secondary_.Init(may_return_null);
+ void Init(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.Init();
stats_.Init();
- InitCommon(may_return_null, release_to_os_interval_ms);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
@@ -47,7 +43,7 @@ class CombinedAllocator {
if (size == 0)
size = 1;
if (size + alignment < size)
- return ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
@@ -55,44 +51,24 @@ class CombinedAllocator {
// alignment check.
if (alignment > 8)
size = RoundUpTo(size, alignment);
- void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
// The primary allocator should return a 2^x aligned allocation when
// requested 2^x bytes, hence using the rounded up 'size' when being
// serviced by the primary (this is no longer true when the primary is
// using a non-fixed base address). The secondary takes care of the
// alignment without such requirement, and allocating 'size' would use
// extraneous memory, so we employ 'original_size'.
- if (from_primary)
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (!res)
+ return FailureHandler::OnOOM();
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
return res;
}
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
s32 ReleaseToOSIntervalMs() const {
return primary_.ReleaseToOSIntervalMs();
}
@@ -213,6 +189,5 @@ class CombinedAllocator {
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
};
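
With the may_return_null_ member gone, CombinedAllocator picks up its failure policy from the secondary through a nested typedef and invokes it itself when a backend returns null. The toy sketch below shows only that delegation; ToyPrimary, ToySecondary and DieOnFailureStub are made-up stand-ins for the real size-class, mmap and policy classes.

// Minimal sketch of the delegation pattern: the combined allocator re-exports
// its secondary's FailureHandler and calls it when allocation fails. The toy
// types below are illustrative stand-ins, not the real allocators.
#include <cstdio>
#include <cstdlib>

struct DieOnFailureStub {          // stand-in for __sanitizer::DieOnFailure
  static void *OnBadRequest() { std::abort(); }
  static void *OnOOM()        { std::abort(); }
};

struct ToySecondary {
  typedef DieOnFailureStub FailureHandler;   // policy chosen at the secondary
  void *Allocate(size_t size) { return std::malloc(size); }
};

struct ToyPrimary {
  bool CanAllocate(size_t size) const { return size <= 256; }
  void *Allocate(size_t size) { return std::malloc(size); }
};

template <class Primary, class Secondary>
class ToyCombinedAllocator {
 public:
  // Forward the secondary's policy, as the patch does with
  // typedef typename SecondaryAllocator::FailureHandler FailureHandler;
  typedef typename Secondary::FailureHandler FailureHandler;

  void *Allocate(size_t size, size_t alignment) {
    if (size + alignment < size)            // overflow: a bad request
      return FailureHandler::OnBadRequest();
    void *res = primary_.CanAllocate(size) ? primary_.Allocate(size)
                                           : secondary_.Allocate(size);
    if (!res)                               // backend gave up: OOM policy
      return FailureHandler::OnOOM();
    return res;
  }

 private:
  Primary primary_;
  Secondary secondary_;
};

int main() {
  ToyCombinedAllocator<ToyPrimary, ToySecondary> a;
  void *p = a.Allocate(1024, 8);
  std::printf("allocated %p\n", p);
  std::free(p);
  return 0;
}
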
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index d1890f20f..a791d0d94 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -47,7 +47,8 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
- LargeMmapAllocator<> > InternalAllocator;
+ LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+ > InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
uptr alignment = 0);
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 2c69f47ec..261dfb5e1 100644
--- a/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,17 +17,19 @@
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+ class FailureHandlerT = ReturnNullOrDieOnFailure>
class LargeMmapAllocator {
public:
- void InitLinkerInitialized(bool may_return_null) {
+ typedef FailureHandlerT FailureHandler;
+
+ void InitLinkerInitialized() {
page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
- void Init(bool may_return_null) {
+ void Init() {
internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
+ InitLinkerInitialized();
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@ class LargeMmapAllocator {
map_size += alignment;
// Overflow.
if (map_size < size)
- return ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
if (!map_beg)
- return ReturnNullOrDieOnOOM();
+ return FailureHandler::OnOOM();
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@ class LargeMmapAllocator {
return reinterpret_cast<void*>(res);
}
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
@@ -278,7 +262,6 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
- atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
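
The secondary now receives the failure policy as a defaulted template parameter, which is how InternalAllocator and the unit tests below pick DieOnFailure while the tool allocators keep ReturnNullOrDieOnFailure. A toy illustration of that compile-time selection follows; ToyLargeAllocator, ReturnNullStub and DieStub are invented names.

// Toy illustration of compile-time policy selection with a defaulted template
// parameter, mirroring LargeMmapAllocator<MapUnmapCallback, FailureHandlerT>.
// ReturnNullStub, DieStub and ToyLargeAllocator are made-up stand-ins.
#include <cstdio>
#include <cstdlib>

struct ReturnNullStub { static void *OnOOM() { return nullptr; } };
struct DieStub        { static void *OnOOM() { std::abort(); } };

template <class FailureHandlerT = ReturnNullStub>  // defaulted, like the patch
class ToyLargeAllocator {
 public:
  typedef FailureHandlerT FailureHandler;   // re-exported for a combined layer

  void *Allocate(size_t size) {
    void *p = std::malloc(size);
    if (!p)
      return FailureHandler::OnOOM();       // policy decides: null or die
    return p;
  }
};

int main() {
  ToyLargeAllocator<> tolerant;             // defaults to returning null
  ToyLargeAllocator<DieStub> strict;        // dies on OOM, like DieOnFailure
  void *a = tolerant.Allocate(64), *b = strict.Allocate(64);
  std::printf("%p %p\n", a, b);
  std::free(a);
  std::free(b);
  return 0;
}
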
diff --git a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index d3c77b510..60ec7506c 100644
--- a/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -495,7 +495,7 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
- if (IsReportingOOM()) {
+ if (IsAllocatorOutOfMemory()) {
VReport(2, "Cannot use internal symbolizer: out of memory\n");
} else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index b28159a2a..f256d8776 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -426,8 +426,8 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
- LargeMmapAllocator<TestMapUnmapCallback> a;
- a.Init(/* may_return_null */ false);
+ LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+ a.Init();
AllocatorStats stats;
stats.Init();
void *x = a.Allocate(&stats, 1 << 20, 1);
@@ -463,8 +463,8 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
#endif
TEST(SanitizerCommon, LargeMmapAllocator) {
- LargeMmapAllocator<> a;
- a.Init(/* may_return_null */ false);
+ LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+ a.Init();
AllocatorStats stats;
stats.Init();
@@ -546,8 +546,9 @@ void TestCombinedAllocator() {
typedef
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
Allocator;
+ SetAllocatorMayReturnNull(true);
Allocator *a = new Allocator;
- a->Init(/* may_return_null */ true, kReleaseToOSIntervalNever);
+ a->Init(kReleaseToOSIntervalNever);
std::mt19937 r;
AllocatorCache cache;
@@ -561,7 +562,7 @@ void TestCombinedAllocator() {
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
// Set to false
- a->SetMayReturnNull(false);
+ SetAllocatorMayReturnNull(false);
EXPECT_DEATH(a->Allocate(&cache, -1, 1),
"allocator is terminating the process");
@@ -873,8 +874,8 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
}
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
- LargeMmapAllocator<> a;
- a.Init(/* may_return_null */ false);
+ LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+ a.Init();
AllocatorStats stats;
stats.Init();
@@ -900,8 +901,8 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
}
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
- LargeMmapAllocator<> a;
- a.Init(/* may_return_null */ false);
+ LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+ a.Init();
AllocatorStats stats;
stats.Init();
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index ce69ddf55..1d0db84a5 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -273,6 +273,8 @@ struct ScudoAllocator {
static const uptr MaxAllowedMallocSize =
FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
+ typedef ReturnNullOrDieOnFailure FailureHandler;
+
ScudoBackendAllocator BackendAllocator;
ScudoQuarantine AllocatorQuarantine;
@@ -326,7 +328,8 @@ struct ScudoAllocator {
DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
DeleteSizeMismatch = Options.DeleteSizeMismatch;
ZeroContents = Options.ZeroContents;
- BackendAllocator.Init(Options.MayReturnNull, Options.ReleaseToOSIntervalMs);
+ SetAllocatorMayReturnNull(Options.MayReturnNull);
+ BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
AllocatorQuarantine.Init(
static_cast<uptr>(Options.QuarantineSizeMb) << 20,
static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
@@ -354,11 +357,11 @@ struct ScudoAllocator {
dieWithMessage("ERROR: alignment is not a power of 2\n");
}
if (Alignment > MaxAlignment)
- return BackendAllocator.ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
if (Alignment < MinAlignment)
Alignment = MinAlignment;
if (Size >= MaxAllowedMallocSize)
- return BackendAllocator.ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
if (Size == 0)
Size = 1;
@@ -366,7 +369,7 @@ struct ScudoAllocator {
uptr AlignedSize = (Alignment > MinAlignment) ?
NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
if (AlignedSize >= MaxAllowedMallocSize)
- return BackendAllocator.ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
// Primary and Secondary backed allocations have a different treatment. We
// deal with alignment requirements of Primary serviced allocations here,
@@ -391,7 +394,7 @@ struct ScudoAllocator {
AllocationAlignment, FromPrimary);
}
if (!Ptr)
- return BackendAllocator.ReturnNullOrDieOnOOM();
+ return FailureHandler::OnOOM();
// If requested, we will zero out the entire contents of the returned chunk.
if ((ForceZeroContents || ZeroContents) && FromPrimary)
@@ -583,7 +586,7 @@ struct ScudoAllocator {
initThreadMaybe();
uptr Total = NMemB * Size;
if (Size != 0 && Total / Size != NMemB) // Overflow check
- return BackendAllocator.ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
return allocate(Total, MinAlignment, FromMalloc, true);
}
diff --git a/lib/scudo/scudo_allocator_combined.h b/lib/scudo/scudo_allocator_combined.h
index c978db55a..21c45897b 100644
--- a/lib/scudo/scudo_allocator_combined.h
+++ b/lib/scudo/scudo_allocator_combined.h
@@ -23,11 +23,10 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator>
class ScudoCombinedAllocator {
public:
- void Init(bool AllocatorMayReturnNull, s32 ReleaseToOSIntervalMs) {
+ void Init(s32 ReleaseToOSIntervalMs) {
Primary.Init(ReleaseToOSIntervalMs);
- Secondary.Init(AllocatorMayReturnNull);
+ Secondary.Init();
Stats.Init();
- atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
}
void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
@@ -37,18 +36,6 @@ class ScudoCombinedAllocator {
return Secondary.Allocate(&Stats, Size, Alignment);
}
- void *ReturnNullOrDieOnBadRequest() {
- if (atomic_load_relaxed(&MayReturnNull))
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (atomic_load_relaxed(&MayReturnNull))
- return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
if (FromPrimary)
Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
@@ -78,7 +65,6 @@ class ScudoCombinedAllocator {
PrimaryAllocator Primary;
SecondaryAllocator Secondary;
AllocatorGlobalStats Stats;
- atomic_uint8_t MayReturnNull;
};
#endif // SCUDO_ALLOCATOR_COMBINED_H_
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index 2950909b5..dbfb22565 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -24,9 +24,8 @@
class ScudoLargeMmapAllocator {
public:
- void Init(bool AllocatorMayReturnNull) {
+ void Init() {
PageSize = GetPageSizeCached();
- atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
}
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
@@ -42,7 +41,7 @@ class ScudoLargeMmapAllocator {
uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
if (MapBeg == ~static_cast<uptr>(0))
- return ReturnNullOrDieOnOOM();
+ return ReturnNullOrDieOnFailure::OnOOM();
// A page-aligned pointer is assumed after that, so check it now.
CHECK(IsAligned(MapBeg, PageSize));
uptr MapEnd = MapBeg + MapSize;
@@ -96,12 +95,6 @@ class ScudoLargeMmapAllocator {
return reinterpret_cast<void *>(Ptr);
}
- void *ReturnNullOrDieOnOOM() {
- if (atomic_load_relaxed(&MayReturnNull))
- return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
void Deallocate(AllocatorStats *Stats, void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
{
@@ -140,7 +133,6 @@ class ScudoLargeMmapAllocator {
const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
uptr PageSize;
SpinMutex StatsMutex;
- atomic_uint8_t MayReturnNull;
};
#endif // SCUDO_ALLOCATOR_SECONDARY_H_
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 2dea24915..fa0d0cafe 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -112,9 +112,8 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
}
void InitializeAllocator() {
- allocator()->Init(
- common_flags()->allocator_may_return_null,
- common_flags()->allocator_release_to_os_interval_ms);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
}
void InitializeAllocatorLate() {
@@ -151,7 +150,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return allocator()->ReturnNullOrDieOnBadRequest();
+ return Allocator::FailureHandler::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
@@ -164,7 +163,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CallocShouldReturnNullDueToOverflow(size, n))
- return allocator()->ReturnNullOrDieOnBadRequest();
+ return Allocator::FailureHandler::OnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
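
Every runtime touched by the patch (asan, lsan, msan, tsan, scudo) converges on the same two-step initialization: cache the flag first, then initialize the allocator without it. The schematic below restates that order with stand-in types; ToolFlags and ToolAllocator are illustrative, while SetAllocatorMayReturnNull mirrors the entry point added in sanitizer_common.

// Schematic of the converged tool-side initialization order. ToolFlags and
// ToolAllocator are illustrative stand-ins; SetAllocatorMayReturnNull mirrors
// the function the patch adds to sanitizer_common.
#include <atomic>

static std::atomic<bool> allocator_may_return_null{false};

static void SetAllocatorMayReturnNull(bool v) {       // as in the patch
  allocator_may_return_null.store(v, std::memory_order_relaxed);
}

struct ToolFlags {                 // stand-in for common_flags()/tool options
  bool allocator_may_return_null;
  int allocator_release_to_os_interval_ms;
};

struct ToolAllocator {             // stand-in for a CombinedAllocator instance
  void Init(int release_to_os_interval_ms) { (void)release_to_os_interval_ms; }
};

static ToolAllocator allocator;

// Mirrors the tools' InitializeAllocator() after the patch: the flag is cached
// process-wide before the allocator is initialized, and Init() no longer
// receives it.
void InitializeAllocator(const ToolFlags &f) {
  SetAllocatorMayReturnNull(f.allocator_may_return_null);
  allocator.Init(f.allocator_release_to_os_interval_ms);
}

int main() {
  InitializeAllocator(ToolFlags{true, -1});
  return 0;
}
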