author     Alex Shlyapnikov <alekseys@google.com>  2018-01-17 23:20:36 +0000
committer  Alex Shlyapnikov <alekseys@google.com>  2018-01-17 23:20:36 +0000
commit     6ae9c03ffaf72765033e3bec140b52741b0177aa
tree       75e9316b11ef9017050e32747421c3b3674a1995 /lib
parent     47982b47999810f261eaff6b599e661be092ae10
[Sanitizers] Make common allocator agnostic to failure handling modes.
Summary:
Make the common allocator agnostic to failure handling modes and move that
decision up to the particular sanitizer's allocator, where the context is
available (call stack, parameters, return-null/crash mode, etc.). This
simplifies the common allocator and lets each sanitizer's allocator generate
more specific and detailed error reports (to be implemented later).

The behavior is largely the same, with one exception: a violation of the
common allocator's "size + alignment" overflow check is now reported as OOM
instead of "bad request". This seems like a worthwhile tradeoff, and
"size + alignment" is huge in that case anyway, so it can reasonably be
interpreted as not enough memory to satisfy the request. A Report() statement
is also added there.

Reviewers: eugenis

Subscribers: kubamracek, llvm-commits, #sanitizers

Differential Revision: https://reviews.llvm.org/D42198

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@322784 91177308-0d34-0410-b5e6-96231b3b80d8
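In rough terms, the new division of responsibility looks like the sketch below. This is a simplified, self-contained illustration, not compiler-rt code: CommonAllocate, ReturnNullOrDie, ToolAllocate, and kMaxAllowedSize are hypothetical stand-ins, while the real handlers used in the diff are ReturnNullOrDieOnFailure / DieOnFailure and their OnOOM() / OnBadRequest() hooks.

// Simplified sketch (assumed names, not compiler-rt code): the common
// allocator signals failure only by returning nullptr, and the tool-specific
// allocator applies the failure policy where the full context is known.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Stand-in for the common/combined allocator: no failure policy baked in.
static void *CommonAllocate(std::size_t size, std::size_t alignment) {
  if (size + alignment < size) {  // overflow: report it and return null
    std::fprintf(stderr,
                 "WARNING: allocation overflow: %zu bytes with %zu alignment\n",
                 size, alignment);
    return nullptr;
  }
  return std::malloc(size);  // alignment handling elided for brevity
}

// Stand-in for ReturnNullOrDieOnFailure: the policy lives with the caller.
struct ReturnNullOrDie {
  static bool may_return_null;
  static void *OnOOM() {
    if (may_return_null) return nullptr;
    std::fprintf(stderr, "allocator is out of memory, terminating\n");
    std::abort();
  }
  static void *OnBadRequest() {
    if (may_return_null) return nullptr;
    std::fprintf(stderr, "invalid allocation request, terminating\n");
    std::abort();
  }
};
bool ReturnNullOrDie::may_return_null = true;

// Tool-specific allocator: it sees the request parameters and mode, so it
// decides how each failure is handled (previously the common allocator did).
static const unsigned long long kMaxAllowedSize = 1ULL << 40;  // hypothetical cap
void *ToolAllocate(std::size_t size, std::size_t alignment) {
  if (size > kMaxAllowedSize)
    return ReturnNullOrDie::OnBadRequest();
  void *p = CommonAllocate(size, alignment);
  if (!p)
    return ReturnNullOrDie::OnOOM();
  return p;
}

This mirrors the diff below: LargeMmapAllocator and CombinedAllocator now just Report() and return nullptr on failure, while ASan, HWASan, LSan, MSan, and TSan call ReturnNullOrDieOnFailure (or DieOnFailure, for the internal allocator) at their own call sites.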
Diffstat (limited to 'lib')
-rw-r--r--  lib/asan/asan_allocator.cc                               18
-rw-r--r--  lib/hwasan/hwasan_allocator.cc                            4
-rw-r--r--  lib/lsan/lsan_allocator.cc                               10
-rw-r--r--  lib/msan/msan_allocator.cc                               14
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.cc              13
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_combined.h      12
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_internal.h       2
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_secondary.h     15
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc   15
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc                                14
10 files changed, 60 insertions(+), 57 deletions(-)
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index a437ae1cd..ee8af9758 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -398,7 +398,7 @@ struct Allocator {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
if (RssLimitExceeded())
- return AsanAllocator::FailureHandler::OnOOM();
+ return ReturnNullOrDieOnFailure::OnOOM();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
@@ -433,7 +433,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
- return AsanAllocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
AsanThread *t = GetCurrentThread();
@@ -446,8 +446,8 @@ struct Allocator {
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, needed_size, 8);
}
- if (!allocated)
- return nullptr;
+ if (UNLIKELY(!allocated))
+ return ReturnNullOrDieOnFailure::OnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -660,8 +660,8 @@ struct Allocator {
}
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
- if (CheckForCallocOverflow(size, nmemb))
- return AsanAllocator::FailureHandler::OnBadRequest();
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+ return ReturnNullOrDieOnFailure::OnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
@@ -883,7 +883,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
- return AsanAllocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -895,7 +895,7 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
- return AsanAllocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
return SetErrnoOnNull(
instance.Allocate(size, alignment, stack, alloc_type, true));
@@ -904,7 +904,7 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
- AsanAllocator::FailureHandler::OnBadRequest();
+ ReturnNullOrDieOnFailure::OnBadRequest();
return errno_EINVAL;
}
void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
diff --git a/lib/hwasan/hwasan_allocator.cc b/lib/hwasan/hwasan_allocator.cc
index 2acac8189..5bd46f8f5 100644
--- a/lib/hwasan/hwasan_allocator.cc
+++ b/lib/hwasan/hwasan_allocator.cc
@@ -128,7 +128,7 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: HWAddressSanitizer failed to allocate %p bytes\n",
(void *)size);
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
HwasanThread *t = GetCurrentThread();
void *allocated;
@@ -140,6 +140,8 @@ static void *HwasanAllocate(StackTrace *stack, uptr size, uptr alignment,
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, size, alignment);
}
+ if (UNLIKELY(!allocated))
+ return ReturnNullOrDieOnFailure::OnOOM();
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->state = CHUNK_ALLOCATED;
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 2df58b44f..f5e4b5803 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -76,9 +76,11 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
size = 1;
if (size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+ if (UNLIKELY(!p))
+ return ReturnNullOrDieOnFailure::OnOOM();
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
@@ -90,7 +92,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
size *= nmemb;
return Allocate(stack, size, 1, true);
}
@@ -108,7 +110,7 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
allocator.Deallocate(GetAllocatorCache(), p);
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
@@ -129,7 +131,7 @@ uptr GetMallocUsableSize(const void *p) {
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 0f9942324..8a23edf2c 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -141,7 +141,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
if (size > kMaxAllowedMallocSize) {
Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
(void *)size);
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
MsanThread *t = GetCurrentThread();
void *allocated;
@@ -153,6 +153,8 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
AllocatorCache *cache = &fallback_allocator_cache;
allocated = allocator.Allocate(cache, size, alignment);
}
+ if (UNLIKELY(!allocated))
+ return ReturnNullOrDieOnFailure::OnOOM();
Metadata *meta =
reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
meta->requested_size = size;
@@ -236,7 +238,7 @@ void *msan_malloc(uptr size, StackTrace *stack) {
void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
- return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+ return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
}
@@ -258,7 +260,7 @@ void *msan_pvalloc(uptr size, StackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
@@ -268,7 +270,7 @@ void *msan_pvalloc(uptr size, StackTrace *stack) {
void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
@@ -276,7 +278,7 @@ void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
@@ -284,7 +286,7 @@ void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
- Allocator::FailureHandler::OnBadRequest();
+ ReturnNullOrDieOnFailure::OnBadRequest();
return errno_EINVAL;
}
void *ptr = MsanAllocate(stack, size, alignment, false);
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index fc4f7a75a..0642ee426 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -140,8 +140,8 @@ void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size)
return nullptr;
void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
- if (!p)
- return nullptr;
+ if (UNLIKELY(!p))
+ return DieOnFailure::OnOOM();
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
@@ -155,16 +155,17 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
size = size + sizeof(u64);
CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
void *p = RawInternalRealloc(addr, size, cache);
- if (!p)
- return nullptr;
+ if (UNLIKELY(!p))
+ return DieOnFailure::OnOOM();
return (char*)p + sizeof(u64);
}
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (UNLIKELY(CheckForCallocOverflow(count, size)))
- return InternalAllocator::FailureHandler::OnBadRequest();
+ return DieOnFailure::OnBadRequest();
void *p = InternalAlloc(count * size, cache);
- if (p) internal_memset(p, 0, count * size);
+ if (LIKELY(p))
+ internal_memset(p, 0, count * size);
return p;
}
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index 0d8a2a174..1f874d60b 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,8 +24,6 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- typedef typename SecondaryAllocator::FailureHandler FailureHandler;
-
void InitLinkerInitialized(s32 release_to_os_interval_ms) {
primary_.Init(release_to_os_interval_ms);
secondary_.InitLinkerInitialized();
@@ -42,8 +40,12 @@ class CombinedAllocator {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
- if (size + alignment < size)
- return FailureHandler::OnBadRequest();
+ if (size + alignment < size) {
+ Report("WARNING: %s: CombinedAllocator allocation overflow: "
+ "0x%zx bytes with 0x%zx alignment requested\n",
+ SanitizerToolName, size, alignment);
+ return nullptr;
+ }
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
@@ -62,8 +64,6 @@ class CombinedAllocator {
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, original_size, alignment);
- if (!res)
- return FailureHandler::OnOOM();
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
return res;
diff --git a/lib/sanitizer_common/sanitizer_allocator_internal.h b/lib/sanitizer_common/sanitizer_allocator_internal.h
index a791d0d94..10536a965 100644
--- a/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -47,7 +47,7 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
InternalAllocatorCache;
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
- LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure>
+ LargeMmapAllocator<NoOpMapUnmapCallback>
> InternalAllocator;
void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 261dfb5e1..fb3182c98 100644
--- a/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,12 +17,9 @@
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback,
- class FailureHandlerT = ReturnNullOrDieOnFailure>
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
class LargeMmapAllocator {
public:
- typedef FailureHandlerT FailureHandler;
-
void InitLinkerInitialized() {
page_size_ = GetPageSizeCached();
}
@@ -38,12 +35,16 @@ class LargeMmapAllocator {
if (alignment > page_size_)
map_size += alignment;
// Overflow.
- if (map_size < size)
- return FailureHandler::OnBadRequest();
+ if (map_size < size) {
+ Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
+ "0x%zx bytes with 0x%zx alignment requested\n",
+ SanitizerToolName, map_size, alignment);
+ return nullptr;
+ }
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
if (!map_beg)
- return FailureHandler::OnOOM();
+ return nullptr;
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 7b5e3e21f..cc7892b63 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -444,7 +444,7 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
TestMapUnmapCallback::map_count = 0;
TestMapUnmapCallback::unmap_count = 0;
- LargeMmapAllocator<TestMapUnmapCallback, DieOnFailure> a;
+ LargeMmapAllocator<TestMapUnmapCallback> a;
a.Init();
AllocatorStats stats;
stats.Init();
@@ -482,7 +482,7 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
#endif
TEST(SanitizerCommon, LargeMmapAllocator) {
- LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+ LargeMmapAllocator<NoOpMapUnmapCallback> a;
a.Init();
AllocatorStats stats;
stats.Init();
@@ -565,7 +565,6 @@ void TestCombinedAllocator() {
typedef
CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
Allocator;
- SetAllocatorMayReturnNull(true);
Allocator *a = new Allocator;
a->Init(kReleaseToOSIntervalNever);
std::mt19937 r;
@@ -579,11 +578,7 @@ void TestCombinedAllocator() {
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
-
- // Set to false
- SetAllocatorMayReturnNull(false);
- EXPECT_DEATH(a->Allocate(&cache, -1, 1),
- "allocator is terminating the process");
+ EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
const uptr kNumAllocs = 100000;
const uptr kNumIter = 10;
@@ -893,7 +888,7 @@ TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
}
TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
- LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+ LargeMmapAllocator<NoOpMapUnmapCallback> a;
a.Init();
AllocatorStats stats;
stats.Init();
@@ -920,7 +915,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
}
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
- LargeMmapAllocator<NoOpMapUnmapCallback, DieOnFailure> a;
+ LargeMmapAllocator<NoOpMapUnmapCallback> a;
a.Init();
AllocatorStats stats;
stats.Init();
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 19680238b..39c0d8607 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -153,10 +153,10 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (UNLIKELY(p == 0))
- return 0;
+ return ReturnNullOrDieOnFailure::OnOOM();
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
if (signal)
@@ -179,7 +179,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (UNLIKELY(CheckForCallocOverflow(size, n)))
- return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+ return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
void *p = user_alloc_internal(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
@@ -224,7 +224,7 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
if (UNLIKELY(!IsPowerOfTwo(align))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
@@ -232,7 +232,7 @@ void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
uptr sz) {
if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
- Allocator::FailureHandler::OnBadRequest();
+ ReturnNullOrDieOnFailure::OnBadRequest();
return errno_EINVAL;
}
void *ptr = user_alloc_internal(thr, pc, sz, align);
@@ -246,7 +246,7 @@ int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
errno = errno_EINVAL;
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
}
@@ -259,7 +259,7 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
errno = errno_ENOMEM;
- return Allocator::FailureHandler::OnBadRequest();
+ return ReturnNullOrDieOnFailure::OnBadRequest();
}
// pvalloc(0) should allocate one page.
sz = sz ? RoundUpTo(sz, PageSize) : PageSize;