From 13c2d08571baab9982f01acb1d302049bdace581 Mon Sep 17 00:00:00 2001
From: Alex Shlyapnikov
Date: Tue, 18 Jul 2017 19:11:04 +0000
Subject: [Sanitizers] ASan/MSan/LSan allocators set errno on failure.

Summary:
ASan/MSan/LSan allocators now set errno on allocation failures, matching
the behavior expected of malloc/calloc/etc.

The MSan allocator was refactored a bit to make its structure more
similar to the other allocators.

Also switch the Scudo allocator to the internal errno definitions.

TSan allocator changes will follow.

Reviewers: eugenis

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D35275

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@308344 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/asan/asan_allocator.cc                        | 27 +++----
 lib/lsan/lsan_allocator.cc                        | 17 ++---
 lib/msan/msan.h                                   | 14 +++-
 lib/msan/msan_allocator.cc                        | 85 ++++++++++++++++++-----
 lib/msan/msan_interceptors.cc                     | 49 +++++--------
 lib/msan/msan_new_delete.cc                       |  2 +-
 lib/sanitizer_common/sanitizer_allocator.cc       |  9 +--
 lib/sanitizer_common/sanitizer_allocator.h        |  5 --
 lib/sanitizer_common/sanitizer_allocator_checks.h | 64 +++++++++++++++++
 lib/scudo/scudo_allocator.cpp                     | 31 ++++-----
 lib/tsan/rtl/tsan_mman.cc                         |  1 +
 11 files changed, 193 insertions(+), 111 deletions(-)
 create mode 100644 lib/sanitizer_common/sanitizer_allocator_checks.h

diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index 0c7c7465f..92963ddfc 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -21,6 +21,7 @@
 #include "asan_report.h"
 #include "asan_stack.h"
 #include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_errno.h"
 #include "sanitizer_common/sanitizer_flags.h"
@@ -809,23 +810,17 @@ void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
   instance.Deallocate(ptr, size, stack, alloc_type);
 }

-inline void *check_ptr(void *ptr) {
-  if (UNLIKELY(!ptr))
-    errno = errno_ENOMEM;
-  return ptr;
-}
-
 void *asan_malloc(uptr size, BufferedStackTrace *stack) {
-  return check_ptr(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
+  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
 }

 void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
-  return check_ptr(instance.Calloc(nmemb, size, stack));
+  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
 }

 void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
   if (!p)
-    return check_ptr(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
+    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
   if (size == 0) {
     if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
       instance.Deallocate(p, 0, stack, FROM_MALLOC);
@@ -834,11 +829,11 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
     // Allocate a size of 1 if we shouldn't free() on Realloc to 0
     size = 1;
   }
-  return check_ptr(instance.Reallocate(p, size, stack));
+  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
 }

 void *asan_valloc(uptr size, BufferedStackTrace *stack) {
-  return check_ptr(
+  return SetErrnoOnNull(
       instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
 }

@@ -846,7 +841,8 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
   uptr PageSize = GetPageSizeCached();
   // pvalloc(0) should allocate one page.
   size = size ? RoundUpTo(size, PageSize) : PageSize;
-  return check_ptr(instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
+  return SetErrnoOnNull(
+      instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
 }

 void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
@@ -855,19 +851,18 @@ void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
     errno = errno_EINVAL;
     return AsanAllocator::FailureHandler::OnBadRequest();
   }
-  return check_ptr(
+  return SetErrnoOnNull(
       instance.Allocate(size, alignment, stack, alloc_type, true));
 }

 int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
                         BufferedStackTrace *stack) {
-  if (UNLIKELY(!IsPowerOfTwo(alignment) ||
-               (alignment % sizeof(void *)) != 0)) {  // NOLINT
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
     AsanAllocator::FailureHandler::OnBadRequest();
     return errno_EINVAL;
   }
   void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
-  if (!ptr)
+  if (UNLIKELY(!ptr))
     return errno_ENOMEM;
   CHECK(IsAligned((uptr)ptr, alignment));
   *memptr = ptr;
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 96d5cb6a9..2df58b44f 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -15,6 +15,7 @@
 #include "lsan_allocator.h"

 #include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_errno.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
@@ -125,22 +126,16 @@ uptr GetMallocUsableSize(const void *p) {
   return m->requested_size;
 }

-inline void *check_ptr(void *ptr) {
-  if (UNLIKELY(!ptr))
-    errno = errno_ENOMEM;
-  return ptr;
-}
-
 void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
   if (UNLIKELY(!IsPowerOfTwo(alignment))) {
     errno = errno_EINVAL;
     return Allocator::FailureHandler::OnBadRequest();
   }
-  return check_ptr(Allocate(stack, size, alignment, kAlwaysClearMemory));
+  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
 }

 void *lsan_malloc(uptr size, const StackTrace &stack) {
-  return check_ptr(Allocate(stack, size, 1, kAlwaysClearMemory));
+  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
 }

 void lsan_free(void *p) {
@@ -148,15 +143,15 @@ void lsan_free(void *p) {
 }

 void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
-  return check_ptr(Reallocate(stack, p, size, 1));
+  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
 }

 void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
-  return check_ptr(Calloc(nmemb, size, stack));
+  return SetErrnoOnNull(Calloc(nmemb, size, stack));
 }

 void *lsan_valloc(uptr size, const StackTrace &stack) {
-  return check_ptr(
+  return SetErrnoOnNull(
       Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
 }

diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index 0709260ee..fa9c15b88 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -280,10 +280,18 @@ void InitializeInterceptors();

 void MsanAllocatorInit();
 void MsanAllocatorThreadFinish();
-void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size);
-void *MsanReallocate(StackTrace *stack, void *oldp, uptr size,
-                     uptr alignment, bool zeroise);
 void MsanDeallocate(StackTrace *stack, void *ptr);
+
+void *msan_malloc(uptr size, StackTrace *stack);
+void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack);
+void *msan_realloc(void *ptr, uptr size, StackTrace *stack);
+void *msan_valloc(uptr size, StackTrace *stack);
+void *msan_pvalloc(uptr size, StackTrace *stack);
+void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
+void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
+int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        StackTrace *stack);
+
 void InstallTrapHandler();
 void InstallAtExitHandler();

diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index a92b7fd12..1034dbdf9 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -13,7 +13,9 @@
 //===----------------------------------------------------------------------===//

 #include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_errno.h"
 #include "msan.h"
 #include "msan_allocator.h"
 #include "msan_origin.h"
@@ -194,20 +196,8 @@ void MsanDeallocate(StackTrace *stack, void *p) {
   }
 }

-void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
-  if (CheckForCallocOverflow(size, nmemb))
-    return Allocator::FailureHandler::OnBadRequest();
-  return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
-}
-
 void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
-                     uptr alignment, bool zeroise) {
-  if (!old_p)
-    return MsanAllocate(stack, new_size, alignment, zeroise);
-  if (!new_size) {
-    MsanDeallocate(stack, old_p);
-    return nullptr;
-  }
+                     uptr alignment) {
   Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
   uptr old_size = meta->requested_size;
   uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
@@ -215,10 +205,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
     // We are not reallocating here.
     meta->requested_size = new_size;
     if (new_size > old_size) {
-      if (zeroise) {
-        __msan_clear_and_unpoison((char *)old_p + old_size,
-                                  new_size - old_size);
-      } else if (flags()->poison_in_malloc) {
+      if (flags()->poison_in_malloc) {
         stack->tag = StackTrace::TAG_ALLOC;
         PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
       }
@@ -226,8 +213,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
     return old_p;
   }
   uptr memcpy_size = Min(new_size, old_size);
-  void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
-  // Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
+  void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
   if (new_p) {
     CopyMemory(new_p, old_p, memcpy_size, stack);
     MsanDeallocate(stack, old_p);
@@ -243,6 +229,67 @@ static uptr AllocationSize(const void *p) {
   return b->requested_size;
 }

+void *msan_malloc(uptr size, StackTrace *stack) {
+  return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
+}
+
+void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+    return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+  return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
+}
+
+void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
+  if (!ptr)
+    return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
+  if (size == 0) {
+    MsanDeallocate(stack, ptr);
+    return nullptr;
+  }
+  return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
+}
+
+void *msan_valloc(uptr size, StackTrace *stack) {
+  return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
+}
+
+void *msan_pvalloc(uptr size, StackTrace *stack) {
+  uptr PageSize = GetPageSizeCached();
+  // pvalloc(0) should allocate one page.
+  size = size == 0 ? PageSize : RoundUpTo(size, PageSize);
+  return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
+}
+
+void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    return Allocator::FailureHandler::OnBadRequest();
+  }
+  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+}
+
+void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    return Allocator::FailureHandler::OnBadRequest();
+  }
+  return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+}
+
+int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
+                        StackTrace *stack) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    Allocator::FailureHandler::OnBadRequest();
+    return errno_EINVAL;
+  }
+  void *ptr = MsanAllocate(stack, size, alignment, false);
+  if (UNLIKELY(!ptr))
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
 }  // namespace __msan

 using namespace __msan;
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index 069777c7f..b5d22baca 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -161,58 +161,45 @@ INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) {
 INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment,
             SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
-  CHECK_EQ(alignment & (alignment - 1), 0);
   CHECK_NE(memptr, 0);
-  *memptr = MsanReallocate(&stack, nullptr, size, alignment, false);
-  CHECK_NE(*memptr, 0);
-  __msan_unpoison(memptr, sizeof(*memptr));
-  return 0;
+  int res = msan_posix_memalign(memptr, alignment, size, &stack);
+  if (!res)
+    __msan_unpoison(memptr, sizeof(*memptr));
+  return res;
 }

 #if !SANITIZER_FREEBSD
-INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) {
+INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
-  CHECK_EQ(boundary & (boundary - 1), 0);
-  void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
-  return ptr;
+  return msan_memalign(alignment, size, &stack);
 }
 #define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
 #else
 #define MSAN_MAYBE_INTERCEPT_MEMALIGN
 #endif

-INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) {
+INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
-  CHECK_EQ(boundary & (boundary - 1), 0);
-  void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
-  return ptr;
+  return msan_aligned_alloc(alignment, size, &stack);
 }

-INTERCEPTOR(void *, __libc_memalign, SIZE_T boundary, SIZE_T size) {
+INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
-  CHECK_EQ(boundary & (boundary - 1), 0);
-  void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
-  DTLS_on_libc_memalign(ptr, size);
+  void *ptr = msan_memalign(alignment, size, &stack);
+  if (ptr)
+    DTLS_on_libc_memalign(ptr, size);
   return ptr;
 }

 INTERCEPTOR(void *, valloc, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
-  void *ptr = MsanReallocate(&stack, nullptr, size, GetPageSizeCached(), false);
-  return ptr;
+  return msan_valloc(size, &stack);
 }

 #if !SANITIZER_FREEBSD
 INTERCEPTOR(void *, pvalloc, SIZE_T size) {
   GET_MALLOC_STACK_TRACE;
-  uptr PageSize = GetPageSizeCached();
-  size = RoundUpTo(size, PageSize);
-  if (size == 0) {
-    // pvalloc(0) should allocate one page.
-    size = PageSize;
-  }
-  void *ptr = MsanReallocate(&stack, nullptr, size, PageSize, false);
-  return ptr;
+  return msan_pvalloc(size, &stack);
 }
 #define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
 #else
@@ -853,7 +840,7 @@ INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
   if (UNLIKELY(!msan_inited))
     // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
    return AllocateFromLocalPool(nmemb * size);
-  return MsanCalloc(&stack, nmemb, size);
+  return msan_calloc(nmemb, size, &stack);
 }

 INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
@@ -866,12 +853,12 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
       new_ptr = AllocateFromLocalPool(copy_size);
     } else {
       copy_size = size;
-      new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false);
+      new_ptr = msan_malloc(copy_size, &stack);
     }
     internal_memcpy(new_ptr, ptr, copy_size);
     return new_ptr;
   }
-  return MsanReallocate(&stack, ptr, size, sizeof(u64), false);
+  return msan_realloc(ptr, size, &stack);
 }

 INTERCEPTOR(void *, malloc, SIZE_T size) {
@@ -879,7 +866,7 @@
   if (UNLIKELY(!msan_inited))
     // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
     return AllocateFromLocalPool(size);
-  return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
+  return msan_malloc(size, &stack);
 }

 void __msan_allocated_memory(const void *data, uptr size) {
diff --git a/lib/msan/msan_new_delete.cc b/lib/msan/msan_new_delete.cc
index c7295feeb..721926791 100644
--- a/lib/msan/msan_new_delete.cc
+++ b/lib/msan/msan_new_delete.cc
@@ -31,7 +31,7 @@ namespace std {
 // TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
 #define OPERATOR_NEW_BODY(nothrow) \
   GET_MALLOC_STACK_TRACE; \
-  void *res = MsanReallocate(&stack, 0, size, sizeof(u64), false);\
+  void *res = msan_malloc(size, &stack);\
   if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
   return res
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 2f8f6e3f9..84f523c5e 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -14,6 +14,7 @@

 #include "sanitizer_allocator.h"

+#include "sanitizer_allocator_checks.h"
 #include "sanitizer_allocator_internal.h"
 #include "sanitizer_atomic.h"
 #include "sanitizer_common.h"
@@ -160,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
 }

 void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
-  if (CheckForCallocOverflow(count, size))
+  if (UNLIKELY(CheckForCallocOverflow(count, size)))
     return InternalAllocator::FailureHandler::OnBadRequest();
   void *p = InternalAlloc(count * size, cache);
   if (p) internal_memset(p, 0, count * size);
@@ -202,12 +203,6 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
   low_level_alloc_callback = callback;
 }

-bool CheckForCallocOverflow(uptr size, uptr n) {
-  if (!size) return false;
-  uptr max = (uptr)-1L;
-  return (max / size) < n;
-}
-
 static atomic_uint8_t allocator_out_of_memory = {0};

 static atomic_uint8_t allocator_may_return_null = {0};
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 0fb8a087e..8c5696ea7 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -56,11 +56,6 @@ struct NoOpMapUnmapCallback {
 // Callback type for iterating over chunks.
 typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

-// Returns true if calloc(size, n) call overflows on size*n calculation.
-// The caller should "return POLICY::OnBadRequest();" where POLICY is the
-// current allocator failure handling policy.
-bool CheckForCallocOverflow(uptr size, uptr n);
-
 #include "sanitizer_allocator_size_class_map.h"
 #include "sanitizer_allocator_stats.h"
 #include "sanitizer_allocator_primary64.h"
diff --git a/lib/sanitizer_common/sanitizer_allocator_checks.h b/lib/sanitizer_common/sanitizer_allocator_checks.h
new file mode 100644
index 000000000..202916eae
--- /dev/null
+++ b/lib/sanitizer_common/sanitizer_allocator_checks.h
@@ -0,0 +1,64 @@
+//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
+// allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_CHECKS_H
+#define SANITIZER_ALLOCATOR_CHECKS_H
+
+#include "sanitizer_errno.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+// A common errno setting logic shared by almost all sanitizer allocator APIs.
+INLINE void *SetErrnoOnNull(void *ptr) {
+  if (UNLIKELY(!ptr))
+    errno = errno_ENOMEM;
+  return ptr;
+}
+
+// In case of the check failure, the caller of the following Check... functions
+// should "return POLICY::OnBadRequest();" where POLICY is the current allocator
+// failure handling policy.
+
+// Checks aligned_alloc() parameters, verifies that the alignment is a power of
+// two and that the size is a multiple of alignment for POSIX implementation,
+// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
+// of alignment.
+INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
+#if SANITIZER_POSIX
+  return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0;
+#else
+  return size % alignment == 0;
+#endif
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
+  return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0;  // NOLINT
+}
+
+// Returns true if calloc(size, n) call overflows on size*n calculation.
+INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
+  if (!size)
+    return false;
+  uptr max = (uptr)-1L;
+  return (max / size) < n;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_CHECKS_H
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 102d1d0df..6f30ee987 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -19,6 +19,7 @@
 #include "scudo_tls.h"
 #include "scudo_utils.h"

+#include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_errno.h"
 #include "sanitizer_common/sanitizer_quarantine.h"
@@ -638,14 +639,8 @@ void ScudoThreadContext::commitBack() {
   Instance.commitBack(this);
 }

-INLINE void *checkPtr(void *Ptr) {
-  if (UNLIKELY(!Ptr))
-    errno = errno_ENOMEM;
-  return Ptr;
-}
-
 void *scudoMalloc(uptr Size, AllocType Type) {
-  return checkPtr(Instance.allocate(Size, MinAlignment, Type));
+  return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
 }

 void scudoFree(void *Ptr, AllocType Type) {
@@ -658,27 +653,28 @@ void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {

 void *scudoRealloc(void *Ptr, uptr Size) {
   if (!Ptr)
-    return checkPtr(Instance.allocate(Size, MinAlignment, FromMalloc));
+    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
   if (Size == 0) {
     Instance.deallocate(Ptr, 0, FromMalloc);
     return nullptr;
   }
-  return checkPtr(Instance.reallocate(Ptr, Size));
+  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
 }

 void *scudoCalloc(uptr NMemB, uptr Size) {
-  return checkPtr(Instance.calloc(NMemB, Size));
+  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
 }

 void *scudoValloc(uptr Size) {
-  return checkPtr(Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
+  return SetErrnoOnNull(
+      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
 }

 void *scudoPvalloc(uptr Size) {
   uptr PageSize = GetPageSizeCached();
   // pvalloc(0) should allocate one page.
   Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
-  return checkPtr(Instance.allocate(Size, PageSize, FromMemalign));
+  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
 }

 void *scudoMemalign(uptr Alignment, uptr Size) {
@@ -686,28 +682,27 @@
     errno = errno_EINVAL;
     return ScudoAllocator::FailureHandler::OnBadRequest();
   }
-  return checkPtr(Instance.allocate(Size, Alignment, FromMemalign));
+  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
 }

 int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
-  if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Alignment % sizeof(void *)) != 0)) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
     ScudoAllocator::FailureHandler::OnBadRequest();
     return errno_EINVAL;
   }
   void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
-  if (!Ptr)
+  if (UNLIKELY(!Ptr))
     return errno_ENOMEM;
   *MemPtr = Ptr;
   return 0;
 }

 void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
-  // Alignment must be a power of 2, Size must be a multiple of Alignment.
-  if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Size & (Alignment - 1)) != 0)) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
     errno = errno_EINVAL;
     return ScudoAllocator::FailureHandler::OnBadRequest();
   }
-  return checkPtr(Instance.allocate(Size, Alignment, FromMalloc));
+  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
 }

 uptr scudoMallocUsableSize(void *Ptr) {
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 1434cf688..f79dccddb 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -10,6 +10,7 @@
 // This file is a part of ThreadSanitizer (TSan), a race detector.
 //
 //===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
--
cgit v1.2.3
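
The program below is not part of the patch; it is a minimal sketch of the
user-visible behavior the change is meant to provide, assuming the sanitizer
allocator is allowed to return null (allocator_may_return_null=1; with the
default of 0 a failed request aborts instead). The file name and the request
sizes are made up for illustration. Build it against any of the affected
sanitizers, for example:

  clang++ -fsanitize=address errno_check.cc -o errno_check
  ASAN_OPTIONS=allocator_may_return_null=1 ./errno_check

// errno_check.cc -- illustrative only, not taken from the patch.
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  // A request this large is refused by the sanitizer allocator. With
  // allocator_may_return_null=1 the call returns null and, after this patch,
  // also sets errno to ENOMEM (previously errno was left untouched).
  errno = 0;
  void *p = malloc(((size_t)-1) / 2);
  if (!p && errno == ENOMEM)
    printf("malloc: null return with errno == ENOMEM, as expected\n");

  // posix_memalign() keeps reporting failure through its return value rather
  // than errno: 3 is not a power of two, so this returns EINVAL and leaves
  // *memptr unchanged.
  void *q = NULL;
  int res = posix_memalign(&q, 3, 8);
  printf("posix_memalign returned %d (EINVAL is %d)\n", res, EINVAL);

  free(p);
  free(q);
  return 0;
}

The split mirrors the structure of the patch: the malloc-family entry points
are wrapped in SetErrnoOnNull(), while the posix_memalign-style entry points
return the errno_* code directly, since POSIX requires posix_memalign() to
report errors through its return value and not through errno.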