author    Alex Shlyapnikov <alekseys@google.com>    2017-07-18 19:11:04 +0000
committer Alex Shlyapnikov <alekseys@google.com>    2017-07-18 19:11:04 +0000
commit    13c2d08571baab9982f01acb1d302049bdace581 (patch)
tree      d284ec82188f89173bb52f4872c811dd5d02f733 /lib/msan
parent    e726963d587b1d95320da4f5f05cd563e36eb8fe (diff)
[Sanitizers] ASan/MSan/LSan allocators set errno on failure.
Summary:
ASan/MSan/LSan allocators set errno on allocation failures, matching the
behavior expected of malloc/calloc/etc. The MSan allocator was refactored a
bit to make its structure more similar to the other allocators.

Also switch the Scudo allocator to the internal errno definitions.

TSan allocator changes will follow.

Reviewers: eugenis

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D35275

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@308344 91177308-0d34-0410-b5e6-96231b3b80d8
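For context, a minimal standalone sketch (not part of this commit, assuming glibc/POSIX
semantics) of the errno contract the new msan_* wrappers in the diff below emulate:
ENOMEM on allocation failure, EINVAL on an invalid alignment, and posix_memalign
reporting errors through its return value rather than errno. The oversized allocation
and the alignment of 3 are illustrative values only.

// Illustrative only: the libc contract that msan_malloc, msan_aligned_alloc
// and msan_posix_memalign now follow (assumes glibc/POSIX semantics).
#include <cerrno>
#include <cstdio>
#include <cstdlib>

int main() {
  // Allocation failure: malloc returns NULL and sets errno to ENOMEM.
  errno = 0;
  void *p = malloc(~static_cast<size_t>(0));  // assumed too large to succeed
  if (!p)
    std::printf("malloc: errno == ENOMEM? %d\n", errno == ENOMEM);

  // Invalid alignment: aligned_alloc returns NULL and sets errno to EINVAL
  // (3 is not a power of two).
  errno = 0;
  p = aligned_alloc(3, 24);
  if (!p)
    std::printf("aligned_alloc: errno == EINVAL? %d\n", errno == EINVAL);

  // posix_memalign reports EINVAL via its return value and leaves errno alone.
  void *q = nullptr;
  int res = posix_memalign(&q, 3, 24);
  std::printf("posix_memalign returned EINVAL? %d\n", res == EINVAL);
  return 0;
}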
Diffstat (limited to 'lib/msan')
-rw-r--r--    lib/msan/msan.h                14
-rw-r--r--    lib/msan/msan_allocator.cc     85
-rw-r--r--    lib/msan/msan_interceptors.cc  49
-rw-r--r--    lib/msan/msan_new_delete.cc     2
4 files changed, 96 insertions, 54 deletions
diff --git a/lib/msan/msan.h b/lib/msan/msan.h
index 0709260ee..fa9c15b88 100644
--- a/lib/msan/msan.h
+++ b/lib/msan/msan.h
@@ -280,10 +280,18 @@ void InitializeInterceptors();
void MsanAllocatorInit();
void MsanAllocatorThreadFinish();
-void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size);
-void *MsanReallocate(StackTrace *stack, void *oldp, uptr size,
- uptr alignment, bool zeroise);
void MsanDeallocate(StackTrace *stack, void *ptr);
+
+void *msan_malloc(uptr size, StackTrace *stack);
+void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack);
+void *msan_realloc(void *ptr, uptr size, StackTrace *stack);
+void *msan_valloc(uptr size, StackTrace *stack);
+void *msan_pvalloc(uptr size, StackTrace *stack);
+void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
+void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
+int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack);
+
void InstallTrapHandler();
void InstallAtExitHandler();
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index a92b7fd12..1034dbdf9 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -13,7 +13,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
@@ -194,20 +196,8 @@ void MsanDeallocate(StackTrace *stack, void *p) {
}
}
-void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
- if (CheckForCallocOverflow(size, nmemb))
- return Allocator::FailureHandler::OnBadRequest();
- return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
-}
-
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
- uptr alignment, bool zeroise) {
- if (!old_p)
- return MsanAllocate(stack, new_size, alignment, zeroise);
- if (!new_size) {
- MsanDeallocate(stack, old_p);
- return nullptr;
- }
+ uptr alignment) {
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
uptr old_size = meta->requested_size;
uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
@@ -215,10 +205,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
// We are not reallocating here.
meta->requested_size = new_size;
if (new_size > old_size) {
- if (zeroise) {
- __msan_clear_and_unpoison((char *)old_p + old_size,
- new_size - old_size);
- } else if (flags()->poison_in_malloc) {
+ if (flags()->poison_in_malloc) {
stack->tag = StackTrace::TAG_ALLOC;
PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
}
@@ -226,8 +213,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
return old_p;
}
uptr memcpy_size = Min(new_size, old_size);
- void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
- // Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
+ void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
if (new_p) {
CopyMemory(new_p, old_p, memcpy_size, stack);
MsanDeallocate(stack, old_p);
@@ -243,6 +229,67 @@ static uptr AllocationSize(const void *p) {
return b->requested_size;
}
+void *msan_malloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
+}
+
+void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
+ return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+ return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
+}
+
+void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
+ if (!ptr)
+ return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
+ if (size == 0) {
+ MsanDeallocate(stack, ptr);
+ return nullptr;
+ }
+ return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
+}
+
+void *msan_valloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
+}
+
+void *msan_pvalloc(uptr size, StackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ // pvalloc(0) should allocate one page.
+ size = size == 0 ? PageSize : RoundUpTo(size, PageSize);
+ return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
+}
+
+void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+}
+
+void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+}
+
+int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ Allocator::FailureHandler::OnBadRequest();
+ return errno_EINVAL;
+ }
+ void *ptr = MsanAllocate(stack, size, alignment, false);
+ if (UNLIKELY(!ptr))
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
} // namespace __msan
using namespace __msan;
diff --git a/lib/msan/msan_interceptors.cc b/lib/msan/msan_interceptors.cc
index 069777c7f..b5d22baca 100644
--- a/lib/msan/msan_interceptors.cc
+++ b/lib/msan/msan_interceptors.cc
@@ -161,58 +161,45 @@ INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) {
INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- CHECK_EQ(alignment & (alignment - 1), 0);
CHECK_NE(memptr, 0);
- *memptr = MsanReallocate(&stack, nullptr, size, alignment, false);
- CHECK_NE(*memptr, 0);
- __msan_unpoison(memptr, sizeof(*memptr));
- return 0;
+ int res = msan_posix_memalign(memptr, alignment, size, &stack);
+ if (!res)
+ __msan_unpoison(memptr, sizeof(*memptr));
+ return res;
}
#if !SANITIZER_FREEBSD
-INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) {
+INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- CHECK_EQ(boundary & (boundary - 1), 0);
- void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
- return ptr;
+ return msan_memalign(alignment, size, &stack);
}
#define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
#else
#define MSAN_MAYBE_INTERCEPT_MEMALIGN
#endif
-INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) {
+INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- CHECK_EQ(boundary & (boundary - 1), 0);
- void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
- return ptr;
+ return msan_aligned_alloc(alignment, size, &stack);
}
-INTERCEPTOR(void *, __libc_memalign, SIZE_T boundary, SIZE_T size) {
+INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- CHECK_EQ(boundary & (boundary - 1), 0);
- void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
- DTLS_on_libc_memalign(ptr, size);
+ void *ptr = msan_memalign(alignment, size, &stack);
+ if (ptr)
+ DTLS_on_libc_memalign(ptr, size);
return ptr;
}
INTERCEPTOR(void *, valloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- void *ptr = MsanReallocate(&stack, nullptr, size, GetPageSizeCached(), false);
- return ptr;
+ return msan_valloc(size, &stack);
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(void *, pvalloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
- uptr PageSize = GetPageSizeCached();
- size = RoundUpTo(size, PageSize);
- if (size == 0) {
- // pvalloc(0) should allocate one page.
- size = PageSize;
- }
- void *ptr = MsanReallocate(&stack, nullptr, size, PageSize, false);
- return ptr;
+ return msan_pvalloc(size, &stack);
}
#define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
#else
@@ -853,7 +840,7 @@ INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
if (UNLIKELY(!msan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
return AllocateFromLocalPool(nmemb * size);
- return MsanCalloc(&stack, nmemb, size);
+ return msan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
@@ -866,12 +853,12 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
new_ptr = AllocateFromLocalPool(copy_size);
} else {
copy_size = size;
- new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false);
+ new_ptr = msan_malloc(copy_size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
- return MsanReallocate(&stack, ptr, size, sizeof(u64), false);
+ return msan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void *, malloc, SIZE_T size) {
@@ -879,7 +866,7 @@ INTERCEPTOR(void *, malloc, SIZE_T size) {
if (UNLIKELY(!msan_inited))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
- return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
+ return msan_malloc(size, &stack);
}
void __msan_allocated_memory(const void *data, uptr size) {
diff --git a/lib/msan/msan_new_delete.cc b/lib/msan/msan_new_delete.cc
index c7295feeb..721926791 100644
--- a/lib/msan/msan_new_delete.cc
+++ b/lib/msan/msan_new_delete.cc
@@ -31,7 +31,7 @@ namespace std {
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
- void *res = MsanReallocate(&stack, 0, size, sizeof(u64), false);\
+ void *res = msan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
return res