Diffstat (limited to 'lib/tsan/rtl/tsan_mman.cc')
-rw-r--r--  lib/tsan/rtl/tsan_mman.cc | 50
1 file changed, 39 insertions(+), 11 deletions(-)
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index 39c0d8607..b160a9736 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -12,6 +12,7 @@
 //===----------------------------------------------------------------------===//
 #include "sanitizer_common/sanitizer_allocator_checks.h"
 #include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_errno.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
@@ -150,13 +151,24 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   OutputReport(thr, rep);
 }
 
+static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
+
 void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
                           bool signal) {
-  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
-    return ReturnNullOrDieOnFailure::OnBadRequest();
+  if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    GET_STACK_TRACE_FATAL(thr, pc);
+    ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
+  }
   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
-  if (UNLIKELY(p == 0))
-    return ReturnNullOrDieOnFailure::OnOOM();
+  if (UNLIKELY(!p)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    GET_STACK_TRACE_FATAL(thr, pc);
+    ReportOutOfMemory(sz, &stack);
+  }
   if (ctx && ctx->initialized)
     OnUserAlloc(thr, pc, (uptr)p, sz, true);
   if (signal)
@@ -178,8 +190,12 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
 }
 
 void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
-  if (UNLIKELY(CheckForCallocOverflow(size, n)))
-    return SetErrnoOnNull(ReturnNullOrDieOnFailure::OnBadRequest());
+  if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+    if (AllocatorMayReturnNull())
+      return SetErrnoOnNull(nullptr);
+    GET_STACK_TRACE_FATAL(thr, pc);
+    ReportCallocOverflow(n, size, &stack);
+  }
   void *p = user_alloc_internal(thr, pc, n * size);
   if (p)
     internal_memset(p, 0, n * size);
@@ -224,7 +240,10 @@ void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
 void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!IsPowerOfTwo(align))) {
     errno = errno_EINVAL;
-    return ReturnNullOrDieOnFailure::OnBadRequest();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    GET_STACK_TRACE_FATAL(thr, pc);
+    ReportInvalidAllocationAlignment(align, &stack);
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -232,11 +251,14 @@ void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
 int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
                         uptr sz) {
   if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
-    ReturnNullOrDieOnFailure::OnBadRequest();
-    return errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    GET_STACK_TRACE_FATAL(thr, pc);
+    ReportInvalidPosixMemalignAlignment(align, &stack);
   }
   void *ptr = user_alloc_internal(thr, pc, sz, align);
   if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by user_alloc_internal.
     return errno_ENOMEM;
   CHECK(IsAligned((uptr)ptr, align));
   *memptr = ptr;
@@ -246,7 +268,10 @@ int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
 void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
   if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
     errno = errno_EINVAL;
-    return ReturnNullOrDieOnFailure::OnBadRequest();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    GET_STACK_TRACE_FATAL(thr, pc);
+    ReportInvalidAlignedAllocAlignment(sz, align, &stack);
   }
   return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
 }
@@ -259,7 +284,10 @@ void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
   uptr PageSize = GetPageSizeCached();
   if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
     errno = errno_ENOMEM;
-    return ReturnNullOrDieOnFailure::OnBadRequest();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    GET_STACK_TRACE_FATAL(thr, pc);
+    ReportPvallocOverflow(sz, &stack);
  }
   // pvalloc(0) should allocate one page.
   sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
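
The recurring pattern in this patch: each allocator entry point that used to call ReturnNullOrDieOnFailure::OnBadRequest() or ::OnOOM() now consults AllocatorMayReturnNull() itself and either fails gracefully (nullptr or an errno code) or emits a specific fatal report (ReportAllocationSizeTooBig, ReportCallocOverflow, ReportOutOfMemory, ...) along with a stack trace. The sketch below is a minimal standalone model of that control flow, not the sanitizer runtime itself; MayReturnNull, ReportTooBig, g_may_return_null, and the 1 MB cap are hypothetical stand-ins for AllocatorMayReturnNull, ReportAllocationSizeTooBig, the allocator_may_return_null flag, and kMaxAllowedMallocSize.

// Standalone sketch of the failure-handling pattern adopted by this patch.
// All names here are stand-ins, not the real sanitizer_common interfaces.
#include <cstdio>
#include <cstdlib>

static bool g_may_return_null = true;  // models allocator_may_return_null=1

static bool MayReturnNull() { return g_may_return_null; }

// Models the fatal path: print a diagnostic (the runtime would also print a
// stack trace), then die. Never returns.
static void ReportTooBig(size_t sz, size_t max) {
  fprintf(stderr, "allocation size %zu exceeds maximum supported size %zu\n",
          sz, max);
  abort();
}

static void *CheckedAlloc(size_t sz) {
  const size_t kMaxSize = 1 << 20;  // stand-in for kMaxAllowedMallocSize
  if (sz >= kMaxSize) {
    if (MayReturnNull())
      return nullptr;            // graceful failure: caller handles nullptr
    ReportTooBig(sz, kMaxSize);  // fatal failure: report and abort
  }
  return malloc(sz);
}

int main() {
  void *p = CheckedAlloc(1 << 21);  // over the cap, so this yields nullptr
  printf("p = %p\n", p);            // prints a null pointer
  free(p);                          // free(nullptr) is a no-op
  return 0;
}

Flipping g_may_return_null to false makes the same call print the diagnostic and abort instead, which mirrors what TSan users get with allocator_may_return_null=0.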