author	Alex Shlyapnikov <alekseys@google.com>	2017-07-24 21:22:59 +0000
committer	Alex Shlyapnikov <alekseys@google.com>	2017-07-24 21:22:59 +0000
commit	f728f46988ea1fc5c921dfdf53eea02a1050f80e
tree	24089faf3aad3f4c1d227df968a48ba559d94af1 /lib/tsan/rtl
parent	c78e1b8ec98c38eb26d139313711548e1f82464a
[Sanitizers] TSan allocator set errno on failure.
Summary:
Set proper errno code on allocation failures and change the realloc, pvalloc,
aligned_alloc, memalign and posix_memalign implementations to satisfy their
man-specified requirements. Modify the allocator API implementation to bring
it closer to the other sanitizers' allocators.

Reviewers: dvyukov

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D35690

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@308929 91177308-0d34-0410-b5e6-96231b3b80d8
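The pattern the patch applies throughout: one errno-neutral internal core
(user_alloc_internal) plus thin public entry points that set errno as the
malloc(3)/memalign(3) man pages require. A minimal standalone sketch of that
split, under illustrative names (alloc_core, set_errno_on_null, my_malloc and
my_memalign are not runtime code):

  #include <cerrno>
  #include <cstdlib>

  // Stand-in for user_alloc_internal(): returns null on failure and never
  // touches errno itself, so runtime-internal callers cannot clobber the
  // intercepted program's errno.
  static void *alloc_core(size_t size, size_t align) {
    if (align < sizeof(void *)) align = sizeof(void *);  // posix_memalign minimum
    void *p = nullptr;
    return posix_memalign(&p, align, size) == 0 ? p : nullptr;
  }

  // Mirrors sanitizer_common's SetErrnoOnNull(): errno is set only at the
  // public allocation boundary.
  static void *set_errno_on_null(void *p) {
    if (!p) errno = ENOMEM;
    return p;
  }

  // Public malloc-style entry point.
  void *my_malloc(size_t size) {
    return set_errno_on_null(alloc_core(size, sizeof(void *)));
  }

  // Public memalign-style entry point: EINVAL for a non-power-of-two
  // alignment, ENOMEM on allocation failure.
  void *my_memalign(size_t align, size_t size) {
    if (align == 0 || (align & (align - 1)) != 0) {
      errno = EINVAL;
      return nullptr;
    }
    return set_errno_on_null(alloc_core(size, align));
  }

The tsan_fd.cc hunks below are the internal-caller side of this split: fd
table memory is runtime bookkeeping, so it moves to the errno-neutral
user_alloc_internal().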
Diffstat (limited to 'lib/tsan/rtl')
-rw-r--r--	lib/tsan/rtl/tsan_fd.cc	6
-rw-r--r--	lib/tsan/rtl/tsan_interceptors.cc	16
-rw-r--r--	lib/tsan/rtl/tsan_libdispatch_mac.cc	3
-rw-r--r--	lib/tsan/rtl/tsan_malloc_mac.cc	4
-rw-r--r--	lib/tsan/rtl/tsan_mman.cc	86
-rw-r--r--	lib/tsan/rtl/tsan_mman.h	15
6 files changed, 93 insertions, 37 deletions
diff --git a/lib/tsan/rtl/tsan_fd.cc b/lib/tsan/rtl/tsan_fd.cc
index d84df4a64..f13a7432e 100644
--- a/lib/tsan/rtl/tsan_fd.cc
+++ b/lib/tsan/rtl/tsan_fd.cc
@@ -48,8 +48,8 @@ static bool bogusfd(int fd) {
}
static FdSync *allocsync(ThreadState *thr, uptr pc) {
- FdSync *s = (FdSync*)user_alloc(thr, pc, sizeof(FdSync), kDefaultAlignment,
- false);
+ FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
+ kDefaultAlignment, false);
atomic_store(&s->rc, 1, memory_order_relaxed);
return s;
}
@@ -79,7 +79,7 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
if (l1 == 0) {
uptr size = kTableSizeL2 * sizeof(FdDesc);
// We need this to reside in user memory to properly catch races on it.
- void *p = user_alloc(thr, pc, size, kDefaultAlignment, false);
+ void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
internal_memset(p, 0, size);
MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
diff --git a/lib/tsan/rtl/tsan_interceptors.cc b/lib/tsan/rtl/tsan_interceptors.cc
index 001123f49..4c24c46eb 100644
--- a/lib/tsan/rtl/tsan_interceptors.cc
+++ b/lib/tsan/rtl/tsan_interceptors.cc
@@ -584,7 +584,7 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ return user_memalign(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
@@ -730,7 +730,7 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
@@ -739,21 +739,20 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
- SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
- return user_alloc(thr, pc, sz, align);
+ SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
+ return user_aligned_alloc(thr, pc, align, sz);
}
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(valloc, sz);
- return user_alloc(thr, pc, sz, GetPageSizeCached());
+ return user_valloc(thr, pc, sz);
}
#endif
#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
- sz = RoundUp(sz, GetPageSizeCached());
- return user_alloc(thr, pc, sz, GetPageSizeCached());
+ return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
@@ -763,8 +762,7 @@ TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
- *memptr = user_alloc(thr, pc, sz, align);
- return 0;
+ return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif
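Note the posix_memalign change just above: the old interceptor stored a
possibly-null pointer through memptr and unconditionally returned 0, while
posix_memalign(3) requires the failure to come back as the return value
(EINVAL or ENOMEM) with errno left untouched. A small usage check of that
contract, runnable with or without TSan:

  #include <cerrno>
  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>

  int main() {
    void *p = nullptr;

    // Alignment 3 is not a power of two: the error is reported via the
    // return value, not via errno.
    errno = 0;
    int rc = posix_memalign(&p, 3, 128);
    printf("bad align: rc=%d (EINVAL=%d) errno=%d\n", rc, EINVAL, errno);

    // Valid request: rc == 0 and the result is 64-byte aligned.
    rc = posix_memalign(&p, 64, 128);
    if (rc == 0) {
      printf("ok: aligned=%d\n", (int)((uintptr_t)p % 64 == 0));
      free(p);
    }
    return 0;
  }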
diff --git a/lib/tsan/rtl/tsan_libdispatch_mac.cc b/lib/tsan/rtl/tsan_libdispatch_mac.cc
index 8c759a3be..4a36f03ff 100644
--- a/lib/tsan/rtl/tsan_libdispatch_mac.cc
+++ b/lib/tsan/rtl/tsan_libdispatch_mac.cc
@@ -86,7 +86,8 @@ static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
void *orig_context,
dispatch_function_t orig_work) {
tsan_block_context_t *new_context =
- (tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t));
+ (tsan_block_context_t *)user_alloc_internal(thr, pc,
+ sizeof(tsan_block_context_t));
new_context->queue = queue;
new_context->orig_context = orig_context;
new_context->orig_work = orig_work;
diff --git a/lib/tsan/rtl/tsan_malloc_mac.cc b/lib/tsan/rtl/tsan_malloc_mac.cc
index 8d31ccbca..455c95df6 100644
--- a/lib/tsan/rtl/tsan_malloc_mac.cc
+++ b/lib/tsan/rtl/tsan_malloc_mac.cc
@@ -26,7 +26,7 @@ using namespace __tsan;
#define COMMON_MALLOC_FORCE_UNLOCK()
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
void *p = \
- user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
+ user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
#define COMMON_MALLOC_MALLOC(size) \
if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
SCOPED_INTERCEPTOR_RAW(malloc, size); \
@@ -43,7 +43,7 @@ using namespace __tsan;
if (cur_thread()->in_symbolizer) \
return InternalAlloc(size, nullptr, GetPageSizeCached()); \
SCOPED_INTERCEPTOR_RAW(valloc, size); \
- void *p = user_alloc(thr, pc, size, GetPageSizeCached())
+ void *p = user_valloc(thr, pc, size)
#define COMMON_MALLOC_FREE(ptr) \
if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
SCOPED_INTERCEPTOR_RAW(free, ptr); \
diff --git a/lib/tsan/rtl/tsan_mman.cc b/lib/tsan/rtl/tsan_mman.cc
index f79dccddb..3f7a5e76c 100644
--- a/lib/tsan/rtl/tsan_mman.cc
+++ b/lib/tsan/rtl/tsan_mman.cc
@@ -149,11 +149,12 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
OutputReport(thr, rep);
}
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+ bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
return Allocator::FailureHandler::OnBadRequest();
void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
- if (p == 0)
+ if (UNLIKELY(p == 0))
return 0;
if (ctx && ctx->initialized)
OnUserAlloc(thr, pc, (uptr)p, sz, true);
@@ -162,15 +163,6 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
return p;
}
-void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
- if (CheckForCallocOverflow(size, n))
- return Allocator::FailureHandler::OnBadRequest();
- void *p = user_alloc(thr, pc, n * size);
- if (p)
- internal_memset(p, 0, n * size);
- return p;
-}
-
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
ScopedGlobalProcessor sgp;
if (ctx && ctx->initialized)
@@ -180,6 +172,19 @@ void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
SignalUnsafeCall(thr, pc);
}
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n)))
+ return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
+ void *p = user_alloc_internal(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return SetErrnoOnNull(p);
+}
+
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
ctx->metamap.AllocBlock(thr, pc, p, sz);
@@ -200,15 +205,60 @@ void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
- void *p2 = user_alloc(thr, pc, sz);
- if (p2 == 0)
- return 0;
- if (p) {
- uptr oldsz = user_alloc_usable_size(p);
- internal_memcpy(p2, p, min(oldsz, sz));
+ if (!p)
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+ if (!sz) {
user_free(thr, pc, p);
+ return nullptr;
}
- return p2;
+ void *new_p = user_alloc_internal(thr, pc, sz);
+ if (new_p) {
+ uptr old_sz = user_alloc_usable_size(p);
+ internal_memcpy(new_p, p, min(old_sz, sz));
+ user_free(thr, pc, p);
+ }
+ return SetErrnoOnNull(new_p);
+}
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!IsPowerOfTwo(align))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+ Allocator::FailureHandler::OnBadRequest();
+ return errno_EINVAL;
+ }
+ void *ptr = user_alloc_internal(thr, pc, sz, align);
+ if (UNLIKELY(!ptr))
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, align));
+ *memptr = ptr;
+ return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+ errno = errno_EINVAL;
+ return Allocator::FailureHandler::OnBadRequest();
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+ uptr PageSize = GetPageSizeCached();
+ // pvalloc(0) should allocate one page.
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
}
uptr user_alloc_usable_size(const void *p) {
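user_pvalloc() above rounds the request up to a whole number of pages and
treats pvalloc(0) as a one-page request, per pvalloc(3). A standalone sketch
of just that size computation (pvalloc_size is an illustrative name, not
runtime code):

  #include <cassert>
  #include <cstddef>
  #include <unistd.h>

  // RoundUpTo(sz, PageSize) for a power-of-two page size, with the
  // pvalloc(0) special case.
  size_t pvalloc_size(size_t sz, size_t page) {
    return sz ? (sz + page - 1) & ~(page - 1) : page;
  }

  int main() {
    const size_t page = (size_t)sysconf(_SC_PAGESIZE);
    assert(pvalloc_size(0, page) == page);            // one page, not zero
    assert(pvalloc_size(1, page) == page);
    assert(pvalloc_size(page, page) == page);
    assert(pvalloc_size(page + 1, page) == 2 * page);
    return 0;
  }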
diff --git a/lib/tsan/rtl/tsan_mman.h b/lib/tsan/rtl/tsan_mman.h
index 8cdeeb35a..6042c5c5d 100644
--- a/lib/tsan/rtl/tsan_mman.h
+++ b/lib/tsan/rtl/tsan_mman.h
@@ -27,13 +27,20 @@ void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
// For user allocations.
-void *user_alloc(ThreadState *thr, uptr pc, uptr sz,
- uptr align = kDefaultAlignment, bool signal = true);
-void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
+ uptr align = kDefaultAlignment, bool signal = true);
// Does not accept NULL.
void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+// Interceptor implementations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
-void *user_alloc_aligned(ThreadState *thr, uptr pc, uptr sz, uptr align);
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz);
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
uptr user_alloc_usable_size(const void *p);
// Invoking malloc/free hooks that may be installed by the user.