summaryrefslogtreecommitdiff
path: root/lib/sanitizer_common/sanitizer_allocator.cc
diff options
context:
space:
mode:
author    Kuba Brecka <kuba.brecka@gmail.com>  2016-06-06 18:18:47 +0000
committer Kuba Brecka <kuba.brecka@gmail.com>  2016-06-06 18:18:47 +0000
commit    4af8674a2233f62cb385d0e47ddb1fd7d6134141 (patch)
tree      bad52863c02b605bf989a4eb63c610cfddda9d5c /lib/sanitizer_common/sanitizer_allocator.cc
parent    e2e2f47b377ba5c5151bddc528f12fc9c2812bdf (diff)
[tsan] Switch to InternalAlloc everywhere __libc_malloc is currently used
This patch replaces all uses of __libc_malloc and friends with the internal allocator. It seems that the only reason why we have calls to __libc_malloc in the first place was the lack of the internal allocator at the time. Using the internal allocator will also make sure that the system allocator is never used (this is the same behavior as ASan), and we don’t have to worry about working with unknown pointers coming from the system allocator. Differential Revision: http://reviews.llvm.org/D21025 git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@271916 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator.cc')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.cc | 82
1 file changed, 67 insertions(+), 15 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index 538e2db95..df298c622 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -22,30 +22,47 @@ namespace __sanitizer {
#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
+extern "C" void *__libc_memalign(uptr alignment, uptr size);
+extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
-# define LIBC_MALLOC __libc_malloc
-# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
-# define LIBC_MALLOC malloc
-# define LIBC_FREE free
+# define __libc_malloc malloc
+static void *__libc_memalign(uptr alignment, uptr size) {
+ void *p;
+ uptr error = posix_memalign(&p, alignment, size);
+ if (error) return nullptr;
+ return p;
+}
+# define __libc_realloc realloc
+# define __libc_free free
# endif
-static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ (void)cache;
+ if (alignment == 0)
+ return __libc_malloc(size);
+ else
+ return __libc_memalign(alignment, size);
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
(void)cache;
- return LIBC_MALLOC(size);
+ return __libc_realloc(ptr, size);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
(void)cache;
- LIBC_FREE(ptr);
+ __libc_free(ptr);
}
InternalAllocator *internal_allocator() {
return 0;
}
-#else // SANITIZER_GO
+#else // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@@ -68,13 +85,26 @@ InternalAllocator *internal_allocator() {
return internal_allocator_instance;
}
-static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ if (alignment == 0) alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
- return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
- false);
+ return internal_allocator()->Allocate(&internal_allocator_cache, size,
+ alignment, false);
}
- return internal_allocator()->Allocate(cache, size, 8, false);
+ return internal_allocator()->Allocate(cache, size, alignment, false);
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
+ uptr alignment = 8;
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
+ size, alignment);
+ }
+ return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
@@ -85,20 +115,42 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // SANITIZER_GO
+#endif // defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
-void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size)
return nullptr;
- void *p = RawInternalAlloc(size + sizeof(u64), cache);
+ void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
if (!p)
return nullptr;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
+void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
+ if (!addr)
+ return InternalAlloc(size, cache);
+ if (size + sizeof(u64) < size)
+ return nullptr;
+ addr = (char*)addr - sizeof(u64);
+ size = size + sizeof(u64);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
+ void *p = RawInternalRealloc(addr, size, cache);
+ if (!p)
+ return nullptr;
+ return (char*)p + sizeof(u64);
+}
+
+void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
+ if (CallocShouldReturnNullDueToOverflow(count, size))
+ return internal_allocator()->ReturnNullOrDie();
+ void *p = InternalAlloc(count * size, cache);
+ if (p) internal_memset(p, 0, count * size);
+ return p;
+}
+
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;