diff options
author | Alexey Samsonov <samsonov@google.com> | 2013-05-29 09:15:39 +0000 |
---|---|---|
committer | Alexey Samsonov <samsonov@google.com> | 2013-05-29 09:15:39 +0000 |
commit | 1f3c2fee395abc36230c445e9ebdba55c4729d35 (patch) | |
tree | 55d68895e3408521804d7db626a62e4376a7c9d9 /lib/sanitizer_common/sanitizer_allocator.cc | |
parent | 9d1525ec52430d0b8ffd6d0893b7f5529105b321 (diff) |
Make InternalAlloc/InternalFree in sanitizer runtimes libc-free by switching to a custom allocator.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@182836 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator.cc')
-rw-r--r-- | lib/sanitizer_common/sanitizer_allocator.cc | 94 |
1 file changed, 76 insertions, 18 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator.cc b/lib/sanitizer_common/sanitizer_allocator.cc
index a97a70937..efb06cf84 100644
--- a/lib/sanitizer_common/sanitizer_allocator.cc
+++ b/lib/sanitizer_common/sanitizer_allocator.cc
@@ -9,44 +9,102 @@
 //
 // This file is shared between AddressSanitizer and ThreadSanitizer
 // run-time libraries.
-// This allocator that is used inside run-times.
+// This allocator is used inside run-times.
 //===----------------------------------------------------------------------===//
+#include "sanitizer_allocator.h"
+#include "sanitizer_allocator_internal.h"
 #include "sanitizer_common.h"
 
-// FIXME: We should probably use more low-level allocator that would
-// mmap some pages and split them into chunks to fulfill requests.
-#if SANITIZER_LINUX && !SANITIZER_ANDROID
-extern "C" void *__libc_malloc(__sanitizer::uptr size);
+namespace __sanitizer {
+
+// ThreadSanitizer for Go uses libc malloc/free.
+#if defined(SANITIZER_GO)
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern "C" void *__libc_malloc(uptr size);
 extern "C" void __libc_free(void *ptr);
-# define LIBC_MALLOC __libc_malloc
-# define LIBC_FREE __libc_free
-#else  // SANITIZER_LINUX && !SANITIZER_ANDROID
-# include <stdlib.h>
-# define LIBC_MALLOC malloc
-# define LIBC_FREE free
-#endif  // SANITIZER_LINUX && !SANITIZER_ANDROID
+#  define LIBC_MALLOC __libc_malloc
+#  define LIBC_FREE __libc_free
+# else
+#  include <stdlib.h>
+#  define LIBC_MALLOC malloc
+#  define LIBC_FREE free
+# endif
 
-namespace __sanitizer {
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+  (void)cache;
+  return LIBC_MALLOC(size);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+  (void)cache;
+  LIBC_FREE(ptr);
+}
+
+InternalAllocator *internal_allocator() {
+  return 0;
+}
+
+#else  // SANITIZER_GO
+
+static char internal_alloc_placeholder[sizeof(InternalAllocator)] ALIGNED(64);
+static atomic_uint8_t internal_allocator_initialized;
+static StaticSpinMutex internal_alloc_init_mu;
+
+static InternalAllocatorCache internal_allocator_cache;
+static StaticSpinMutex internal_allocator_cache_mu;
+
+InternalAllocator *internal_allocator() {
+  InternalAllocator *internal_allocator_instance =
+      reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
+  if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
+    SpinMutexLock l(&internal_alloc_init_mu);
+    if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
+        0) {
+      internal_allocator_instance->Init();
+      atomic_store(&internal_allocator_initialized, 1, memory_order_release);
+    }
+  }
+  return internal_allocator_instance;
+}
+
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+  if (cache == 0) {
+    SpinMutexLock l(&internal_allocator_cache_mu);
+    return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
+                                          false);
+  }
+  return internal_allocator()->Allocate(cache, size, 8, false);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+  if (cache == 0) {
+    SpinMutexLock l(&internal_allocator_cache_mu);
+    return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
+  }
+  internal_allocator()->Deallocate(cache, ptr);
+}
+
+#endif  // SANITIZER_GO
 
 const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
 
-void *InternalAlloc(uptr size) {
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
   if (size + sizeof(u64) < size)
     return 0;
-  void *p = LIBC_MALLOC(size + sizeof(u64));
+  void *p = RawInternalAlloc(size + sizeof(u64), cache);
   if (p == 0)
     return 0;
   ((u64*)p)[0] = kBlockMagic;
   return (char*)p + sizeof(u64);
 }
 
-void InternalFree(void *addr) {
+void InternalFree(void *addr, InternalAllocatorCache *cache) {
   if (addr == 0)
     return;
   addr = (char*)addr - sizeof(u64);
-  CHECK_EQ(((u64*)addr)[0], kBlockMagic);
+  CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
   ((u64*)addr)[0] = 0;
-  LIBC_FREE(addr);
+  RawInternalFree(addr, cache);
 }
 
 // LowLevelAllocator