diff options
author | Evgeniy Stepanov <eugeni.stepanov@gmail.com> | 2014-04-04 09:47:41 +0000 |
---|---|---|
committer | Evgeniy Stepanov <eugeni.stepanov@gmail.com> | 2014-04-04 09:47:41 +0000 |
commit | ba4ad34df1af1c321da0981e1aaff9d35c45d37f (patch) | |
tree | e1d8ac67bf297961c9cf31724f7709c2380d507c /lib/msan/msan_allocator.cc | |
parent | 8dc0d7f875b37003ce805dfe04c88bee3f02a6b8 (diff) |
[msan] Introduce MsanThread. Move thread-local allocator cache out of TLS.
This reduces .tbss from 109K down to almost nothing.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@205618 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/msan/msan_allocator.cc')
-rw-r--r-- | lib/msan/msan_allocator.cc | 53 |
1 file changed, 40 insertions, 13 deletions
diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index 3c74142bc..90b9b31fb 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -15,6 +15,8 @@
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "msan.h"
+#include "msan_allocator.h"
+#include "msan_thread.h"

 namespace __msan {

@@ -48,8 +50,9 @@ typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
                           SecondaryAllocator> Allocator;

-static THREADLOCAL AllocatorCache cache;
 static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static SpinMutex fallback_mutex;

 static int inited = 0;

@@ -60,35 +63,51 @@ static inline void Init() {
   allocator.Init();
 }

-void MsanAllocatorThreadFinish() {
-  allocator.SwallowCache(&cache);
+AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
 }

-static void *MsanAllocate(StackTrace *stack, uptr size,
-                          uptr alignment, bool zeroise) {
+void MsanThreadLocalMallocStorage::CommitBack() {
+  allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
+                          bool zeroise) {
   Init();
   if (size > kMaxAllowedMallocSize) {
     Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
            (void *)size);
     return AllocatorReturnNull();
   }
-  void *res = allocator.Allocate(&cache, size, alignment, false);
-  Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(res));
+  MsanThread *t = GetCurrentThread();
+  void *allocated;
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocated = allocator.Allocate(cache, size, alignment, false);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocated = allocator.Allocate(cache, size, alignment, false);
+  }
+  Metadata *meta =
+      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
   meta->requested_size = size;
   if (zeroise) {
-    __msan_clear_and_unpoison(res, size);
+    __msan_clear_and_unpoison(allocated, size);
   } else if (flags()->poison_in_malloc) {
-    __msan_poison(res, size);
+    __msan_poison(allocated, size);
     if (__msan_get_track_origins()) {
       u32 stack_id = StackDepotPut(stack->trace, stack->size);
       CHECK(stack_id);
       CHECK_EQ((stack_id >> 31), 0);  // Higher bit is occupied by stack origins.
-      __msan_set_origin(res, size, stack_id);
+      __msan_set_origin(allocated, size, stack_id);
     }
   }
-  MSAN_MALLOC_HOOK(res, size);
-  return res;
+  MSAN_MALLOC_HOOK(allocated, size);
+  return allocated;
 }

 void MsanDeallocate(StackTrace *stack, void *p) {
@@ -110,7 +129,15 @@ void MsanDeallocate(StackTrace *stack, void *p) {
       __msan_set_origin(p, size, stack_id);
     }
   }
-  allocator.Deallocate(&cache, p);
+  MsanThread *t = GetCurrentThread();
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocator.Deallocate(cache, p);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocator.Deallocate(cache, p);
+  }
 }

 void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,