author     Francis Ricci <francisjricci@gmail.com>   2017-03-22 18:42:43 +0000
committer  Francis Ricci <francisjricci@gmail.com>   2017-03-22 18:42:43 +0000
commit     65ede46c9b256dd0276361730dbac8b64de259a5 (patch)
tree       9136bdf459c6636518aa0652ec3761a99b1bfd14 /lib/lsan/lsan_allocator.cc
parent     955475ab45db24c49adad12618dfb0a25c5e3e74 (diff)
Factor lsan allocator cache accesses into a function
Summary:
This patch is the first step towards allowing us to move away from
using __thread for the allocator cache on darwin, which is required
for building lsan for darwin on ios version 7 and on iossim i386.
This will be followed by patches to move the function into
OS-specific files.

Reviewers: kubamracek, kcc

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D29994

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@298537 91177308-0d34-0410-b5e6-96231b3b80d8
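For context, the point of routing every cache access through GetAllocatorCache() is that the accessor's definition can later be swapped per OS without touching call sites. The following is a minimal standalone sketch of that idea, assuming a platform where __thread is unavailable and the cache is instead backed by pthread thread-specific data; the AllocatorCache stand-in struct and the names cache_key/CreateKey are illustrative, not the actual follow-up code.

#include <pthread.h>
#include <cstdlib>

// Stand-in for lsan's AllocatorCache; the real type comes from the
// sanitizer allocator headers.
struct AllocatorCache { char storage[64]; };

// On platforms with working __thread, the definition can stay as in
// this patch:
//
//   static THREADLOCAL AllocatorCache allocator_cache;
//   AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
//
// Hypothetical variant for targets without __thread: lazily allocate
// one zero-initialized cache per thread via pthread TLS.
static pthread_key_t cache_key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;

static void CreateKey() {
  // free() is a valid per-thread destructor for the calloc'd cache.
  pthread_key_create(&cache_key, free);
}

AllocatorCache *GetAllocatorCache() {
  pthread_once(&key_once, CreateKey);
  void *p = pthread_getspecific(cache_key);
  if (!p) {
    p = calloc(1, sizeof(AllocatorCache));  // zeroed, like static TLS
    pthread_setspecific(cache_key, p);
  }
  return static_cast<AllocatorCache *>(p);
}

Because callers such as allocator.Allocate(GetAllocatorCache(), ...) are indifferent to where the cache lives, either definition can be selected in an OS-specific file, which is the direction the summary describes.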
Diffstat (limited to 'lib/lsan/lsan_allocator.cc')
-rw-r--r--  lib/lsan/lsan_allocator.cc  17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/lib/lsan/lsan_allocator.cc b/lib/lsan/lsan_allocator.cc
index 428485796..a08dcf286 100644
--- a/lib/lsan/lsan_allocator.cc
+++ b/lib/lsan/lsan_allocator.cc
@@ -70,7 +70,8 @@ typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
static Allocator allocator;
-static THREADLOCAL AllocatorCache cache;
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
void InitializeAllocator() {
allocator.InitLinkerInitialized(
@@ -79,7 +80,7 @@ void InitializeAllocator() {
}
void AllocatorThreadFinish() {
- allocator.SwallowCache(&cache);
+ allocator.SwallowCache(GetAllocatorCache());
}
static ChunkMetadata *Metadata(const void *p) {
@@ -111,7 +112,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
return nullptr;
}
- void *p = allocator.Allocate(&cache, size, alignment, false);
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment, false);
// Do not rely on the allocator to clear the memory (it's slow).
if (cleared && allocator.FromPrimary(p))
memset(p, 0, size);
@@ -125,7 +126,7 @@ void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
RegisterDeallocation(p);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
}
void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
@@ -133,17 +134,17 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
RegisterDeallocation(p);
if (new_size > kMaxAllowedMallocSize) {
Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
- allocator.Deallocate(&cache, p);
+ allocator.Deallocate(GetAllocatorCache(), p);
return nullptr;
}
- p = allocator.Reallocate(&cache, p, new_size, alignment);
+ p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
RegisterAllocation(stack, p, new_size);
return p;
}
void GetAllocatorCacheRange(uptr *begin, uptr *end) {
- *begin = (uptr)&cache;
- *end = *begin + sizeof(cache);
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
}
uptr GetMallocUsableSize(const void *p) {