path: root/lib/scudo/scudo_allocator.cpp
author    Kostya Kortchinsky <kostyak@google.com>  2017-09-25 15:12:08 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2017-09-25 15:12:08 +0000
commit    9230e83455810a9967796d00ac39cfe8d22bb8ef (patch)
tree      1825e13c543a5258dc7668717977ac99d6b39fdf /lib/scudo/scudo_allocator.cpp
parent    ae2601605cdda1bc2730041f48e3b2786ad896e5 (diff)
[scudo] Scudo thread specific data refactor, part 2
Summary:
Following D38139, we now consolidate the TSD definition, merging the shared TSD
definition with the exclusive TSD definition. We introduce a boolean, set at
initialization, denoting whether the TSD needs to be unlocked after use. This
adds some unused members to the exclusive TSD, but increases consistency and
reduces fragmentation of the definitions.

We remove the fallback mechanism from `scudo_allocator.cpp` and add a fallback
TSD in the non-shared version. Since the shared version doesn't require one,
this makes more sense overall.

There are a couple of additional cosmetic changes: removing the header guards
from the remaining `.inc` files, and adding an error string to a `CHECK`.

Question to reviewers: I thought about friending `getTSDAndLock` in `ScudoTSD`
so that the `FallbackTSD` could call `Mutex.Lock()` directly instead of
`lock()`, which involves zeroing out the `Precedence`, which is unused
otherwise. Is it worth doing?

Reviewers: alekseyshl, dvyukov, kcc

Reviewed By: dvyukov

Subscribers: srhines, llvm-commits

Differential Revision: https://reviews.llvm.org/D38183

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@314110 91177308-0d34-0410-b5e6-96231b3b80d8
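For orientation, the following is a minimal, self-contained sketch (not the actual scudo sources) of how a single consolidated TSD can serve both the exclusive and shared models through an UnlockRequired flag set at initialization. The SpinMutex stand-in, the placeholder array size, and the guarded lock()/unlock() bodies are assumptions for illustration; the Cache and Prng members visible in the diff below are omitted here.

    #include <atomic>
    #include <cstring>

    // Stand-in for the sanitizer_common spin mutex; illustration only.
    struct SpinMutex {
      void Lock()   { while (Flag.exchange(true, std::memory_order_acquire)) {} }
      void Unlock() { Flag.store(false, std::memory_order_release); }
     private:
      std::atomic<bool> Flag{false};
    };

    struct ScudoTSDSketch {
      void init(bool Shared) {
        // Exclusive (per-thread) TSDs are never handed to another thread,
        // so they never need unlocking; shared TSDs do.
        UnlockRequired = Shared;
        std::memset(QuarantineCachePlaceHolder, 0,
                    sizeof(QuarantineCachePlaceHolder));
      }

      void lock()   { if (UnlockRequired) Mutex.Lock(); }
      void unlock() { if (UnlockRequired) Mutex.Unlock(); }

     private:
      bool UnlockRequired = false;
      SpinMutex Mutex;   // Unused by the exclusive TSD, kept so that both
                         // models share one consolidated layout.
      unsigned char QuarantineCachePlaceHolder[64];
    };

This arrangement matches the unconditional `TSD->unlock()` calls visible in the hunks below: callers no longer need to know which model is in use.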
Diffstat (limited to 'lib/scudo/scudo_allocator.cpp')
-rw-r--r--   lib/scudo/scudo_allocator.cpp   53
1 file changed, 12 insertions(+), 41 deletions(-)
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index e490a469d..606439ea1 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -269,14 +269,6 @@ struct ScudoAllocator {
StaticSpinMutex GlobalPrngMutex;
ScudoPrng GlobalPrng;
- // The fallback caches are used when the thread local caches have been
- // 'detroyed' on thread tear-down. They are protected by a Mutex as they can
- // be accessed by different threads.
- StaticSpinMutex FallbackMutex;
- AllocatorCache FallbackAllocatorCache;
- ScudoQuarantineCache FallbackQuarantineCache;
- ScudoPrng FallbackPrng;
-
u32 QuarantineChunksUpToSize;
bool DeallocationTypeMismatch;
@@ -284,8 +276,7 @@ struct ScudoAllocator {
bool DeleteSizeMismatch;
explicit ScudoAllocator(LinkerInitialized)
- : AllocatorQuarantine(LINKER_INITIALIZED),
- FallbackQuarantineCache(LINKER_INITIALIZED) {}
+ : AllocatorQuarantine(LINKER_INITIALIZED) {}
void init(const AllocatorOptions &Options) {
// Verify that the header offset field can hold the maximum offset. In the
@@ -329,8 +320,6 @@ struct ScudoAllocator {
QuarantineChunksUpToSize = Options.QuarantineChunksUpToSize;
GlobalPrng.init();
Cookie = GlobalPrng.getU64();
- BackendAllocator.initCache(&FallbackAllocatorCache);
- FallbackPrng.init();
}
// Helper function that checks for a valid Scudo chunk. nullptr isn't.
@@ -374,16 +363,9 @@ struct ScudoAllocator {
if (FromPrimary) {
AllocSize = AlignedSize;
ScudoTSD *TSD = getTSDAndLock();
- if (LIKELY(TSD)) {
- Salt = TSD->Prng.getU8();
- Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
- TSD->unlock();
- } else {
- SpinMutexLock l(&FallbackMutex);
- Salt = FallbackPrng.getU8();
- Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache,
- AllocSize);
- }
+ Salt = TSD->Prng.getU8();
+ Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
+ TSD->unlock();
} else {
{
SpinMutexLock l(&GlobalPrngMutex);
@@ -446,13 +428,8 @@ struct ScudoAllocator {
void *Ptr = Chunk->getAllocBeg(Header);
if (Header->FromPrimary) {
ScudoTSD *TSD = getTSDAndLock();
- if (LIKELY(TSD)) {
- getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
- TSD->unlock();
- } else {
- SpinMutexLock Lock(&FallbackMutex);
- getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
- }
+ getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
+ TSD->unlock();
} else {
getBackendAllocator().deallocateSecondary(Ptr);
}
@@ -467,17 +444,10 @@ struct ScudoAllocator {
NewHeader.State = ChunkQuarantine;
Chunk->compareExchangeHeader(&NewHeader, Header);
ScudoTSD *TSD = getTSDAndLock();
- if (LIKELY(TSD)) {
- AllocatorQuarantine.Put(getQuarantineCache(TSD),
- QuarantineCallback(&TSD->Cache),
- Chunk, EstimatedSize);
- TSD->unlock();
- } else {
- SpinMutexLock l(&FallbackMutex);
- AllocatorQuarantine.Put(&FallbackQuarantineCache,
- QuarantineCallback(&FallbackAllocatorCache),
- Chunk, EstimatedSize);
- }
+ AllocatorQuarantine.Put(getQuarantineCache(TSD),
+ QuarantineCallback(&TSD->Cache),
+ Chunk, EstimatedSize);
+ TSD->unlock();
}
}
@@ -625,7 +595,8 @@ static void initScudoInternal(const AllocatorOptions &Options) {
Instance.init(Options);
}
-void ScudoTSD::init() {
+void ScudoTSD::init(bool Shared) {
+ UnlockRequired = Shared;
getBackendAllocator().initCache(&Cache);
Prng.init();
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
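
As a rough illustration of the simplified call sites above, here is a self-contained mock (with stand-in backend and TSD types, not the real scudo interfaces) of the primary allocation fast path once the fallback branch is gone: getTSDAndLock() is expected to always return a usable TSD, so there is no null check and no globally locked fallback cache.

    #include <cstddef>
    #include <cstdlib>

    struct MockCache { /* per-thread free lists would live here */ };

    struct MockBackend {
      void *allocatePrimary(MockCache *, std::size_t Size) { return std::malloc(Size); }
      void deallocatePrimary(MockCache *, void *Ptr)       { std::free(Ptr); }
    };

    struct MockTSD {
      MockCache Cache;
      void unlock() { /* no-op unless the TSD is shared */ }
    };

    static MockBackend Backend;
    static thread_local MockTSD ThreadTSD;

    // Always returns a usable TSD in this model, mirroring the refactor.
    MockTSD *getTSDAndLock() { return &ThreadTSD; }

    void *primaryAllocate(std::size_t AllocSize) {
      MockTSD *TSD = getTSDAndLock();
      void *Ptr = Backend.allocatePrimary(&TSD->Cache, AllocSize);
      TSD->unlock();   // Unconditional; cheap or a no-op for exclusive TSDs.
      return Ptr;
    }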