summary | refs | log | tree | commit | diff
path: root/lib/scudo/scudo_allocator.cpp
diff options
context:
space:
mode:
authorKostya Kortchinsky <kostyak@google.com>2017-09-22 15:35:37 +0000
committerKostya Kortchinsky <kostyak@google.com>2017-09-22 15:35:37 +0000
commit10b087da020f9008012c82d8cb1c35463108c912 (patch)
tree7807ffae30bc167af4820ce072298f3a66b593f8 /lib/scudo/scudo_allocator.cpp
parent7e3a14329a9b9ff9434d442fa5ad7c90be14416e (diff)
[scudo] Scudo thread specific data refactor, part 1
Summary: We are going through an overhaul of Scudo's TSD, to allow for new platforms to be integrated more easily, and make the code more sound. This first part is mostly renaming, preferring some shorter names, correcting some comments. I removed `getPrng` and `getAllocatorCache` to directly access the members, there was not really any benefit to them (and it was suggested by Dmitry in D37590). The only functional change is in `scudo_tls_android.cpp`: we enforce bounds to the `NumberOfTSDs` and most of the logic in `getTSDAndLockSlow` is skipped if we only have 1 TSD. Reviewers: alekseyshl, dvyukov, kcc Reviewed By: dvyukov Subscribers: llvm-commits, srhines Differential Revision: https://reviews.llvm.org/D38139 git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@313987 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/scudo/scudo_allocator.cpp')
-rw-r--r--lib/scudo/scudo_allocator.cpp60
1 file changed, 24 insertions(+), 36 deletions(-)
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 9d65b8683..e490a469d 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -250,19 +250,11 @@ struct QuarantineCallback {
typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
- sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));
+ sizeof(ScudoTSD::QuarantineCachePlaceHolder));
-AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
- return &ThreadContext->Cache;
-}
-
-ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
- return reinterpret_cast<
- ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
-}
-
-ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) {
- return &ThreadContext->Prng;
+ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
+ return reinterpret_cast<ScudoQuarantineCache *>(
+ TSD->QuarantineCachePlaceHolder);
}
struct ScudoAllocator {
@@ -381,12 +373,11 @@ struct ScudoAllocator {
uptr AllocSize;
if (FromPrimary) {
AllocSize = AlignedSize;
- ScudoThreadContext *ThreadContext = getThreadContextAndLock();
- if (LIKELY(ThreadContext)) {
- Salt = getPrng(ThreadContext)->getU8();
- Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext),
- AllocSize);
- ThreadContext->unlock();
+ ScudoTSD *TSD = getTSDAndLock();
+ if (LIKELY(TSD)) {
+ Salt = TSD->Prng.getU8();
+ Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
+ TSD->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getU8();
@@ -454,11 +445,10 @@ struct ScudoAllocator {
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(Header);
if (Header->FromPrimary) {
- ScudoThreadContext *ThreadContext = getThreadContextAndLock();
- if (LIKELY(ThreadContext)) {
- getBackendAllocator().deallocatePrimary(
- getAllocatorCache(ThreadContext), Ptr);
- ThreadContext->unlock();
+ ScudoTSD *TSD = getTSDAndLock();
+ if (LIKELY(TSD)) {
+ getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
+ TSD->unlock();
} else {
SpinMutexLock Lock(&FallbackMutex);
getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
@@ -476,13 +466,12 @@ struct ScudoAllocator {
UnpackedHeader NewHeader = *Header;
NewHeader.State = ChunkQuarantine;
Chunk->compareExchangeHeader(&NewHeader, Header);
- ScudoThreadContext *ThreadContext = getThreadContextAndLock();
- if (LIKELY(ThreadContext)) {
- AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
- QuarantineCallback(
- getAllocatorCache(ThreadContext)),
+ ScudoTSD *TSD = getTSDAndLock();
+ if (LIKELY(TSD)) {
+ AllocatorQuarantine.Put(getQuarantineCache(TSD),
+ QuarantineCallback(&TSD->Cache),
Chunk, EstimatedSize);
- ThreadContext->unlock();
+ TSD->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
AllocatorQuarantine.Put(&FallbackQuarantineCache,
@@ -607,11 +596,10 @@ struct ScudoAllocator {
return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
}
- void commitBack(ScudoThreadContext *ThreadContext) {
- AllocatorCache *Cache = getAllocatorCache(ThreadContext);
- AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
- QuarantineCallback(Cache));
- BackendAllocator.destroyCache(Cache);
+ void commitBack(ScudoTSD *TSD) {
+ AllocatorQuarantine.Drain(getQuarantineCache(TSD),
+ QuarantineCallback(&TSD->Cache));
+ BackendAllocator.destroyCache(&TSD->Cache);
}
uptr getStats(AllocatorStat StatType) {
@@ -637,13 +625,13 @@ static void initScudoInternal(const AllocatorOptions &Options) {
Instance.init(Options);
}
-void ScudoThreadContext::init() {
+void ScudoTSD::init() {
getBackendAllocator().initCache(&Cache);
Prng.init();
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}
-void ScudoThreadContext::commitBack() {
+void ScudoTSD::commitBack() {
Instance.commitBack(this);
}