Diffstat:
-rw-r--r--  lib/scudo/scudo_allocator.cpp             60
-rw-r--r--  lib/scudo/scudo_tls.h                      4
-rw-r--r--  lib/scudo/scudo_tls_android.cpp           81
-rw-r--r--  lib/scudo/scudo_tls_android.inc           15
-rw-r--r--  lib/scudo/scudo_tls_context_android.inc   16
-rw-r--r--  lib/scudo/scudo_tls_context_linux.inc      2
-rw-r--r--  lib/scudo/scudo_tls_linux.cpp              6
-rw-r--r--  lib/scudo/scudo_tls_linux.inc              6

8 files changed, 90 insertions, 100 deletions
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 9d65b8683..e490a469d 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -250,19 +250,11 @@ struct QuarantineCallback {
typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
typedef ScudoQuarantine::Cache ScudoQuarantineCache;
COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
- sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));
+ sizeof(ScudoTSD::QuarantineCachePlaceHolder));
-AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
- return &ThreadContext->Cache;
-}
-
-ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
- return reinterpret_cast<
- ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
-}
-
-ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) {
- return &ThreadContext->Prng;
+ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
+ return reinterpret_cast<ScudoQuarantineCache *>(
+ TSD->QuarantineCachePlaceHolder);
}
struct ScudoAllocator {
@@ -381,12 +373,11 @@ struct ScudoAllocator {
uptr AllocSize;
if (FromPrimary) {
AllocSize = AlignedSize;
- ScudoThreadContext *ThreadContext = getThreadContextAndLock();
- if (LIKELY(ThreadContext)) {
- Salt = getPrng(ThreadContext)->getU8();
- Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext),
- AllocSize);
- ThreadContext->unlock();
+ ScudoTSD *TSD = getTSDAndLock();
+ if (LIKELY(TSD)) {
+ Salt = TSD->Prng.getU8();
+ Ptr = BackendAllocator.allocatePrimary(&TSD->Cache, AllocSize);
+ TSD->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getU8();
@@ -454,11 +445,10 @@ struct ScudoAllocator {
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(Header);
if (Header->FromPrimary) {
- ScudoThreadContext *ThreadContext = getThreadContextAndLock();
- if (LIKELY(ThreadContext)) {
- getBackendAllocator().deallocatePrimary(
- getAllocatorCache(ThreadContext), Ptr);
- ThreadContext->unlock();
+ ScudoTSD *TSD = getTSDAndLock();
+ if (LIKELY(TSD)) {
+ getBackendAllocator().deallocatePrimary(&TSD->Cache, Ptr);
+ TSD->unlock();
} else {
SpinMutexLock Lock(&FallbackMutex);
getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
@@ -476,13 +466,12 @@ struct ScudoAllocator {
UnpackedHeader NewHeader = *Header;
NewHeader.State = ChunkQuarantine;
Chunk->compareExchangeHeader(&NewHeader, Header);
- ScudoThreadContext *ThreadContext = getThreadContextAndLock();
- if (LIKELY(ThreadContext)) {
- AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
- QuarantineCallback(
- getAllocatorCache(ThreadContext)),
+ ScudoTSD *TSD = getTSDAndLock();
+ if (LIKELY(TSD)) {
+ AllocatorQuarantine.Put(getQuarantineCache(TSD),
+ QuarantineCallback(&TSD->Cache),
Chunk, EstimatedSize);
- ThreadContext->unlock();
+ TSD->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
AllocatorQuarantine.Put(&FallbackQuarantineCache,
@@ -607,11 +596,10 @@ struct ScudoAllocator {
return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
}
- void commitBack(ScudoThreadContext *ThreadContext) {
- AllocatorCache *Cache = getAllocatorCache(ThreadContext);
- AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
- QuarantineCallback(Cache));
- BackendAllocator.destroyCache(Cache);
+ void commitBack(ScudoTSD *TSD) {
+ AllocatorQuarantine.Drain(getQuarantineCache(TSD),
+ QuarantineCallback(&TSD->Cache));
+ BackendAllocator.destroyCache(&TSD->Cache);
}
uptr getStats(AllocatorStat StatType) {
@@ -637,13 +625,13 @@ static void initScudoInternal(const AllocatorOptions &Options) {
Instance.init(Options);
}
-void ScudoThreadContext::init() {
+void ScudoTSD::init() {
getBackendAllocator().initCache(&Cache);
Prng.init();
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}
-void ScudoThreadContext::commitBack() {
+void ScudoTSD::commitBack() {
Instance.commitBack(this);
}
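
The allocate, deallocate, and quarantine paths above all share one shape: take a locked per-thread TSD on the fast path, and fall back to a shared cache serialized by a mutex when no TSD can be locked. A minimal standalone sketch of that shape follows; the Cache/TSD types and allocate() are illustrative stand-ins, not Scudo's real API.

  // tsd_fastpath_sketch.cpp -- illustrative only, not Scudo's real API.
  #include <cstddef>
  #include <cstdlib>
  #include <mutex>

  struct Cache {
    // Stand-in for the per-thread allocator cache; the real one batches
    // free blocks per size class.
    void *allocate(std::size_t Size) { return std::malloc(Size); }
  };

  struct TSD {
    Cache TheCache;
    std::mutex M;
    bool tryLock() { return M.try_lock(); }
    void unlock() { M.unlock(); }
  };

  static thread_local TSD ThreadTSD;  // this thread's TSD; in the shared
                                      // (Android) model the tryLock can fail
  static Cache FallbackCache;         // shared, for when no TSD is usable
  static std::mutex FallbackMutex;

  void *allocate(std::size_t Size) {
    // Fast path: the TSD associated with this thread, taken via try-lock.
    if (ThreadTSD.tryLock()) {
      void *P = ThreadTSD.TheCache.allocate(Size);
      ThreadTSD.unlock();
      return P;
    }
    // Fallback path: a global cache behind a lock, as in the
    // SpinMutexLock branches above.
    std::lock_guard<std::mutex> Lock(FallbackMutex);
    return FallbackCache.allocate(Size);
  }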
diff --git a/lib/scudo/scudo_tls.h b/lib/scudo/scudo_tls.h
index 4784f6a30..a3992e264 100644
--- a/lib/scudo/scudo_tls.h
+++ b/lib/scudo/scudo_tls.h
@@ -28,7 +28,7 @@ namespace __scudo {
#include "scudo_tls_context_android.inc"
#include "scudo_tls_context_linux.inc"
-struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform {
+struct ALIGNED(64) ScudoTSD : public ScudoTSDPlatform {
AllocatorCache Cache;
ScudoPrng Prng;
uptr QuarantineCachePlaceHolder[4];
@@ -38,7 +38,7 @@ struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform {
void initThread(bool MinimalInit);
-// Platform specific dastpath functions definitions.
+// Platform specific fastpath functions definitions.
#include "scudo_tls_android.inc"
#include "scudo_tls_linux.inc"
diff --git a/lib/scudo/scudo_tls_android.cpp b/lib/scudo/scudo_tls_android.cpp
index c0ea417ab..3f215a72f 100644
--- a/lib/scudo/scudo_tls_android.cpp
+++ b/lib/scudo/scudo_tls_android.cpp
@@ -24,9 +24,9 @@ namespace __scudo {
static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;
-static atomic_uint32_t ThreadContextCurrentIndex;
-static ScudoThreadContext *ThreadContexts;
-static uptr NumberOfContexts;
+static atomic_uint32_t CurrentIndex;
+static ScudoTSD *TSDs;
+static u32 NumberOfTSDs;
// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used as they allocate memory.
static uptr getNumberOfCPUs() {
@@ -42,52 +42,55 @@ static void initOnce() {
// TODO(kostyak): remove and restrict to N and above.
CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
initScudo();
- NumberOfContexts = getNumberOfCPUs();
- ThreadContexts = reinterpret_cast<ScudoThreadContext *>(
- MmapOrDie(sizeof(ScudoThreadContext) * NumberOfContexts, __func__));
- for (uptr i = 0; i < NumberOfContexts; i++)
- ThreadContexts[i].init();
+ NumberOfTSDs = getNumberOfCPUs();
+ if (NumberOfTSDs == 0)
+ NumberOfTSDs = 1;
+ if (NumberOfTSDs > 32)
+ NumberOfTSDs = 32;
+ TSDs = reinterpret_cast<ScudoTSD *>(
+ MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
+ for (u32 i = 0; i < NumberOfTSDs; i++)
+ TSDs[i].init();
}
void initThread(bool MinimalInit) {
pthread_once(&GlobalInitialized, initOnce);
// Initial context assignment is done in a plain round-robin fashion.
- u32 Index = atomic_fetch_add(&ThreadContextCurrentIndex, 1,
- memory_order_relaxed);
- ScudoThreadContext *ThreadContext =
- &ThreadContexts[Index % NumberOfContexts];
- *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
+ u32 Index = atomic_fetch_add(&CurrentIndex, 1, memory_order_relaxed);
+ ScudoTSD *TSD = &TSDs[Index % NumberOfTSDs];
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
}
-ScudoThreadContext *getThreadContextAndLockSlow() {
- ScudoThreadContext *ThreadContext;
- // Go through all the contexts and find the first unlocked one.
- for (u32 i = 0; i < NumberOfContexts; i++) {
- ThreadContext = &ThreadContexts[i];
- if (ThreadContext->tryLock()) {
- *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
- return ThreadContext;
+ScudoTSD *getTSDAndLockSlow() {
+ ScudoTSD *TSD;
+ if (NumberOfTSDs > 1) {
+ // Go through all the contexts and find the first unlocked one.
+ for (u32 i = 0; i < NumberOfTSDs; i++) {
+ TSD = &TSDs[i];
+ if (TSD->tryLock()) {
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+ return TSD;
+ }
}
- }
- // No luck, find the one with the lowest precedence, and slow lock it.
- u64 Precedence = UINT64_MAX;
- for (u32 i = 0; i < NumberOfContexts; i++) {
- u64 SlowLockPrecedence = ThreadContexts[i].getSlowLockPrecedence();
- if (SlowLockPrecedence && SlowLockPrecedence < Precedence) {
- ThreadContext = &ThreadContexts[i];
- Precedence = SlowLockPrecedence;
+ // No luck, find the one with the lowest Precedence, and slow lock it.
+ u64 LowestPrecedence = UINT64_MAX;
+ for (u32 i = 0; i < NumberOfTSDs; i++) {
+ u64 Precedence = TSDs[i].getPrecedence();
+ if (Precedence && Precedence < LowestPrecedence) {
+ TSD = &TSDs[i];
+ LowestPrecedence = Precedence;
+ }
+ }
+ if (LIKELY(LowestPrecedence != UINT64_MAX)) {
+ TSD->lock();
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+ return TSD;
}
}
- if (LIKELY(Precedence != UINT64_MAX)) {
- ThreadContext->lock();
- *get_android_tls_ptr() = reinterpret_cast<uptr>(ThreadContext);
- return ThreadContext;
- }
- // Last resort (can this happen?), stick with the current one.
- ThreadContext =
- reinterpret_cast<ScudoThreadContext *>(*get_android_tls_ptr());
- ThreadContext->lock();
- return ThreadContext;
+ // Last resort, stick with the current one.
+ TSD = reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+ TSD->lock();
+ return TSD;
}
} // namespace __scudo
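
The slow path above can be modeled standalone: scan the pool with try-locks, and if every TSD is held, block on the one that has been contended the longest, i.e. the lowest nonzero Precedence (the NanoTime() stamp from its first failed tryLock). In this sketch nowNanos() stands in for NanoTime(), and the pool size is fixed rather than derived from the CPU count.

  // slow_lock_sketch.cpp -- models getTSDAndLockSlow(); illustrative only.
  #include <atomic>
  #include <chrono>
  #include <cstdint>
  #include <mutex>

  struct TSD {
    std::mutex M;
    std::atomic<uint64_t> Precedence{0};

    bool tryLock() {
      if (M.try_lock()) {
        Precedence.store(0, std::memory_order_relaxed);
        return true;
      }
      // Remember when this TSD was first found contended.
      if (Precedence.load(std::memory_order_relaxed) == 0)
        Precedence.store(nowNanos(), std::memory_order_relaxed);
      return false;
    }
    void lock() {
      M.lock();
      Precedence.store(0, std::memory_order_relaxed);
    }
    void unlock() { M.unlock(); }

    static uint64_t nowNanos() {   // stand-in for NanoTime()
      return std::chrono::steady_clock::now().time_since_epoch().count();
    }
  };

  static const int NumTSDs = 4;    // the real code sizes this from the CPU count
  static TSD TSDs[NumTSDs];

  TSD *getTSDAndLockSlow() {
    // First pass: take any TSD that is free right now.
    for (TSD &T : TSDs)
      if (T.tryLock())
        return &T;
    // All contended: block on the longest-contended TSD (lowest nonzero
    // Precedence), spreading waiters across the pool.
    TSD *Best = &TSDs[0];          // last resort if no Precedence is set
    uint64_t Lowest = UINT64_MAX;
    for (TSD &T : TSDs) {
      uint64_t P = T.Precedence.load(std::memory_order_relaxed);
      if (P && P < Lowest) {
        Lowest = P;
        Best = &T;
      }
    }
    Best->lock();
    return Best;
  }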
diff --git a/lib/scudo/scudo_tls_android.inc b/lib/scudo/scudo_tls_android.inc
index 6b82e49f5..9f3ef1a23 100644
--- a/lib/scudo/scudo_tls_android.inc
+++ b/lib/scudo/scudo_tls_android.inc
@@ -26,17 +26,16 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
initThread(MinimalInit);
}
-ScudoThreadContext *getThreadContextAndLockSlow();
+ScudoTSD *getTSDAndLockSlow();
-ALWAYS_INLINE ScudoThreadContext *getThreadContextAndLock() {
- ScudoThreadContext *ThreadContext =
- reinterpret_cast<ScudoThreadContext *>(*get_android_tls_ptr());
- CHECK(ThreadContext);
+ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
+ ScudoTSD *TSD = reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+ CHECK(TSD);
// Try to lock the currently associated context.
- if (ThreadContext->tryLock())
- return ThreadContext;
+ if (TSD->tryLock())
+ return TSD;
// If it failed, go the slow path.
- return getThreadContextAndLockSlow();
+ return getTSDAndLockSlow();
}
#endif // SANITIZER_LINUX && SANITIZER_ANDROID
diff --git a/lib/scudo/scudo_tls_context_android.inc b/lib/scudo/scudo_tls_context_android.inc
index f1951319d..4787ec7b6 100644
--- a/lib/scudo/scudo_tls_context_android.inc
+++ b/lib/scudo/scudo_tls_context_android.inc
@@ -20,33 +20,33 @@
#if SANITIZER_LINUX && SANITIZER_ANDROID
-struct ScudoThreadContextPlatform {
+struct ScudoTSDPlatform {
INLINE bool tryLock() {
if (Mutex.TryLock()) {
- atomic_store_relaxed(&SlowLockPrecedence, 0);
+ atomic_store_relaxed(&Precedence, 0);
return true;
}
- if (atomic_load_relaxed(&SlowLockPrecedence) == 0)
- atomic_store_relaxed(&SlowLockPrecedence, NanoTime());
+ if (atomic_load_relaxed(&Precedence) == 0)
+ atomic_store_relaxed(&Precedence, NanoTime());
return false;
}
INLINE void lock() {
Mutex.Lock();
- atomic_store_relaxed(&SlowLockPrecedence, 0);
+ atomic_store_relaxed(&Precedence, 0);
}
INLINE void unlock() {
Mutex.Unlock();
}
- INLINE u64 getSlowLockPrecedence() {
- return atomic_load_relaxed(&SlowLockPrecedence);
+ INLINE u64 getPrecedence() {
+ return atomic_load_relaxed(&Precedence);
}
private:
StaticSpinMutex Mutex;
- atomic_uint64_t SlowLockPrecedence;
+ atomic_uint64_t Precedence;
};
#endif // SANITIZER_LINUX && SANITIZER_ANDROID
diff --git a/lib/scudo/scudo_tls_context_linux.inc b/lib/scudo/scudo_tls_context_linux.inc
index 8d292bdbc..9a24256f2 100644
--- a/lib/scudo/scudo_tls_context_linux.inc
+++ b/lib/scudo/scudo_tls_context_linux.inc
@@ -20,7 +20,7 @@
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-struct ScudoThreadContextPlatform {
+struct ScudoTSDPlatform {
ALWAYS_INLINE void unlock() {}
};
diff --git a/lib/scudo/scudo_tls_linux.cpp b/lib/scudo/scudo_tls_linux.cpp
index f2592266d..845539457 100644
--- a/lib/scudo/scudo_tls_linux.cpp
+++ b/lib/scudo/scudo_tls_linux.cpp
@@ -28,7 +28,7 @@ static pthread_key_t PThreadKey;
__attribute__((tls_model("initial-exec")))
THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
__attribute__((tls_model("initial-exec")))
-THREADLOCAL ScudoThreadContext ThreadLocalContext;
+THREADLOCAL ScudoTSD TSD;
static void teardownThread(void *Ptr) {
uptr I = reinterpret_cast<uptr>(Ptr);
@@ -43,7 +43,7 @@ static void teardownThread(void *Ptr) {
reinterpret_cast<void *>(I - 1)) == 0))
return;
}
- ThreadLocalContext.commitBack();
+ TSD.commitBack();
ScudoThreadState = ThreadTornDown;
}
@@ -59,7 +59,7 @@ void initThread(bool MinimalInit) {
return;
CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
GetPthreadDestructorIterations())), 0);
- ThreadLocalContext.init();
+ TSD.init();
ScudoThreadState = ThreadInitialized;
}
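
The teardownThread destructor above uses a common pthread-key trick: it re-arms itself with a decremented count so that commitBack() runs only on the last destructor pass, after other TLS destructors that may still allocate or free. A sketch of the trick, where PTHREAD_DESTRUCTOR_ITERATIONS stands in for the sanitizer's GetPthreadDestructorIterations():

  // teardown_sketch.cpp -- models the pthread-key teardown; illustrative only.
  #include <pthread.h>
  #include <climits>   // PTHREAD_DESTRUCTOR_ITERATIONS
  #include <cstdint>

  static pthread_key_t Key;

  static void commitBack() { /* drain quarantine, release the cache */ }

  // Re-arm the destructor with a decremented count; the real teardown
  // runs on the final pass, as late as the platform allows.
  static void teardownThread(void *Ptr) {
    uintptr_t Iteration = reinterpret_cast<uintptr_t>(Ptr);
    if (Iteration > 1 &&
        pthread_setspecific(Key, reinterpret_cast<void *>(Iteration - 1)) == 0)
      return;  // not the last pass yet; run again next time
    commitBack();
  }

  static void initKey() { pthread_key_create(&Key, teardownThread); }

  void initThread() {
    static pthread_once_t Once = PTHREAD_ONCE_INIT;
    pthread_once(&Once, initKey);
    // Arm the destructor with the number of passes the platform grants.
    pthread_setspecific(Key, reinterpret_cast<void *>(
        uintptr_t(PTHREAD_DESTRUCTOR_ITERATIONS)));
  }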
diff --git a/lib/scudo/scudo_tls_linux.inc b/lib/scudo/scudo_tls_linux.inc
index 53b804485..492807c58 100644
--- a/lib/scudo/scudo_tls_linux.inc
+++ b/lib/scudo/scudo_tls_linux.inc
@@ -29,7 +29,7 @@ enum ThreadState : u8 {
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL ThreadState ScudoThreadState;
__attribute__((tls_model("initial-exec")))
-extern THREADLOCAL ScudoThreadContext ThreadLocalContext;
+extern THREADLOCAL ScudoTSD TSD;
ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
if (LIKELY(ScudoThreadState != ThreadNotInitialized))
@@ -37,10 +37,10 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
initThread(MinimalInit);
}
-ALWAYS_INLINE ScudoThreadContext *getThreadContextAndLock() {
+ALWAYS_INLINE ScudoTSD *getTSDAndLock() {
if (UNLIKELY(ScudoThreadState != ThreadInitialized))
return nullptr;
- return &ThreadLocalContext;
+ return &TSD;
}
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
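
On the non-Android Linux path the TSD is exclusive to its thread, so getTSDAndLock() takes no lock and unlock() is a no-op; the initial-exec TLS model makes the access a fixed offset from the thread pointer. A tiny sketch of why that works, assuming GCC or Clang on Linux:

  // exclusive_tsd_sketch.cpp -- illustrative only.
  struct TSD {
    long Cache[8] = {};
    void unlock() {}  // exclusive owner: nothing to release
  };

  // initial-exec TLS is resolved to a constant offset from the thread
  // pointer, so the access is a single load with no runtime lookup.
  __attribute__((tls_model("initial-exec"))) static thread_local TSD ThreadTSD;

  TSD *getTSDAndLock() {
    return &ThreadTSD;  // thread-private: no atomic, no mutex
  }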