author    Kostya Kortchinsky <kostyak@google.com>  2018-07-20 15:07:17 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2018-07-20 15:07:17 +0000
commit    4f6d4b8d42668e3b665636f8a0e8a3e2cec32483 (patch)
tree      d99b855f28a0c74ee2b31e44c21db836a0e83874 /lib/scudo/scudo_allocator.cpp
parent    3cd9b54748fd042ae510388c0b711e478a957b00 (diff)
[scudo] Simplify internal names (NFC)
Summary:
There is currently too much redundancy in the class/variable/* names in Scudo:
- we are in the namespace `__scudo`, so there is no point in having something
  named `ScudoX` end up with a final name of `__scudo::ScudoX`;
- a lot of types/* have `Allocator` in the name; given that Scudo is an
  allocator, this doubles up as well.

So change a bunch of the Scudo names to make them shorter, less redundant, and
overall simpler. They should still be pretty self-explanatory (or at least it
looks so to me).

The TSD part will be done in another CL (eg `__scudo::ScudoTSD`).

Reviewers: alekseyshl, eugenis

Reviewed By: alekseyshl

Subscribers: delcypher, #sanitizers, llvm-commits

Differential Revision: https://reviews.llvm.org/D49505

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@337557 91177308-0d34-0410-b5e6-96231b3b80d8
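To make the rename concrete, here is a minimal compilable sketch of the
before/after naming this patch applies. The struct bodies and the `before`/
`after` wrapper namespaces are stand-ins for illustration, not the real Scudo
types:

// Sketch only: empty stand-in types, not the actual Scudo definitions.
namespace before {
namespace __scudo {                  // double-underscore names are reserved;
struct ScudoBackendAllocator {};     // used here only to mirror the real one.
struct ScudoQuarantine {};           // reads as __scudo::ScudoQuarantine
struct ScudoAllocator {              // "Scudo"/"Allocator" repeat the context
  ScudoBackendAllocator BackendAllocator;
  ScudoQuarantine AllocatorQuarantine;
};
} // namespace __scudo
} // namespace before

namespace after {
namespace __scudo {
struct BackendT {};                  // trailing T marks a type name
struct QuarantineT {};
struct Allocator {                   // reads as __scudo::Allocator
  BackendT Backend;
  QuarantineT Quarantine;
};
} // namespace __scudo
} // namespace after

int main() { after::__scudo::Allocator A; (void)A; return 0; }

The fully qualified names stay unambiguous while the per-identifier prefixes
drop away, which is why the change can be NFC (no functional change).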
Diffstat (limited to 'lib/scudo/scudo_allocator.cpp')
-rw-r--r--  lib/scudo/scudo_allocator.cpp  88
1 file changed, 42 insertions(+), 46 deletions(-)
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 82418a52c..4a11bf5fc 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -62,7 +62,7 @@ INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
}
-static ScudoBackendAllocator &getBackendAllocator();
+static BackendT &getBackend();
namespace Chunk {
static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
@@ -92,9 +92,9 @@ namespace Chunk {
static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
const uptr ClassId = Header->ClassId;
if (ClassId)
- return PrimaryAllocator::ClassIdToSize(ClassId) - getHeaderSize() -
+ return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
(Header->Offset << MinAlignmentLog);
- return SecondaryAllocator::GetActuallyAllocatedSize(
+ return SecondaryT::GetActuallyAllocatedSize(
getBackendPtr(Ptr, Header)) - getHeaderSize();
}
@@ -103,7 +103,7 @@ namespace Chunk {
const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
if (Header->ClassId)
return SizeOrUnusedBytes;
- return SecondaryAllocator::GetActuallyAllocatedSize(
+ return SecondaryT::GetActuallyAllocatedSize(
getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
}
@@ -175,7 +175,7 @@ namespace Chunk {
} // namespace Chunk
struct QuarantineCallback {
- explicit QuarantineCallback(AllocatorCache *Cache)
+ explicit QuarantineCallback(AllocatorCacheT *Cache)
: Cache_(Cache) {}
// Chunk recycling function, returns a quarantined chunk to the backend,
@@ -188,10 +188,9 @@ struct QuarantineCallback {
Chunk::eraseHeader(Ptr);
void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
if (Header.ClassId)
- getBackendAllocator().deallocatePrimary(Cache_, BackendPtr,
- Header.ClassId);
+ getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
else
- getBackendAllocator().deallocateSecondary(BackendPtr);
+ getBackend().deallocateSecondary(BackendPtr);
}
// Internal quarantine allocation and deallocation functions. We first check
@@ -199,34 +198,33 @@ struct QuarantineCallback {
// TODO(kostyak): figure out the best way to protect the batches.
void *Allocate(uptr Size) {
const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
- return getBackendAllocator().allocatePrimary(Cache_, BatchClassId);
+ return getBackend().allocatePrimary(Cache_, BatchClassId);
}
void Deallocate(void *Ptr) {
const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
- getBackendAllocator().deallocatePrimary(Cache_, Ptr, BatchClassId);
+ getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
}
- AllocatorCache *Cache_;
+ AllocatorCacheT *Cache_;
COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
};
-typedef Quarantine<QuarantineCallback, void> ScudoQuarantine;
-typedef ScudoQuarantine::Cache ScudoQuarantineCache;
-COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
+typedef Quarantine<QuarantineCallback, void> QuarantineT;
+typedef QuarantineT::Cache QuarantineCacheT;
+COMPILER_CHECK(sizeof(QuarantineCacheT) <=
sizeof(ScudoTSD::QuarantineCachePlaceHolder));
-ScudoQuarantineCache *getQuarantineCache(ScudoTSD *TSD) {
- return reinterpret_cast<ScudoQuarantineCache *>(
- TSD->QuarantineCachePlaceHolder);
+QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
+ return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
}
-struct ScudoAllocator {
+struct Allocator {
static const uptr MaxAllowedMallocSize =
FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
- ScudoBackendAllocator BackendAllocator;
- ScudoQuarantine AllocatorQuarantine;
+ BackendT Backend;
+ QuarantineT Quarantine;
u32 QuarantineChunksUpToSize;
@@ -240,8 +238,8 @@ struct ScudoAllocator {
atomic_uint8_t RssLimitExceeded;
atomic_uint64_t RssLastCheckedAtNS;
- explicit ScudoAllocator(LinkerInitialized)
- : AllocatorQuarantine(LINKER_INITIALIZED) {}
+ explicit Allocator(LinkerInitialized)
+ : Quarantine(LINKER_INITIALIZED) {}
NOINLINE void performSanityChecks();
@@ -260,10 +258,10 @@ struct ScudoAllocator {
atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
- BackendAllocator.init(common_flags()->allocator_release_to_os_interval_ms);
+ Backend.init(common_flags()->allocator_release_to_os_interval_ms);
HardRssLimitMb = common_flags()->hard_rss_limit_mb;
SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
- AllocatorQuarantine.Init(
+ Quarantine.Init(
static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
QuarantineChunksUpToSize = getFlags()->QuarantineChunksUpToSize;
@@ -329,18 +327,18 @@ struct ScudoAllocator {
void *BackendPtr;
uptr BackendSize;
u8 ClassId;
- if (PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment)) {
+ if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
BackendSize = AlignedSize;
ClassId = SizeClassMap::ClassID(BackendSize);
bool UnlockRequired;
ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
- BackendPtr = BackendAllocator.allocatePrimary(&TSD->Cache, ClassId);
+ BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
if (UnlockRequired)
TSD->unlock();
} else {
BackendSize = NeededSize;
ClassId = 0;
- BackendPtr = BackendAllocator.allocateSecondary(BackendSize, Alignment);
+ BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
}
if (UNLIKELY(!BackendPtr)) {
SetAllocatorOutOfMemory();
@@ -351,7 +349,7 @@ struct ScudoAllocator {
// If requested, we will zero out the entire contents of the returned chunk.
if ((ForceZeroContents || ZeroContents) && ClassId)
- memset(BackendPtr, 0, PrimaryAllocator::ClassIdToSize(ClassId));
+ memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
UnpackedHeader Header = {};
uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
@@ -391,7 +389,7 @@ struct ScudoAllocator {
// quarantine chunk size threshold.
void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
uptr Size) {
- const bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0) ||
+ const bool BypassQuarantine = (Quarantine.GetCacheSize() == 0) ||
(Size > QuarantineChunksUpToSize);
if (BypassQuarantine) {
Chunk::eraseHeader(Ptr);
@@ -399,12 +397,12 @@ struct ScudoAllocator {
if (Header->ClassId) {
bool UnlockRequired;
ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
- getBackendAllocator().deallocatePrimary(&TSD->Cache, BackendPtr,
- Header->ClassId);
+ getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
+ Header->ClassId);
if (UnlockRequired)
TSD->unlock();
} else {
- getBackendAllocator().deallocateSecondary(BackendPtr);
+ getBackend().deallocateSecondary(BackendPtr);
}
} else {
// If a small memory amount was allocated with a larger alignment, we want
@@ -418,9 +416,8 @@ struct ScudoAllocator {
Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
bool UnlockRequired;
ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
- AllocatorQuarantine.Put(getQuarantineCache(TSD),
- QuarantineCallback(&TSD->Cache), Ptr,
- EstimatedSize);
+ Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
+ Ptr, EstimatedSize);
if (UnlockRequired)
TSD->unlock();
}
@@ -530,15 +527,14 @@ struct ScudoAllocator {
}
void commitBack(ScudoTSD *TSD) {
- AllocatorQuarantine.Drain(getQuarantineCache(TSD),
- QuarantineCallback(&TSD->Cache));
- BackendAllocator.destroyCache(&TSD->Cache);
+ Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
+ Backend.destroyCache(&TSD->Cache);
}
uptr getStats(AllocatorStat StatType) {
initThreadMaybe();
uptr stats[AllocatorStatCount];
- BackendAllocator.getStats(stats);
+ Backend.getStats(stats);
return stats[StatType];
}
@@ -557,11 +553,11 @@ struct ScudoAllocator {
void printStats() {
initThreadMaybe();
- BackendAllocator.printStats();
+ Backend.printStats();
}
};
-NOINLINE void ScudoAllocator::performSanityChecks() {
+NOINLINE void Allocator::performSanityChecks() {
// Verify that the header offset field can hold the maximum offset. In the
// case of the Secondary allocator, it takes care of alignment and the
// offset will always be 0. In the case of the Primary, the worst case
@@ -596,7 +592,7 @@ NOINLINE void ScudoAllocator::performSanityChecks() {
// Opportunistic RSS limit check. This will update the RSS limit status, if
// it can, every 100ms, otherwise it will just return the current one.
-NOINLINE bool ScudoAllocator::isRssLimitExceeded() {
+NOINLINE bool Allocator::isRssLimitExceeded() {
u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
const u64 CurrentCheck = MonotonicNanoTime();
if (LIKELY(CurrentCheck < LastCheck + (100ULL * 1000000ULL)))
@@ -626,10 +622,10 @@ NOINLINE bool ScudoAllocator::isRssLimitExceeded() {
return atomic_load_relaxed(&RssLimitExceeded);
}
-static ScudoAllocator Instance(LINKER_INITIALIZED);
+static Allocator Instance(LINKER_INITIALIZED);
-static ScudoBackendAllocator &getBackendAllocator() {
- return Instance.BackendAllocator;
+static BackendT &getBackend() {
+ return Instance.Backend;
}
void initScudo() {
@@ -637,7 +633,7 @@ void initScudo() {
}
void ScudoTSD::init() {
- getBackendAllocator().initCache(&Cache);
+ getBackend().initCache(&Cache);
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}
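The `QuarantineCachePlaceHolder` cast kept (and retyped) above relies on a
common pattern: reserve raw storage inside the TSD and verify at compile time
that the real cache type fits, as the `COMPILER_CHECK` in the diff does. A
self-contained sketch of that pattern, with hypothetical `TSD`/`CacheT`
stand-ins rather than the real Scudo types:

#include <cstddef>
#include <cstring>

struct CacheT { std::size_t Count; };  // stand-in for QuarantineT::Cache

struct TSD {
  // Raw bytes sized and aligned for CacheT, so this header does not need
  // the full cache type definition.
  alignas(CacheT) char QuarantineCachePlaceHolder[sizeof(CacheT)];
};

// Mirrors COMPILER_CHECK(sizeof(QuarantineCacheT) <=
//                        sizeof(ScudoTSD::QuarantineCachePlaceHolder)).
static_assert(sizeof(CacheT) <= sizeof(TSD::QuarantineCachePlaceHolder),
              "placeholder too small for the cache type");

CacheT *getQuarantineCache(TSD *Tsd) {
  return reinterpret_cast<CacheT *>(Tsd->QuarantineCachePlaceHolder);
}

int main() {
  TSD Tsd;
  // Zero the placeholder first, as ScudoTSD::init() does in the diff.
  std::memset(Tsd.QuarantineCachePlaceHolder, 0,
              sizeof(Tsd.QuarantineCachePlaceHolder));
  return getQuarantineCache(&Tsd)->Count == 0 ? 0 : 1;
}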