author     Kostya Kortchinsky <kostyak@google.com>    2017-05-11 21:40:45 +0000
committer  Kostya Kortchinsky <kostyak@google.com>    2017-05-11 21:40:45 +0000
commit     a5c91c22f175eddf6f2d4d244dce6ce6a2e3c9e1 (patch)
tree       c41661f9121d0bac55adaa7b6fb6e95d860933bd /lib/scudo/scudo_allocator.cpp
parent     39596416fe951311daf94abb3ac269dcbb694735 (diff)
[scudo] Use our own combined allocator
Summary:
The reasoning behind this change is twofold:
- the current combined allocator (sanitizer_allocator_combined.h) implements
  features that are not relevant for Scudo, making some code redundant, and
  some restrictions not pertinent (alignments for example). This forced us to
  do some weird things between the frontend and our secondary to make things
  work;
- we have enough information to be able to know if a chunk will be serviced by
  the Primary or Secondary, allowing us to avoid extraneous calls to functions
  such as `PointerIsMine` or `CanAllocate`.

As a result, the new scudo-specific combined allocator is very straightforward,
and allows us to remove some now unnecessary code both in the frontend and the
secondary. Unused functions have been left in as unimplemented for now.

It turns out to also be a sizeable performance gain (3% faster in some Android
memory_replay benchmarks, doing some more on other platforms).

Reviewers: alekseyshl, kcc, dvyukov

Reviewed By: alekseyshl

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D33007

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@302830 91177308-0d34-0410-b5e6-96231b3b80d8
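As an aside for readers of this page: the backend interface the frontend now calls reduces to dispatching on a FromPrimary bit. Below is a minimal sketch of that idea, not the actual scudo_allocator_combined.h; the public signatures mirror the call sites visible in the diff, while the Primary/Cache/Secondary method names used inside are assumptions.

    // Sketch only: dispatch on a caller-provided FromPrimary bit instead of
    // probing with PointerIsMine()/CanAllocate() on every operation.
    typedef unsigned long uptr;

    template <class PrimaryT, class CacheT, class SecondaryT>
    class CombinedAllocatorSketch {
     public:
      void *Allocate(CacheT *Cache, uptr Size, uptr Alignment, bool FromPrimary) {
        if (FromPrimary)
          return Cache->Allocate(&Primary, Primary.ClassID(Size));  // assumed cache API
        return Secondary.Allocate(Size, Alignment);                 // assumed secondary API
      }
      void Deallocate(CacheT *Cache, void *Ptr, bool FromPrimary) {
        if (FromPrimary)
          Cache->Deallocate(&Primary, Ptr);  // assumed cache API
        else
          Secondary.Deallocate(Ptr);         // assumed secondary API
      }
      uptr GetActuallyAllocatedSize(void *Ptr, bool FromPrimary) {
        return FromPrimary ? Primary.GetActuallyAllocatedSize(Ptr)
                           : Secondary.GetActuallyAllocatedSize(Ptr);
      }
     private:
      PrimaryT Primary;
      SecondaryT Secondary;
    };

Since the frontend already computes FromPrimary when sizing the request, no per-pointer classification is needed on the hot paths.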
Diffstat (limited to 'lib/scudo/scudo_allocator.cpp')
-rw-r--r--  lib/scudo/scudo_allocator.cpp | 80
1 file changed, 42 insertions(+), 38 deletions(-)
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 2b7f099df..ce69ddf55 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -73,8 +73,9 @@ struct ScudoChunk : UnpackedHeader {
// Returns the usable size for a chunk, meaning the amount of bytes from the
// beginning of the user data to the end of the backend allocated chunk.
uptr getUsableSize(UnpackedHeader *Header) {
- uptr Size = getBackendAllocator().GetActuallyAllocatedSize(
- getAllocBeg(Header));
+ uptr Size =
+ getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header),
+ Header->FromPrimary);
if (Size == 0)
return 0;
return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
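Aside: with assumed values (MinAlignmentLog = 4, a 16-byte aligned header, a backend size of 96 and an Offset field of 2), the formula above yields 96 - 16 - (2 << 4) = 48 usable bytes. A compile-time illustration, using constants that are assumptions rather than scudo's real configuration:

    constexpr unsigned long BackendSize = 96, AlignedChunkHeaderSize = 16;
    constexpr unsigned long OffsetField = 2, MinAlignmentLog = 4;
    static_assert(BackendSize - AlignedChunkHeaderSize -
                      (OffsetField << MinAlignmentLog) == 48,
                  "usable size of the example chunk");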
@@ -221,7 +222,8 @@ struct QuarantineCallback {
explicit QuarantineCallback(AllocatorCache *Cache)
: Cache_(Cache) {}
- // Chunk recycling function, returns a quarantined chunk to the backend.
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
void Recycle(ScudoChunk *Chunk) {
UnpackedHeader Header;
Chunk->loadHeader(&Header);
@@ -231,17 +233,19 @@ struct QuarantineCallback {
}
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(&Header);
- getBackendAllocator().Deallocate(Cache_, Ptr);
+ getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary);
}
- // Internal quarantine allocation and deallocation functions.
+ // Internal quarantine allocation and deallocation functions. We first check
+ // that the batches are indeed serviced by the Primary.
+ // TODO(kostyak): figure out the best way to protect the batches.
+ COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
void *Allocate(uptr Size) {
- // TODO(kostyak): figure out the best way to protect the batches.
- return getBackendAllocator().Allocate(Cache_, Size, MinAlignment);
+ return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true);
}
void Deallocate(void *Ptr) {
- getBackendAllocator().Deallocate(Cache_, Ptr);
+ getBackendAllocator().Deallocate(Cache_, Ptr, true);
}
AllocatorCache *Cache_;
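Aside: the reason the quarantine can hardcode FromPrimary to true is the compile-time check added above; if a batch is always smaller than the Primary's largest size class, batch allocations can never end up in the Secondary. A standalone sketch of that guard, with both the batch layout and the size-class limit assumed for illustration:

    #include <cstddef>

    // Stand-ins for illustration only; neither value nor layout comes from scudo.
    constexpr size_t kAssumedMaxPrimarySize = 1 << 17;

    struct QuarantineBatchSketch {
      QuarantineBatchSketch *Next;
      size_t Size;
      size_t Count;
      void *Ptrs[1021];
    };

    // Equivalent in spirit to the COMPILER_CHECK above: fail the build if a
    // batch could ever need the Secondary.
    static_assert(sizeof(QuarantineBatchSketch) < kAssumedMaxPrimarySize,
                  "quarantine batches must always be serviced by the Primary");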
@@ -359,58 +363,55 @@ struct ScudoAllocator {
Size = 1;
uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
- if (Alignment > MinAlignment)
- NeededSize += Alignment;
- if (NeededSize >= MaxAllowedMallocSize)
+ uptr AlignedSize = (Alignment > MinAlignment) ?
+ NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
+ if (AlignedSize >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
- // Primary backed and Secondary backed allocations have a different
- // treatment. We deal with alignment requirements of Primary serviced
- // allocations here, but the Secondary will take care of its own alignment
- // needs, which means we also have to work around some limitations of the
- // combined allocator to accommodate the situation.
- bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
+ // Primary and Secondary backed allocations have a different treatment. We
+ // deal with alignment requirements of Primary serviced allocations here,
+ // but the Secondary will take care of its own alignment needs.
+ bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);
void *Ptr;
uptr Salt;
+ uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
Salt = getPrng(ThreadContext)->getNext();
Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
- NeededSize, AllocationAlignment);
+ AllocationSize, AllocationAlignment,
+ FromPrimary);
ThreadContext->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getNext();
- Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
- AllocationAlignment);
+ Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
+ AllocationAlignment, FromPrimary);
}
if (!Ptr)
return BackendAllocator.ReturnNullOrDieOnOOM();
- uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
- // If the allocation was serviced by the secondary, the returned pointer
- // accounts for ChunkHeaderSize to pass the alignment check of the combined
- // allocator. Adjust it here.
- if (!FromPrimary) {
- AllocBeg -= AlignedChunkHeaderSize;
- if (Alignment > MinAlignment)
- NeededSize -= Alignment;
- }
-
// If requested, we will zero out the entire contents of the returned chunk.
if ((ForceZeroContents || ZeroContents) && FromPrimary)
- memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));
+ memset(Ptr, 0,
+ BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary));
+ UnpackedHeader Header = {};
+ uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
- if (!IsAligned(UserBeg, Alignment))
+ if (!IsAligned(UserBeg, Alignment)) {
+ // Since the Secondary takes care of alignment, a non-aligned pointer
+ // means it is from the Primary. It is also the only case where the offset
+ // field of the header would be non-zero.
+ CHECK(FromPrimary);
UserBeg = RoundUpTo(UserBeg, Alignment);
- CHECK_LE(UserBeg + Size, AllocBeg + NeededSize);
- UnpackedHeader Header = {};
+ uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
+ Header.Offset = Offset >> MinAlignmentLog;
+ }
+ CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize);
Header.State = ChunkAllocated;
- uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
- Header.Offset = Offset >> MinAlignmentLog;
Header.AllocType = Type;
if (FromPrimary) {
Header.FromPrimary = FromPrimary;
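Aside: a worked example of the sizing arithmetic introduced above, as a standalone snippet; MinAlignment = 16, AlignedChunkHeaderSize = 16 and MinAlignmentLog = 4 are assumptions for illustration, not scudo's actual configuration.

    #include <cstdint>
    #include <cstdio>

    static uint64_t RoundUpTo(uint64_t X, uint64_t Boundary) {
      return (X + Boundary - 1) & ~(Boundary - 1);
    }

    int main() {
      const uint64_t MinAlignment = 16;            // assumed
      const uint64_t AlignedChunkHeaderSize = 16;  // assumed
      const uint64_t Size = 24, Alignment = 64;    // example user request

      uint64_t NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;  // 48
      uint64_t AlignedSize = (Alignment > MinAlignment)
                                 ? NeededSize + (Alignment - AlignedChunkHeaderSize) // 96
                                 : NeededSize;

      // If the Primary services the request, the padded AlignedSize is allocated
      // at MinAlignment and the header Offset field absorbs the realignment.
      uint64_t AllocBeg = 0x1010;  // 16-aligned but not 64-aligned, for the example
      uint64_t UserBeg = AllocBeg + AlignedChunkHeaderSize;       // 0x1020, misaligned
      if (UserBeg % Alignment != 0)
        UserBeg = RoundUpTo(UserBeg, Alignment);                  // 0x1040
      uint64_t Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;  // 32
      printf("NeededSize=%llu AlignedSize=%llu Offset=%llu stored=%llu\n",
             (unsigned long long)NeededSize, (unsigned long long)AlignedSize,
             (unsigned long long)Offset,
             (unsigned long long)(Offset >> 4 /* MinAlignmentLog, assumed */));
      return 0;
    }

With these numbers, UserBeg + Size = 0x1058 stays within AllocBeg + AlignedSize = 0x1070, matching the CHECK_LE above.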
@@ -437,17 +438,20 @@ struct ScudoAllocator {
// with no additional security value.
void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
uptr Size) {
+ bool FromPrimary = Header->FromPrimary;
bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
if (BypassQuarantine) {
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(Header);
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
- getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr);
+ getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr,
+ FromPrimary);
ThreadContext->unlock();
} else {
SpinMutexLock Lock(&FallbackMutex);
- getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
+ getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr,
+ FromPrimary);
}
} else {
UnpackedHeader NewHeader = *Header;