summaryrefslogtreecommitdiff
path: root/lib/scudo/scudo_allocator.cpp
diff options
context:
space:
mode:
authorKostya Kortchinsky <kostyak@google.com>2017-04-21 18:10:53 +0000
committerKostya Kortchinsky <kostyak@google.com>2017-04-21 18:10:53 +0000
commit9cb198789f2eb9672fdc0e22e1abb89d1ed8132a (patch)
tree8de59baf0dca7841c7197bf8389893fe11b9724b /lib/scudo/scudo_allocator.cpp
parentf94bfbbbbb7189d9e50ddf589a7567770859d0c0 (diff)
[scudo] Bypass Quarantine if its size is set to 0
Summary:
In the current state of things, the deallocation path puts a chunk in the Quarantine whether it's enabled or not (size of 0). When the Quarantine is disabled, this results in the header being loaded (and checked) twice, and stored (and checksummed) once, in `deallocate` and `Recycle`.

This change introduces a `quarantineOrDeallocateChunk` function that has a fast path to deallocation if the Quarantine is disabled. Even though this is not the preferred configuration security-wise, this change saves a sizeable amount of processing for that particular situation (which could be adopted by low memory devices). Additionally this simplifies a bit `deallocate` and `reallocate`.

Reviewers: dvyukov, kcc, alekseyshl

Reviewed By: dvyukov

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D32310

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@301015 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/scudo/scudo_allocator.cpp')
-rw-r--r--lib/scudo/scudo_allocator.cpp62
1 file changed, 36 insertions, 26 deletions
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 9812fc0f5..e89e09223 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -460,6 +460,38 @@ struct ScudoAllocator {
return UserPtr;
}
+ // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
+ // we directly deallocate the chunk, otherwise the flow would lead to the
+ // chunk being checksummed twice, once before Put and once in Recycle, with
+ // no additional security value.
+ void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
+ uptr Size) {
+ bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
+ if (BypassQuarantine) {
+ Chunk->eraseHeader();
+ void *Ptr = Chunk->getAllocBeg(Header);
+ if (LIKELY(!ThreadTornDown)) {
+ getBackendAllocator().Deallocate(&Cache, Ptr);
+ } else {
+ SpinMutexLock Lock(&FallbackMutex);
+ getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
+ }
+ } else {
+ UnpackedHeader NewHeader = *Header;
+ NewHeader.State = ChunkQuarantine;
+ Chunk->compareExchangeHeader(&NewHeader, Header);
+ if (LIKELY(!ThreadTornDown)) {
+ AllocatorQuarantine.Put(&ThreadQuarantineCache,
+ QuarantineCallback(&Cache), Chunk, Size);
+ } else {
+ SpinMutexLock l(&FallbackMutex);
+ AllocatorQuarantine.Put(&FallbackQuarantineCache,
+ QuarantineCallback(&FallbackAllocatorCache),
+ Chunk, Size);
+ }
+ }
+ }
+
// Deallocates a Chunk, which means adding it to the delayed free list (or
// Quarantine).
void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
@@ -499,24 +531,12 @@ struct ScudoAllocator {
}
}
- UnpackedHeader NewHeader = OldHeader;
- NewHeader.State = ChunkQuarantine;
- Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
-
// If a small memory amount was allocated with a larger alignment, we want
// to take that into account. Otherwise the Quarantine would be filled with
- // tiny chunks, taking a lot of VA memory. This an approximation of the
+ // tiny chunks, taking a lot of VA memory. This is an approximation of the
// usable size, that allows us to not call GetActuallyAllocatedSize.
uptr LiableSize = Size + (OldHeader.Offset << MinAlignment);
- if (LIKELY(!ThreadTornDown)) {
- AllocatorQuarantine.Put(&ThreadQuarantineCache,
- QuarantineCallback(&Cache), Chunk, LiableSize);
- } else {
- SpinMutexLock l(&FallbackMutex);
- AllocatorQuarantine.Put(&FallbackQuarantineCache,
- QuarantineCallback(&FallbackAllocatorCache),
- Chunk, LiableSize);
- }
+ quarantineOrDeallocateChunk(Chunk, &OldHeader, LiableSize);
}
// Reallocates a chunk. We can save on a new allocation if the new requested
@@ -541,11 +561,11 @@ struct ScudoAllocator {
OldPtr);
}
uptr UsableSize = Chunk->getUsableSize(&OldHeader);
- UnpackedHeader NewHeader = OldHeader;
// The new size still fits in the current chunk, and the size difference
// is reasonable.
if (NewSize <= UsableSize &&
(UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
+ UnpackedHeader NewHeader = OldHeader;
NewHeader.SizeOrUnusedBytes =
OldHeader.FromPrimary ? NewSize : UsableSize - NewSize;
Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
@@ -558,17 +578,7 @@ struct ScudoAllocator {
uptr OldSize = OldHeader.FromPrimary ? OldHeader.SizeOrUnusedBytes :
UsableSize - OldHeader.SizeOrUnusedBytes;
memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
- NewHeader.State = ChunkQuarantine;
- Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
- if (LIKELY(!ThreadTornDown)) {
- AllocatorQuarantine.Put(&ThreadQuarantineCache,
- QuarantineCallback(&Cache), Chunk, UsableSize);
- } else {
- SpinMutexLock l(&FallbackMutex);
- AllocatorQuarantine.Put(&FallbackQuarantineCache,
- QuarantineCallback(&FallbackAllocatorCache),
- Chunk, UsableSize);
- }
+ quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize);
}
return NewPtr;
}