author     Kostya Kortchinsky <kostyak@google.com>  2017-05-11 21:40:45 +0000
committer  Kostya Kortchinsky <kostyak@google.com>  2017-05-11 21:40:45 +0000
commit     a5c91c22f175eddf6f2d4d244dce6ce6a2e3c9e1 (patch)
tree       c41661f9121d0bac55adaa7b6fb6e95d860933bd /lib/scudo/scudo_allocator_secondary.h
parent     39596416fe951311daf94abb3ac269dcbb694735 (diff)
[scudo] Use our own combined allocator
Summary:
The reasoning behind this change is twofold:
- the current combined allocator (sanitizer_allocator_combined.h) implements
  features that are not relevant for Scudo, making some code redundant and
  some restrictions not pertinent (alignments, for example). This forced us to
  do some weird things between the frontend and our Secondary to make things
  work;
- we have enough information to know whether a chunk will be serviced by the
  Primary or the Secondary, allowing us to avoid extraneous calls to functions
  such as `PointerIsMine` or `CanAllocate`.

As a result, the new Scudo-specific combined allocator is very straightforward,
and allows us to remove some now unnecessary code in both the frontend and the
Secondary. Unused functions have been left in as unimplemented for now.

It also turns out to be a sizeable performance gain (3% faster in some Android
memory_replay benchmarks, with more benchmarking underway on other platforms).

Reviewers: alekseyshl, kcc, dvyukov

Reviewed By: alekseyshl

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D33007

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@302830 91177308-0d34-0410-b5e6-96231b3b80d8
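[Editorial sketch] To illustrate the dispatch idea described above, the snippet below shows how a combined layer can branch on information the frontend already has (Primary vs. Secondary) instead of querying `PointerIsMine` or `CanAllocate`. This is a hypothetical sketch only; the class and member names (ScudoCombinedAllocatorSketch, ClassID, GetSizeClass, and the template parameters) are illustrative and do not reproduce the code added by this patch.

#include <cstdint>

// Hypothetical sketch of a Scudo-style combined allocator (illustrative only;
// names do not match the actual scudo_allocator_combined.h). The caller tells
// the combined allocator which backend services the request, so no
// PointerIsMine/CanAllocate round-trips are needed.
using uptr = std::uintptr_t;

template <class PrimaryAllocator, class SecondaryAllocator,
          class AllocatorCache, class AllocatorStats>
class ScudoCombinedAllocatorSketch {
 public:
  void *Allocate(AllocatorCache *Cache, AllocatorStats *Stats, uptr Size,
                 uptr Alignment, bool FromPrimary) {
    if (FromPrimary)
      // Small allocations: served from the per-thread cache backed by the
      // Primary, using the size class derived from the requested size.
      return Cache->Allocate(&Primary, Primary.ClassID(Size));
    // Large (or strongly aligned) allocations: mmap-based Secondary.
    return Secondary.Allocate(Stats, Size, Alignment);
  }

  void Deallocate(AllocatorCache *Cache, AllocatorStats *Stats, void *Ptr,
                  bool FromPrimary) {
    if (FromPrimary)
      Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
    else
      Secondary.Deallocate(Stats, Ptr);
  }

 private:
  PrimaryAllocator Primary;
  SecondaryAllocator Secondary;
};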
Diffstat (limited to 'lib/scudo/scudo_allocator_secondary.h')
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h | 101
1 file changed, 26 insertions(+), 75 deletions(-)
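[Editorial sketch] To make the diff below easier to follow, here is the mapping-size arithmetic the reworked Allocate performs, rewritten as a standalone example. The constants (PageSize, SecondaryHeaderSize, MinAlignment) are assumed values for illustration, not the real ones; the actual code in the diff then trims any unused head and tail of the mapping when a larger alignment shifts the user pointer.

#include <cstdint>
#include <cstdio>

using uptr = std::uintptr_t;

// Assumed values for illustration only; the real constants come from the
// Scudo headers and the running system.
static const uptr PageSize = 4096;
static const uptr SecondaryHeaderSize = 16;
static const uptr MinAlignment = 16;

static uptr RoundUpTo(uptr X, uptr Boundary) {
  // Boundary is assumed to be a power of two, as in the sanitizer runtimes.
  return (X + Boundary - 1) & ~(Boundary - 1);
}

// Mirrors the sizing logic added by this patch: reserve room for the
// Secondary header, for a potential alignment shift, and for one guard
// page on each side of the chunk.
static uptr ComputeMapSize(uptr Size, uptr Alignment) {
  uptr MapSize = Size + SecondaryHeaderSize;
  if (Alignment > MinAlignment)
    MapSize += Alignment;
  MapSize = RoundUpTo(MapSize, PageSize);
  MapSize += 2 * PageSize;  // leading and trailing guard pages
  return MapSize;
}

int main() {
  // A 1 MiB request with default alignment needs the request plus the
  // Secondary header rounded up to a page, plus two guard pages; a
  // 64-byte-aligned request additionally reserves 64 bytes so the user
  // pointer can be shifted without running out of room.
  printf("%zu\n", static_cast<size_t>(ComputeMapSize(1 << 20, MinAlignment)));
  printf("%zu\n", static_cast<size_t>(ComputeMapSize(1 << 20, 64)));
  return 0;
}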
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index fbc7f247d..2950909b5 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -26,20 +26,19 @@ class ScudoLargeMmapAllocator {
void Init(bool AllocatorMayReturnNull) {
PageSize = GetPageSizeCached();
- atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
+ atomic_store_relaxed(&MayReturnNull, AllocatorMayReturnNull);
}
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+ uptr UserSize = Size - AlignedChunkHeaderSize;
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
uptr MapSize = Size + SecondaryHeaderSize;
+ if (Alignment > MinAlignment)
+ MapSize += Alignment;
MapSize = RoundUpTo(MapSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
- // The size passed to the Secondary comprises the alignment, if large
- // enough. Subtract it here to get the requested size, including header.
- if (Alignment > MinAlignment)
- Size -= Alignment;
uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
if (MapBeg == ~static_cast<uptr>(0))
@@ -51,32 +50,32 @@ class ScudoLargeMmapAllocator {
// initial guard page, and both headers. This is the pointer that has to
// abide by alignment requirements.
uptr UserBeg = MapBeg + PageSize + HeadersSize;
+ uptr UserEnd = UserBeg + UserSize;
// In the rare event of larger alignments, we will attempt to fit the mmap
// area better and unmap extraneous memory. This will also ensure that the
// offset and unused bytes field of the header stay small.
if (Alignment > MinAlignment) {
- if (UserBeg & (Alignment - 1))
- UserBeg += Alignment - (UserBeg & (Alignment - 1));
- CHECK_GE(UserBeg, MapBeg);
- uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
- CHECK_GE(NewMapBeg, MapBeg);
- uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
- PageSize) + PageSize;
- CHECK_LE(NewMapEnd, MapEnd);
- // Unmap the extra memory if it's large enough, on both sides.
- uptr Diff = NewMapBeg - MapBeg;
- if (Diff > PageSize)
- UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
- Diff = MapEnd - NewMapEnd;
- if (Diff > PageSize)
- UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
- MapBeg = NewMapBeg;
- MapEnd = NewMapEnd;
- MapSize = NewMapEnd - NewMapBeg;
+ if (!IsAligned(UserBeg, Alignment)) {
+ UserBeg = RoundUpTo(UserBeg, Alignment);
+ CHECK_GE(UserBeg, MapBeg);
+ uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
+ PageSize;
+ CHECK_GE(NewMapBeg, MapBeg);
+ if (NewMapBeg != MapBeg) {
+ UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+ MapBeg = NewMapBeg;
+ }
+ UserEnd = UserBeg + UserSize;
+ }
+ uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
+ if (NewMapEnd != MapEnd) {
+ UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+ MapEnd = NewMapEnd;
+ }
+ MapSize = MapEnd - MapBeg;
}
- uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
CHECK_LE(UserEnd, MapEnd - PageSize);
// Actually mmap the memory, preserving the guard pages on either side.
CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
@@ -94,25 +93,15 @@ class ScudoLargeMmapAllocator {
Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
}
- return reinterpret_cast<void *>(UserBeg);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (atomic_load(&MayReturnNull, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
+ return reinterpret_cast<void *>(Ptr);
}
void *ReturnNullOrDieOnOOM() {
- if (atomic_load(&MayReturnNull, memory_order_acquire))
+ if (atomic_load_relaxed(&MayReturnNull))
return nullptr;
ReportAllocatorCannotReturnNull(true);
}
- void SetMayReturnNull(bool AllocatorMayReturnNull) {
- atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
- }
-
void Deallocate(AllocatorStats *Stats, void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
{
@@ -123,14 +112,6 @@ class ScudoLargeMmapAllocator {
UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
}
- uptr TotalMemoryUsed() {
- UNIMPLEMENTED();
- }
-
- bool PointerIsMine(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
uptr GetActuallyAllocatedSize(void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
// Deduct PageSize as MapSize includes the trailing guard page.
@@ -138,39 +119,9 @@ class ScudoLargeMmapAllocator {
return MapEnd - reinterpret_cast<uptr>(Ptr);
}
- void *GetMetaData(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void *GetBlockBegin(const void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void *GetBlockBeginFastLocked(void *Ptr) {
- UNIMPLEMENTED();
- }
-
- void PrintStats() {
- UNIMPLEMENTED();
- }
-
- void ForceLock() {
- UNIMPLEMENTED();
- }
-
- void ForceUnlock() {
- UNIMPLEMENTED();
- }
-
- void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
- UNIMPLEMENTED();
- }
-
private:
// A Secondary allocated chunk header contains the base of the mapping and
- // its size. Currently, the base is always a page before the header, but
- // we might want to extend that number in the future based on the size of
- // the allocation.
+ // its size, which comprises the guard pages.
struct SecondaryHeader {
uptr MapBeg;
uptr MapSize;