author     Kostya Kortchinsky <kostyak@google.com>  2016-12-13 19:31:54 +0000
committer  Kostya Kortchinsky <kostyak@google.com>  2016-12-13 19:31:54 +0000
commit     5b8087e9a144cb8a8bef13714978422bd1b08892 (patch)
tree       7cf2e3ebd5470da16fd0c949efae0d8bf0ea46fa /lib/scudo
parent     06f626a00f7c60c010593c856b13d69a5e8adae3 (diff)
Corrected D27428: Do not use the alignment-rounded-up size with secondary
Summary:
I actually had an integer overflow on 32-bit with D27428 that didn't
reproduce locally, as the test servers would manage to allocate addresses in
the 0xffffxxxx range, which led to some issues when rounding addresses. At
this point, I feel that Scudo could benefit from having its own combined
allocator, as we don't get any benefit from the current one, but have to
work around some hurdles (alignment checks, rounding up that is no longer
needed, extraneous code).

Reviewers: kcc, alekseyshl

Subscribers: llvm-commits, kubabrecka

Differential Revision: https://reviews.llvm.org/D27681

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@289572 91177308-0d34-0410-b5e6-96231b3b80d8
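A minimal standalone sketch of the failure mode described above: with a
32-bit address, the usual RoundUpTo idiom wraps around for values near the
top of the address space. The address and boundary below are illustrative,
not values taken from the patch.

#include <cstdint>
#include <cstdio>

// Simplified 32-bit analogue of the sanitizer RoundUpTo helper.
static uint32_t RoundUpTo32(uint32_t X, uint32_t Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  // An address in the 0xffffxxxx range, as handed out on the test servers.
  uint32_t Addr = 0xffffa000u;
  uint32_t Rounded = RoundUpTo32(Addr, 0x10000u);
  // X + Boundary - 1 wraps past 2^32, so the result is *smaller* than X.
  printf("0x%08x rounds to 0x%08x\n", Addr, Rounded);
  return Rounded < Addr;  // exits 1: the overflow happened
}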
Diffstat (limited to 'lib/scudo')
-rw-r--r--  lib/scudo/scudo_allocator.cpp           | 15
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h   | 40
2 files changed, 36 insertions, 19 deletions
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 57a2be494..4f3b05ffb 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -402,12 +402,18 @@ struct Allocator {
Size = 1;
if (Size >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
- uptr RoundedSize = RoundUpTo(Size, MinAlignment);
- uptr NeededSize = RoundedSize + AlignedChunkHeaderSize;
+
+ uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
if (Alignment > MinAlignment)
NeededSize += Alignment;
if (NeededSize >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
+
+ // Primary backed and Secondary backed allocations have a different
+ // treatment. We deal with alignment requirements of Primary serviced
+ // allocations here, but the Secondary will take care of its own alignment
+ // needs, which means we also have to work around some limitations of the
+ // combined allocator to accommodate the situation.
bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
void *Ptr;
@@ -426,8 +432,11 @@ struct Allocator {
// If the allocation was serviced by the secondary, the returned pointer
// accounts for ChunkHeaderSize to pass the alignment check of the combined
// allocator. Adjust it here.
- if (!FromPrimary)
+ if (!FromPrimary) {
AllocBeg -= AlignedChunkHeaderSize;
+ if (Alignment > MinAlignment)
+ NeededSize -= Alignment;
+ }
uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
reinterpret_cast<void *>(AllocBeg));
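As a rough illustration of the frontend hunks above: Alignment padding is
added to NeededSize so the combined allocator's checks pass, and subtracted
again once the Secondary has serviced the allocation, since the Secondary
aligns the user pointer itself. A hedged sketch, with placeholder constants
standing in for Scudo's real MinAlignment and AlignedChunkHeaderSize:

#include <cstdint>
#include <cstdio>
typedef uintptr_t uptr;

// Placeholder constants, not Scudo's real values.
static const uptr MinAlignment = 1 << 4;
static const uptr AlignedChunkHeaderSize = 16;

static uptr RoundUpTo(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

// Mirrors the patched logic: pad NeededSize with Alignment up front so the
// combined allocator's size checks pass, then take the padding back for a
// Secondary-backed allocation, which handles its own alignment needs.
static uptr neededSizeFor(uptr Size, uptr Alignment, bool FromPrimary) {
  uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
  if (Alignment > MinAlignment)
    NeededSize += Alignment;
  if (!FromPrimary && Alignment > MinAlignment)
    NeededSize -= Alignment;
  return NeededSize;
}

int main() {
  // e.g. a 1 MB request with 64-byte alignment, serviced by the Secondary.
  printf("%zu\n", (size_t)neededSizeFor(1 << 20, 64, /*FromPrimary=*/false));
  return 0;
}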
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index d3468f840..b984f0db4 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -32,32 +32,39 @@ class ScudoLargeMmapAllocator {
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
- uptr HeadersSize = sizeof(SecondaryHeader) + AlignedChunkHeaderSize;
- uptr MapSize = RoundUpTo(Size + sizeof(SecondaryHeader), PageSize);
+ uptr MapSize = Size + SecondaryHeaderSize;
+ MapSize = RoundUpTo(MapSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
- // Adding an extra Alignment is not required, it was done by the frontend.
+ // The size passed to the Secondary comprises the alignment, if large
+ // enough. Subtract it here to get the requested size, including header.
+ if (Alignment > MinAlignment)
+ Size -= Alignment;
+
uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
if (MapBeg == ~static_cast<uptr>(0))
return ReturnNullOrDieOnOOM();
// A page-aligned pointer is assumed after that, so check it now.
CHECK(IsAligned(MapBeg, PageSize));
uptr MapEnd = MapBeg + MapSize;
+ // The beginning of the user area for that allocation comes after the
+ // initial guard page, and both headers. This is the pointer that has to
+ // abide by alignment requirements.
uptr UserBeg = MapBeg + PageSize + HeadersSize;
- // In the event of larger alignments, we will attempt to fit the mmap area
- // better and unmap extraneous memory. This will also ensure that the
+
+ // In the rare event of larger alignments, we will attempt to fit the mmap
+ // area better and unmap extraneous memory. This will also ensure that the
// offset and unused bytes field of the header stay small.
if (Alignment > MinAlignment) {
if (UserBeg & (Alignment - 1))
UserBeg += Alignment - (UserBeg & (Alignment - 1));
CHECK_GE(UserBeg, MapBeg);
- uptr NewMapBeg = UserBeg - HeadersSize;
- NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize;
+ uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
CHECK_GE(NewMapBeg, MapBeg);
- uptr NewMapSize = RoundUpTo(MapSize - Alignment, PageSize);
- uptr NewMapEnd = NewMapBeg + NewMapSize;
+ uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
+ PageSize) + PageSize;
CHECK_LE(NewMapEnd, MapEnd);
- // Unmap the extra memory if it's large enough.
+ // Unmap the extra memory if it's large enough, on both sides.
uptr Diff = NewMapBeg - MapBeg;
if (Diff > PageSize)
UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
@@ -65,14 +72,13 @@ class ScudoLargeMmapAllocator {
if (Diff > PageSize)
UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
MapBeg = NewMapBeg;
- MapSize = NewMapSize;
MapEnd = NewMapEnd;
+ MapSize = NewMapEnd - NewMapBeg;
}
- uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size;
- // For larger alignments, Alignment was added by the frontend to Size.
- if (Alignment > MinAlignment)
- UserEnd -= Alignment;
+
+ uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
CHECK_LE(UserEnd, MapEnd - PageSize);
+ // Actually mmap the memory, preserving the guard pages on either side.
CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
uptr Ptr = UserBeg - AlignedChunkHeaderSize;
@@ -84,7 +90,7 @@ class ScudoLargeMmapAllocator {
// the guard pages.
Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
- CHECK(IsAligned(UserBeg, Alignment));
+
return reinterpret_cast<void *>(UserBeg);
}
@@ -173,6 +179,8 @@ class ScudoLargeMmapAllocator {
return getHeader(reinterpret_cast<uptr>(Ptr));
}
+ const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
+ const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
uptr PageSize;
atomic_uint8_t MayReturnNull;
};
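To make the Secondary's layout math concrete, here is a hedged,
self-contained sketch of how UserBeg and the tightened mapping are computed
for a large alignment. PageSize and the header sizes are placeholder values,
and Size is assumed to already include AlignedChunkHeaderSize with the
Alignment padding already subtracted, as in the hunks above.

#include <cinttypes>
#include <cstdint>
#include <cstdio>
typedef uintptr_t uptr;

// Placeholder constants; Scudo queries the real values at runtime.
static const uptr PageSize = 4096;
static const uptr SecondaryHeaderSize = 16;
static const uptr AlignedChunkHeaderSize = 16;
static const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;

static uptr RoundUpTo(uptr X, uptr B) { return (X + B - 1) & ~(B - 1); }
static uptr RoundDownTo(uptr X, uptr B) { return X & ~(B - 1); }

// Given a hypothetical reservation at MapBeg, place the user pointer after
// the leading guard page and both headers, align it up, and tighten the
// mapping to [NewMapBeg, NewMapEnd), still bracketed by the guard pages.
static void sketchLayout(uptr MapBeg, uptr Size, uptr Alignment) {
  uptr UserBeg = MapBeg + PageSize + HeadersSize;
  if (UserBeg & (Alignment - 1))
    UserBeg += Alignment - (UserBeg & (Alignment - 1));
  uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
  uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
                             PageSize) + PageSize;
  printf("UserBeg=0x%" PRIxPTR " map=[0x%" PRIxPTR ",0x%" PRIxPTR ")\n",
         UserBeg, NewMapBeg, NewMapEnd);
}

int main() {
  sketchLayout(/*MapBeg=*/0x10000000, /*Size=*/1 << 20, /*Alignment=*/1 << 16);
  return 0;
}

Trimming on both sides of the aligned user pointer is what keeps the offset
and unused-bytes fields of the header small, per the comment in the hunk.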