-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_combined.h | 16
-rw-r--r--  lib/scudo/scudo_allocator.cpp | 15
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h | 40
3 files changed, 51 insertions, 20 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index de96e2768..19e1ae9b9 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -49,16 +49,30 @@ class CombinedAllocator {
size = 1;
if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+ uptr original_size = size;
+ // If alignment requirements are to be fulfilled by the frontend allocator
+ // rather than by the primary or secondary, passing an alignment lower than
+ // or equal to 8 will prevent any further rounding up, as well as the later
+ // alignment check.
if (alignment > 8)
size = RoundUpTo(size, alignment);
void *res;
bool from_primary = primary_.CanAllocate(size, alignment);
+ // The primary allocator should return a 2^x aligned allocation when
+ // requested 2^x bytes, hence the use of the rounded-up 'size' when the
+ // request is serviced by the primary (this no longer holds when the
+ // primary uses a non-fixed base address). The secondary takes care of the
+ // alignment without such a requirement, and allocating the rounded-up
+ // 'size' would waste memory, so we pass 'original_size' instead.
if (from_primary)
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
- res = secondary_.Allocate(&stats_, size, alignment);
+ res = secondary_.Allocate(&stats_, original_size, alignment);
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ // When serviced by the secondary, the chunk comes from a mmap allocation
+ // and will be zeroed out anyway. We only need to clear out the chunk if
+ // it was serviced by the primary, hence using the rounded up 'size'.
if (cleared && res && from_primary)
internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res;
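Note: to make the sizing decision above easier to follow, here is a minimal standalone sketch, not part of the patch. RoundUpTo mirrors the sanitizer_common helper (power-of-two boundary assumed), and PickBackendSize is a hypothetical name for the choice the combined allocator makes inline.

    // Minimal sketch (not from the patch) of the sizing decision above.
    #include <cstdint>
    typedef uintptr_t uptr;

    // Same semantics as the sanitizer_common helper: 'boundary' must be a
    // power of two.
    static uptr RoundUpTo(uptr size, uptr boundary) {
      return (size + boundary - 1) & ~(boundary - 1);
    }

    // Hypothetical helper: returns the size handed to the backend. A 48-byte
    // request with 64-byte alignment becomes 64 for the primary, so that a
    // fixed-base primary hands back a suitably aligned chunk; the secondary
    // aligns internally, so it only needs the original 48 bytes.
    static uptr PickBackendSize(uptr size, uptr alignment, bool from_primary) {
      const uptr original_size = size;
      if (alignment > 8)
        size = RoundUpTo(size, alignment);
      return from_primary ? size : original_size;
    }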
diff --git a/lib/scudo/scudo_allocator.cpp b/lib/scudo/scudo_allocator.cpp
index 57a2be494..4f3b05ffb 100644
--- a/lib/scudo/scudo_allocator.cpp
+++ b/lib/scudo/scudo_allocator.cpp
@@ -402,12 +402,18 @@ struct Allocator {
Size = 1;
if (Size >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
- uptr RoundedSize = RoundUpTo(Size, MinAlignment);
- uptr NeededSize = RoundedSize + AlignedChunkHeaderSize;
+
+ uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
if (Alignment > MinAlignment)
NeededSize += Alignment;
if (NeededSize >= MaxAllowedMallocSize)
return BackendAllocator.ReturnNullOrDieOnBadRequest();
+
+ // Primary-backed and Secondary-backed allocations are treated differently.
+ // We deal with the alignment requirements of Primary-serviced allocations
+ // here, but the Secondary takes care of its own alignment needs, which
+ // means we also have to work around some limitations of the combined
+ // allocator to accommodate the situation.
bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
void *Ptr;
@@ -426,8 +432,11 @@ struct Allocator {
// If the allocation was serviced by the secondary, the returned pointer
// accounts for ChunkHeaderSize to pass the alignment check of the combined
// allocator. Adjust it here.
- if (!FromPrimary)
+ if (!FromPrimary) {
AllocBeg -= AlignedChunkHeaderSize;
+ if (Alignment > MinAlignment)
+ NeededSize -= Alignment;
+ }
uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
reinterpret_cast<void *>(AllocBeg));
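Note: a rough sketch of the frontend size bookkeeping in the two hunks above, under assumed 64-bit defaults (MinAlignment and AlignedChunkHeaderSize of 16); FrontendNeededSize and SecondaryNeededSize are hypothetical helpers, not code from the patch.

    // Rough sketch of the frontend size bookkeeping; constants are assumed
    // 64-bit defaults, and both helpers are hypothetical.
    #include <cstdint>
    typedef uintptr_t uptr;

    static const uptr MinAlignment = 16;            // assumed 64-bit default
    static const uptr AlignedChunkHeaderSize = 16;  // assumed for illustration

    static uptr RoundUpTo(uptr size, uptr boundary) {
      return (size + boundary - 1) & ~(boundary - 1);
    }

    // Size passed to the backend: the user size rounded to MinAlignment, plus
    // the header, plus a worst-case Alignment cushion for over-aligned
    // requests.
    static uptr FrontendNeededSize(uptr Size, uptr Alignment) {
      uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
      if (Alignment > MinAlignment)
        NeededSize += Alignment;
      return NeededSize;
    }

    // For a Secondary-backed allocation the cushion is not needed: the
    // Secondary aligns the user pointer itself, so the cushion is subtracted
    // again, mirroring 'NeededSize -= Alignment' in the hunk above.
    static uptr SecondaryNeededSize(uptr Size, uptr Alignment) {
      uptr NeededSize = FrontendNeededSize(Size, Alignment);
      if (Alignment > MinAlignment)
        NeededSize -= Alignment;
      return NeededSize;
    }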
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index d3468f840..b984f0db4 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -32,32 +32,39 @@ class ScudoLargeMmapAllocator {
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
- uptr HeadersSize = sizeof(SecondaryHeader) + AlignedChunkHeaderSize;
- uptr MapSize = RoundUpTo(Size + sizeof(SecondaryHeader), PageSize);
+ uptr MapSize = Size + SecondaryHeaderSize;
+ MapSize = RoundUpTo(MapSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
- // Adding an extra Alignment is not required, it was done by the frontend.
+ // The size passed to the Secondary comprises the alignment, if large
+ // enough. Subtract it here to get the requested size, including header.
+ if (Alignment > MinAlignment)
+ Size -= Alignment;
+
uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
if (MapBeg == ~static_cast<uptr>(0))
return ReturnNullOrDieOnOOM();
// A page-aligned pointer is assumed after that, so check it now.
CHECK(IsAligned(MapBeg, PageSize));
uptr MapEnd = MapBeg + MapSize;
+ // The beginning of the user area for that allocation comes after the
+ // initial guard page, and both headers. This is the pointer that has to
+ // abide by alignment requirements.
uptr UserBeg = MapBeg + PageSize + HeadersSize;
- // In the event of larger alignments, we will attempt to fit the mmap area
- // better and unmap extraneous memory. This will also ensure that the
+
+ // In the rare event of larger alignments, we will attempt to fit the mmap
+ // area better and unmap extraneous memory. This will also ensure that the
// offset and unused bytes field of the header stay small.
if (Alignment > MinAlignment) {
if (UserBeg & (Alignment - 1))
UserBeg += Alignment - (UserBeg & (Alignment - 1));
CHECK_GE(UserBeg, MapBeg);
- uptr NewMapBeg = UserBeg - HeadersSize;
- NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize;
+ uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) - PageSize;
CHECK_GE(NewMapBeg, MapBeg);
- uptr NewMapSize = RoundUpTo(MapSize - Alignment, PageSize);
- uptr NewMapEnd = NewMapBeg + NewMapSize;
+ uptr NewMapEnd = RoundUpTo(UserBeg + (Size - AlignedChunkHeaderSize),
+ PageSize) + PageSize;
CHECK_LE(NewMapEnd, MapEnd);
- // Unmap the extra memory if it's large enough.
+ // Unmap the extra memory if it's large enough, on both sides.
uptr Diff = NewMapBeg - MapBeg;
if (Diff > PageSize)
UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
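Note: the layout arithmetic in the hunk above is easier to see in one place; the following is a self-contained sketch under the same assumptions the patch makes (Size already includes the aligned chunk header and has had the extra Alignment subtracted). SecondaryLayout and ComputeLayout are hypothetical names.

    // Self-contained sketch of the layout arithmetic in the hunk above.
    #include <cstdint>
    typedef uintptr_t uptr;

    static uptr RoundUpTo(uptr x, uptr b)   { return (x + b - 1) & ~(b - 1); }
    static uptr RoundDownTo(uptr x, uptr b) { return x & ~(b - 1); }

    struct SecondaryLayout {
      uptr NewMapBeg, NewMapEnd, UserBeg, UserEnd;
    };

    static SecondaryLayout ComputeLayout(uptr MapBeg, uptr Size, uptr Alignment,
                                         uptr PageSize, uptr HeadersSize,
                                         uptr AlignedChunkHeaderSize) {
      SecondaryLayout L;
      // The user area starts after the leading guard page and both headers,
      // then is pushed up to the requested alignment.
      L.UserBeg = MapBeg + PageSize + HeadersSize;
      if (L.UserBeg & (Alignment - 1))
        L.UserBeg += Alignment - (L.UserBeg & (Alignment - 1));
      L.UserEnd = L.UserBeg + (Size - AlignedChunkHeaderSize);
      // Trim the mapping on both sides, keeping one guard page before the
      // headers and one after the user area.
      L.NewMapBeg = RoundDownTo(L.UserBeg - HeadersSize, PageSize) - PageSize;
      L.NewMapEnd = RoundUpTo(L.UserEnd, PageSize) + PageSize;
      return L;
    }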
@@ -65,14 +72,13 @@ class ScudoLargeMmapAllocator {
if (Diff > PageSize)
UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
MapBeg = NewMapBeg;
- MapSize = NewMapSize;
MapEnd = NewMapEnd;
+ MapSize = NewMapEnd - NewMapBeg;
}
- uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size;
- // For larger alignments, Alignment was added by the frontend to Size.
- if (Alignment > MinAlignment)
- UserEnd -= Alignment;
+
+ uptr UserEnd = UserBeg + (Size - AlignedChunkHeaderSize);
CHECK_LE(UserEnd, MapEnd - PageSize);
+ // Actually mmap the memory, preserving the guard pages on either side.
CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
uptr Ptr = UserBeg - AlignedChunkHeaderSize;
@@ -84,7 +90,7 @@ class ScudoLargeMmapAllocator {
// the guard pages.
Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
- CHECK(IsAligned(UserBeg, Alignment));
+
return reinterpret_cast<void *>(UserBeg);
}
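Note: a small assertion sketch restating the invariants this hunk relies on; CheckSecondaryLayout is a hypothetical helper, and the last assertion only records that UserBeg is aligned by construction, matching the CHECK(IsAligned(UserBeg, Alignment)) removed in the hunk above.

    // Assertion sketch of the invariants used above; CheckSecondaryLayout is
    // a hypothetical helper, not allocator code.
    #include <cassert>
    #include <cstdint>
    typedef uintptr_t uptr;

    static void CheckSecondaryLayout(uptr MapBeg, uptr MapEnd, uptr UserBeg,
                                     uptr UserEnd, uptr PageSize,
                                     uptr Alignment) {
      // The leading guard page lies entirely before the headers and user area.
      assert(MapBeg + PageSize <= UserBeg);
      // The user area ends before the trailing guard page (the CHECK_LE above).
      assert(UserEnd <= MapEnd - PageSize);
      // UserBeg is aligned by construction in the previous hunk, matching the
      // CHECK(IsAligned(UserBeg, Alignment)) that this change removes.
      assert((UserBeg & (Alignment - 1)) == 0);
    }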
@@ -173,6 +179,8 @@ class ScudoLargeMmapAllocator {
return getHeader(reinterpret_cast<uptr>(Ptr));
}
+ const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
+ const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
uptr PageSize;
atomic_uint8_t MayReturnNull;
};