summaryrefslogtreecommitdiff
path: root/lib/scudo/scudo_allocator_secondary.h
diff options
context:
space:
mode:
authorKostya Kortchinsky <kostyak@google.com>2016-11-30 17:32:20 +0000
committerKostya Kortchinsky <kostyak@google.com>2016-11-30 17:32:20 +0000
commit41b6e3e5d80cd78fef3a2e2b409e00ee0c77f423 (patch)
tree6b1598b6325b28c9e4f51873cadc6f4ef31aa7d1 /lib/scudo/scudo_allocator_secondary.h
parentab8eb68f7e875f1501cdc2e5d6e70a1042d5d48c (diff)
[scudo] 32-bit and hardware agnostic support
Summary: This update introduces i386 support for the Scudo Hardened Allocator, and offers software alternatives for functions that used to require hardware specific instruction sets. This should make porting to new architectures easier. Among the changes: - The chunk header has been changed to accommodate the size limitations encountered on 32-bit architectures. We now fit everything in 64-bit. This was achieved by storing the amount of unused bytes in an allocation rather than the size itself, as one can be deduced from the other with the help of the GetActuallyAllocatedSize function. As it turns out, this header can be used for both 64 and 32 bit, and as such we dropped the requirement for the 128-bit compare and exchange instruction support (cmpxchg16b). - Add 32-bit support for the checksum and the PRNG functions: if the SSE 4.2 instruction set is supported, use the 32-bit CRC32 instruction, and in the XorShift128, use a 32-bit based state instead of 64-bit. - Add software support for CRC32: if SSE 4.2 is not supported, fall back on a software implementation. - Modify tests that were not 32-bit compliant, and expand them to cover more allocation and alignment sizes. The random shuffle test has been deactivated for linux-i386 & linux-i686 as the 32-bit sanitizer allocator doesn't currently randomize chunks. Reviewers: alekseyshl, kcc Subscribers: filcab, llvm-commits, tberghammer, danalbert, srhines, mgorny, modocache Differential Revision: https://reviews.llvm.org/D26358 git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@288255 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/scudo/scudo_allocator_secondary.h')
-rw-r--r--lib/scudo/scudo_allocator_secondary.h29
1 file changed, 19 insertions, 10 deletions
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index 451803c0c..4b62b8a35 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -32,7 +32,7 @@ class ScudoLargeMmapAllocator {
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
- uptr HeadersSize = sizeof(SecondaryHeader) + ChunkHeaderSize;
+ uptr HeadersSize = sizeof(SecondaryHeader) + AlignedChunkHeaderSize;
uptr MapSize = RoundUpTo(Size + sizeof(SecondaryHeader), PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
@@ -52,27 +52,36 @@ class ScudoLargeMmapAllocator {
UserBeg += Alignment - (UserBeg & (Alignment - 1));
CHECK_GE(UserBeg, MapBeg);
uptr NewMapBeg = UserBeg - HeadersSize;
- NewMapBeg = (NewMapBeg & ~(PageSize - 1)) - PageSize;
+ NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize;
CHECK_GE(NewMapBeg, MapBeg);
- uptr NewMapSize = MapEnd - NewMapBeg;
- uptr Diff = NewMapBeg - MapBeg;
+ uptr NewMapSize = RoundUpTo(MapSize - Alignment, PageSize);
+ uptr NewMapEnd = NewMapBeg + NewMapSize;
+ CHECK_LE(NewMapEnd, MapEnd);
// Unmap the extra memory if it's large enough.
+ uptr Diff = NewMapBeg - MapBeg;
if (Diff > PageSize)
UnmapOrDie(reinterpret_cast<void *>(MapBeg), Diff);
+ Diff = MapEnd - NewMapEnd;
+ if (Diff > PageSize)
+ UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff);
MapBeg = NewMapBeg;
MapSize = NewMapSize;
+ MapEnd = NewMapEnd;
}
- uptr UserEnd = UserBeg - ChunkHeaderSize + Size;
+ uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size;
// For larger alignments, Alignment was added by the frontend to Size.
if (Alignment > MinAlignment)
UserEnd -= Alignment;
CHECK_LE(UserEnd, MapEnd - PageSize);
CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
- uptr Ptr = UserBeg - ChunkHeaderSize;
+ uptr Ptr = UserBeg - AlignedChunkHeaderSize;
SecondaryHeader *Header = getHeader(Ptr);
Header->MapBeg = MapBeg;
Header->MapSize = MapSize;
+ // The primary adds the whole class size to the stats when allocating a
+ // chunk, so we will do something similar here. But we will not account for
+ // the guard pages.
Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
CHECK(IsAligned(UserBeg, Alignment));
@@ -97,8 +106,8 @@ class ScudoLargeMmapAllocator {
void Deallocate(AllocatorStats *Stats, void *Ptr) {
SecondaryHeader *Header = getHeader(Ptr);
- Stats->Sub(AllocatorStatAllocated, Header->MapSize);
- Stats->Sub(AllocatorStatMapped, Header->MapSize);
+ Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
+ Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
}
@@ -154,8 +163,8 @@ class ScudoLargeMmapAllocator {
uptr MapBeg;
uptr MapSize;
};
- // Check that sizeof(SecondaryHeader) is a multiple of 16.
- COMPILER_CHECK((sizeof(SecondaryHeader) & 0xf) == 0);
+ // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
+ COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
SecondaryHeader *getHeader(uptr Ptr) {
return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));