author    Kostya Kortchinsky <kostyak@google.com>  2017-12-06 16:53:24 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2017-12-06 16:53:24 +0000
commit    c89976de25cd7700d24796f7f2fa9f20e51e4813 (patch)
tree      1dd9bd94be551bef79ec8a8a84deafdc474800fb /lib
parent    92feb463e158b8a1492c04c281fa24c9674be4e8 (diff)
[scudo] Correct performance regression in Secondary
Summary:
This went unnoticed: `RoundUpTo` does not produce a constant expression, so the affected sizes were not compile-time constants either. Enforce them to be `static constexpr` and replace `RoundUpTo` with its underlying expression, so the compiler can now optimize the associated computations accordingly. Also, looking at the produced assembly, `PageSize` was fetched multiple times during `Allocate`; keeping a local copy means it is fetched once and held in a register.

Reviewers: alekseyshl, flowerhack

Reviewed By: alekseyshl

Subscribers: llvm-commits

Differential Revision: https://reviews.llvm.org/D40862

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@319903 91177308-0d34-0410-b5e6-96231b3b80d8
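For illustration, a minimal standalone sketch of the constant-expression point (hypothetical names, not the actual sanitizer_common or Scudo code): a value produced by an ordinary round-up helper cannot initialize a `static constexpr` member, whereas writing the bit-mask expression out directly can, which lets the compiler fold every computation derived from it.

    #include <cstdint>

    using uptr = uintptr_t;

    // Hypothetical stand-ins for the allocator's constants.
    constexpr uptr MinAlignment = 16;
    constexpr uptr HeaderSize = 32;

    // An ordinary (non-constexpr) round-up helper, similar in spirit to RoundUpTo.
    inline uptr roundUpTo(uptr Size, uptr Boundary) {
      return (Size + Boundary - 1) & ~(Boundary - 1);
    }

    struct Example {
      // Does not compile: roundUpTo() is not usable in a constant expression.
      // static constexpr uptr AlignedHeaderSize = roundUpTo(HeaderSize, MinAlignment);

      // Spelling the expression out makes the value a true compile-time constant,
      // so anything computed from it can be folded by the compiler.
      static constexpr uptr AlignedHeaderSize =
          (HeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
    };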
Diffstat (limited to 'lib')
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h  18
1 file changed, 10 insertions, 8 deletions
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index 1117d51d3..f2002ed98 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -24,16 +24,17 @@
class ScudoLargeMmapAllocator {
public:
void Init() {
- PageSize = GetPageSizeCached();
+ PageSizeCached = GetPageSizeCached();
}
void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
- uptr UserSize = Size - AlignedChunkHeaderSize;
+ const uptr UserSize = Size - AlignedChunkHeaderSize;
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
uptr MapSize = Size + AlignedReservedAddressRangeSize;
if (Alignment > MinAlignment)
MapSize += Alignment;
+ const uptr PageSize = PageSizeCached;
MapSize = RoundUpTo(MapSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
@@ -79,7 +80,7 @@ class ScudoLargeMmapAllocator {
// Actually mmap the memory, preserving the guard pages on either side
CHECK_EQ(MapBeg + PageSize,
AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
- uptr Ptr = UserBeg - AlignedChunkHeaderSize;
+ const uptr Ptr = UserBeg - AlignedChunkHeaderSize;
ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
*StoredRange = AddressRange;
@@ -98,6 +99,7 @@ class ScudoLargeMmapAllocator {
void Deallocate(AllocatorStats *Stats, void *Ptr) {
// Since we're unmapping the entirety of where the ReservedAddressRange
// actually is, copy onto the stack.
+ const uptr PageSize = PageSizeCached;
ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr);
{
SpinMutexLock l(&StatsMutex);
@@ -113,7 +115,7 @@ class ScudoLargeMmapAllocator {
// Deduct PageSize as ReservedAddressRange size includes the trailing guard
// page.
uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) +
- StoredRange->size() - PageSize;
+ StoredRange->size() - PageSizeCached;
return MapEnd - reinterpret_cast<uptr>(Ptr);
}
@@ -126,12 +128,12 @@ class ScudoLargeMmapAllocator {
return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
}
- const uptr AlignedReservedAddressRangeSize =
- RoundUpTo(sizeof(ReservedAddressRange), MinAlignment);
- const uptr HeadersSize =
+ static constexpr uptr AlignedReservedAddressRangeSize =
+ (sizeof(ReservedAddressRange) + MinAlignment - 1) & ~(MinAlignment - 1);
+ static constexpr uptr HeadersSize =
AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;
- uptr PageSize;
+ uptr PageSizeCached;
SpinMutex StatsMutex;
};
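On the `PageSize` point, a hedged sketch of the pattern the patch applies (hypothetical class, not the actual allocator): copying the cached page size into a local `const` before the computations means the member is loaded once and the value can stay in a register, since opaque calls in between cannot force the compiler to reload a local.

    #include <cstdint>

    using uptr = uintptr_t;

    void Use(uptr);  // opaque to the compiler: a member read after it may require a reload

    struct PageSizeUser {
      uptr PageSizeCached;  // set once at Init() in the real allocator

      void Work(uptr MapSize) {
        // Single load of the member; the local cannot change behind the
        // compiler's back, so it can live in a register across the calls below.
        const uptr PageSize = PageSizeCached;
        MapSize = (MapSize + PageSize - 1) & ~(PageSize - 1);  // round up to a page
        MapSize += 2 * PageSize;  // one guard page before and one after the chunk
        Use(MapSize);
      }
    };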