Diffstat (limited to 'lib')
-rw-r--r--  lib/sanitizer_common/sanitizer_fuchsia.cc        |  4
-rw-r--r--  lib/sanitizer_common/sanitizer_posix_libcdep.cc  |  4
-rw-r--r--  lib/sanitizer_common/sanitizer_win.cc            |  4
-rw-r--r--  lib/scudo/scudo_allocator_secondary.h            | 65
4 files changed, 45 insertions, 32 deletions
diff --git a/lib/sanitizer_common/sanitizer_fuchsia.cc b/lib/sanitizer_common/sanitizer_fuchsia.cc
index c03c9939c..c130c10c2 100644
--- a/lib/sanitizer_common/sanitizer_fuchsia.cc
+++ b/lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -266,6 +266,10 @@ void ReservedAddressRange::Unmap(uptr addr, uptr size) {
CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
CHECK_LE(size, size_);
UnmapOrDie(reinterpret_cast<void*>(addr), size);
+ if (addr_as_void == base_) {
+ base_ = reinterpret_cast<void*>(addr + size);
+ }
+ size_ = size_ - size;
}
// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
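
The bookkeeping added above (and repeated verbatim in the POSIX version below) supports trimming the reservation only at its ends: releasing a prefix advances base_, and releasing either end shrinks size_. A minimal sketch of that invariant, using uintptr_t and assert in place of the sanitizer's uptr and CHECK macros:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for ReservedAddressRange's bookkeeping; Unmap may
// release a prefix or a suffix of the reservation, never a middle slice.
struct RangeBookkeeping {
  uintptr_t base;
  uintptr_t size;

  void Unmap(uintptr_t addr, uintptr_t len) {
    // Same precondition as the CHECKs above: start or end must line up.
    assert(addr == base || addr + len == base + size);
    assert(len <= size);
    // (The real code calls UnmapOrDie(addr, len) here.)
    if (addr == base)
      base = addr + len;  // released a prefix: advance the base
    size -= len;          // releasing either end shrinks the range
  }
};
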
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index 40b5f8a4e..20a5f42e4 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -370,6 +370,10 @@ void ReservedAddressRange::Unmap(uptr addr, uptr size) {
CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
CHECK_LE(size, size_);
UnmapOrDie(reinterpret_cast<void*>(addr), size);
+ if (addr_as_void == base_) {
+ base_ = reinterpret_cast<void*>(addr + size);
+ }
+ size_ = size_ - size;
}
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index dee1add82..0d72b494d 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -255,6 +255,10 @@ void ReservedAddressRange::Unmap(uptr addr, uptr size) {
// Only unmap if it covers the entire range.
CHECK((addr == base_as_uptr) && (size == size_));
UnmapOrDie(addr_as_void, size);
+ if (addr_as_void == base_) {
+ base_ = reinterpret_cast<void*>(addr + size);
+ }
+ size_ = size_ - size;
}
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
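
Note that on Windows the preceding CHECK only allows releasing the entire reservation (VirtualFree with MEM_RELEASE cannot free a sub-range), so the shared bookkeeping always leaves an empty range here: base_ ends up one past the old end and size_ becomes zero.
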
diff --git a/lib/scudo/scudo_allocator_secondary.h b/lib/scudo/scudo_allocator_secondary.h
index 5220b7c57..1117d51d3 100644
--- a/lib/scudo/scudo_allocator_secondary.h
+++ b/lib/scudo/scudo_allocator_secondary.h
@@ -31,14 +31,15 @@ class ScudoLargeMmapAllocator {
uptr UserSize = Size - AlignedChunkHeaderSize;
// The Scudo frontend prevents us from allocating more than
// MaxAllowedMallocSize, so integer overflow checks would be superfluous.
- uptr MapSize = Size + SecondaryHeaderSize;
+ uptr MapSize = Size + AlignedReservedAddressRangeSize;
if (Alignment > MinAlignment)
MapSize += Alignment;
MapSize = RoundUpTo(MapSize, PageSize);
// Account for 2 guard pages, one before and one after the chunk.
MapSize += 2 * PageSize;
- uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
+ ReservedAddressRange AddressRange;
+ uptr MapBeg = AddressRange.Init(MapSize);
if (MapBeg == ~static_cast<uptr>(0))
return ReturnNullOrDieOnFailure::OnOOM();
// A page-aligned pointer is assumed after that, so check it now.
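
With the header now a full ReservedAddressRange, the map size grows by the range object's aligned size instead of the old two-word SecondaryHeader. A self-contained restatement of the computation above, with illustrative constants (4 KiB pages, 16-byte MinAlignment) rather than Scudo's actual configuration:

#include <cstdint>

constexpr uintptr_t PageSize = 4096;    // assumption for illustration
constexpr uintptr_t MinAlignment = 16;  // assumption for illustration

constexpr uintptr_t RoundUpTo(uintptr_t X, uintptr_t Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

// Mirrors the steps above: header space, optional alignment slack, page
// rounding, then one guard page on each side.
constexpr uintptr_t ComputeMapSize(uintptr_t Size, uintptr_t Alignment,
                                   uintptr_t HeaderRangeSize) {
  uintptr_t MapSize = Size + HeaderRangeSize;
  if (Alignment > MinAlignment)
    MapSize += Alignment;
  MapSize = RoundUpTo(MapSize, PageSize);
  return MapSize + 2 * PageSize;
}

// e.g. ComputeMapSize(20000, /*Alignment=*/16, /*HeaderRangeSize=*/64)
//      == 20480 + 8192 == 28672
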
@@ -61,27 +62,27 @@ class ScudoLargeMmapAllocator {
PageSize;
CHECK_GE(NewMapBeg, MapBeg);
if (NewMapBeg != MapBeg) {
- UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+ AddressRange.Unmap(MapBeg, NewMapBeg - MapBeg);
MapBeg = NewMapBeg;
}
UserEnd = UserBeg + UserSize;
}
uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
if (NewMapEnd != MapEnd) {
- UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+ AddressRange.Unmap(NewMapEnd, MapEnd - NewMapEnd);
MapEnd = NewMapEnd;
}
MapSize = MapEnd - MapBeg;
}
CHECK_LE(UserEnd, MapEnd - PageSize);
- // Actually mmap the memory, preserving the guard pages on either side.
- CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
- MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
+ // Actually mmap the memory, preserving the guard pages on either side.
+ CHECK_EQ(MapBeg + PageSize,
+ AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
uptr Ptr = UserBeg - AlignedChunkHeaderSize;
- SecondaryHeader *Header = getHeader(Ptr);
- Header->MapBeg = MapBeg;
- Header->MapSize = MapSize;
+ ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
+ *StoredRange = AddressRange;
+
// The primary adds the whole class size to the stats when allocating a
// chunk, so we will do something similar here. But we will not account for
// the guard pages.
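
The stored range replaces the old two-field header, giving this layout for a secondary-backed chunk (widths symbolic, not Scudo's real constants):

//  MapBeg                                                          MapEnd
//  | guard | ... | ReservedAddressRange | chunk header | user data | guard |
//                                       ^ Ptr          ^ UserBeg
//
// The range object is written at Ptr - sizeof(ReservedAddressRange), which
// is what getReservedAddressRange() in the next hunk reads back at
// deallocation time.
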
@@ -95,41 +96,41 @@ class ScudoLargeMmapAllocator {
}
void Deallocate(AllocatorStats *Stats, void *Ptr) {
- SecondaryHeader *Header = getHeader(Ptr);
+ // The ReservedAddressRange is stored inside the mapping we are about to
+ // unmap, so copy it onto the stack first.
+ ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr);
{
SpinMutexLock l(&StatsMutex);
- Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
- Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+ Stats->Sub(AllocatorStatAllocated, AddressRange.size() - 2 * PageSize);
+ Stats->Sub(AllocatorStatMapped, AddressRange.size() - 2 * PageSize);
}
- UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
+ AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
+ AddressRange.size());
}
uptr GetActuallyAllocatedSize(void *Ptr) {
- SecondaryHeader *Header = getHeader(Ptr);
- // Deduct PageSize as MapSize includes the trailing guard page.
- uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
+ ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
+ // Deduct PageSize as ReservedAddressRange size includes the trailing guard
+ // page.
+ uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) +
+ StoredRange->size() - PageSize;
return MapEnd - reinterpret_cast<uptr>(Ptr);
}
private:
- // A Secondary allocated chunk header contains the base of the mapping and
- // its size, which comprises the guard pages.
- struct SecondaryHeader {
- uptr MapBeg;
- uptr MapSize;
- };
- // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
- COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
-
- SecondaryHeader *getHeader(uptr Ptr) {
- return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));
+ ReservedAddressRange *getReservedAddressRange(uptr Ptr) {
+ return reinterpret_cast<ReservedAddressRange*>(
+ Ptr - sizeof(ReservedAddressRange));
}
- SecondaryHeader *getHeader(const void *Ptr) {
- return getHeader(reinterpret_cast<uptr>(Ptr));
+ ReservedAddressRange *getReservedAddressRange(const void *Ptr) {
+ return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
}
- const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
- const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
+ const uptr AlignedReservedAddressRangeSize =
+ RoundUpTo(sizeof(ReservedAddressRange), MinAlignment);
+ const uptr HeadersSize =
+ AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;
+
uptr PageSize;
SpinMutex StatsMutex;
};
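
The copy-to-stack in Deallocate above is load-bearing: the stored ReservedAddressRange lives inside the very mapping it describes, so it has to be read out before Unmap tears that mapping down. Sketched with a hypothetical Range type standing in for the sanitizer class:

#include <cstdint>
#include <sys/mman.h>

// Hypothetical stand-in for ReservedAddressRange; not the sanitizer type.
struct Range {
  uintptr_t base, size;
  void Unmap(uintptr_t addr, uintptr_t len) {
    munmap(reinterpret_cast<void *>(addr), len);  // sketch: errors ignored
  }
};

void DeallocateSketch(void *Ptr) {
  // Copy the header out of the doomed mapping *before* unmapping it;
  // touching the stored object after Unmap would be a use-after-unmap.
  Range Copy = *reinterpret_cast<Range *>(
      reinterpret_cast<uintptr_t>(Ptr) - sizeof(Range));
  Copy.Unmap(Copy.base, Copy.size);  // safe: Copy lives on the stack
}
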