path: root/lib/scudo/scudo_allocator_secondary.h
blob: 1117d51d33fe3820026f326931e027d9dc8e22b9 (plain)
//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif
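
// This header has no #include directives of its own: it relies on identifiers
// such as uptr, AllocatorStats, AlignedChunkHeaderSize and MinAlignment being
// visible at the point where scudo_allocator.h includes it. A rough sketch of
// the expected setup (illustrative only; see scudo_allocator.h for the real
// one):
//
//   // scudo_allocator.h
//   #define SCUDO_ALLOCATOR_H_
//   ...  // constants such as MinAlignment and AlignedChunkHeaderSize
//   ...  // are defined before this point
//   #include "scudo_allocator_secondary.h"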

class ScudoLargeMmapAllocator {
 public:
  void Init() {
    PageSize = GetPageSizeCached();
  }

  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
    uptr UserSize = Size - AlignedChunkHeaderSize;
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr MapSize = Size + AlignedReservedAddressRangeSize;
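    // When the requested alignment exceeds the default minimum, map an extra
    // Alignment bytes so that an Alignment-aligned user pointer is guaranteed
    // to fit; the excess is trimmed further down.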
    if (Alignment > MinAlignment)
      MapSize += Alignment;
    MapSize = RoundUpTo(MapSize, PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
    MapSize += 2 * PageSize;

    ReservedAddressRange AddressRange;
    uptr MapBeg = AddressRange.Init(MapSize);
    if (MapBeg == ~static_cast<uptr>(0))
      return ReturnNullOrDieOnFailure::OnOOM();
    // The code below assumes a page-aligned pointer, so check that now.
    CHECK(IsAligned(MapBeg, PageSize));
    uptr MapEnd = MapBeg + MapSize;
    // The beginning of the user area for that allocation comes after the
    // initial guard page, and both headers. This is the pointer that has to
    // abide by alignment requirements.
    uptr UserBeg = MapBeg + PageSize + HeadersSize;
    uptr UserEnd = UserBeg + UserSize;
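
    // Rough layout of the mapping at this point, before the optional trimming
    // below (illustrative only, not to scale):
    //   MapBeg:   leading guard page
    //             ReservedAddressRange copy (stored just below the header)
    //             chunk header
    //   UserBeg:  user data (UserSize bytes)
    //             page-rounding slack, then the trailing guard page
    //   MapEnd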

    // In the rare event of larger alignments, we will attempt to fit the mmap
    // area better and unmap extraneous memory. This will also ensure that the
    // offset and unused bytes field of the header stay small.
    if (Alignment > MinAlignment) {
      if (!IsAligned(UserBeg, Alignment)) {
        UserBeg = RoundUpTo(UserBeg, Alignment);
        CHECK_GE(UserBeg, MapBeg);
        uptr NewMapBeg = RoundDownTo(UserBeg - HeadersSize, PageSize) -
            PageSize;
        CHECK_GE(NewMapBeg, MapBeg);
        if (NewMapBeg != MapBeg) {
          AddressRange.Unmap(MapBeg, NewMapBeg - MapBeg);
          MapBeg = NewMapBeg;
        }
        UserEnd = UserBeg + UserSize;
      }
      uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
      if (NewMapEnd != MapEnd) {
        AddressRange.Unmap(NewMapEnd, MapEnd - NewMapEnd);
        MapEnd = NewMapEnd;
      }
      MapSize = MapEnd - MapBeg;
    }

    CHECK_LE(UserEnd, MapEnd - PageSize);
    // Actually mmap the memory, preserving the guard pages on either side.
    CHECK_EQ(MapBeg + PageSize,
             AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
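    // Store the ReservedAddressRange just below the chunk header, so that
    // Deallocate() and GetActuallyAllocatedSize() can recover it from the
    // chunk pointer.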
    uptr Ptr = UserBeg - AlignedChunkHeaderSize;
    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
    *StoredRange = AddressRange;

    // The primary adds the whole class size to the stats when allocating a
    // chunk, so we do something similar here, but without accounting for the
    // guard pages.
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
      Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
    }

    return reinterpret_cast<void *>(Ptr);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    // Since we are about to unmap the memory that holds the
    // ReservedAddressRange itself, copy it onto the stack first.
    ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr);
    {
      SpinMutexLock l(&StatsMutex);
      Stats->Sub(AllocatorStatAllocated, AddressRange.size() - 2 * PageSize);
      Stats->Sub(AllocatorStatMapped, AddressRange.size() - 2 * PageSize);
    }
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }

  uptr GetActuallyAllocatedSize(void *Ptr) {
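    // The returned size runs from the chunk pointer to the last mapped byte
    // before the trailing guard page, so it can exceed the size originally
    // requested (header and page-rounding slack included).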
    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
    // Deduct PageSize as ReservedAddressRange size includes the trailing guard
    // page.
    uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) +
        StoredRange->size() - PageSize;
    return MapEnd - reinterpret_cast<uptr>(Ptr);
  }

 private:
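  // The ReservedAddressRange for a chunk is stored immediately below its
  // header (see Allocate); recover it from the chunk pointer.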
  ReservedAddressRange *getReservedAddressRange(uptr Ptr) {
    return reinterpret_cast<ReservedAddressRange*>(
        Ptr - sizeof(ReservedAddressRange));
  }
  ReservedAddressRange *getReservedAddressRange(const void *Ptr) {
    return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
  }

  const uptr AlignedReservedAddressRangeSize =
      RoundUpTo(sizeof(ReservedAddressRange), MinAlignment);
  const uptr HeadersSize =
      AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;

  uptr PageSize;
  SpinMutex StatsMutex;
};
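
// A minimal usage sketch (illustrative only; the real driver is the Scudo
// frontend, and the exact call sites differ):
//
//   ScudoLargeMmapAllocator Secondary;
//   Secondary.Init();
//   AllocatorStats Stats;
//   Stats.Init();
//   // Size must already include AlignedChunkHeaderSize, as Allocate()
//   // subtracts it to compute the user portion.
//   void *Ptr = Secondary.Allocate(&Stats, Size, Alignment);
//   if (Ptr) {
//     uptr Available = Secondary.GetActuallyAllocatedSize(Ptr);
//     Secondary.Deallocate(&Stats, Ptr);
//   }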

#endif  // SCUDO_ALLOCATOR_SECONDARY_H_