//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Scudo Secondary Allocator.
/// This services allocations that are too large to be serviced by the Primary
/// Allocator. It is directly backed by the memory mapping functions of the
/// operating system.
///
//===----------------------------------------------------------------------===//

#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
#define SCUDO_ALLOCATOR_SECONDARY_H_

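// This header relies on declarations from scudo_allocator.h (integer types,
// atomic helpers, AllocatorStats) and is only meant to be included from there.
#ifndef SCUDO_ALLOCATOR_H_
# error "This file must be included inside scudo_allocator.h."
#endif
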
namespace __scudo {

class ScudoLargeMmapAllocator {
 public:
  void Init(bool AllocatorMayReturnNull) {
    PageSize = GetPageSizeCached();
    atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_relaxed);
  }

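  // The layout of a Secondary backed allocation, as set up by Allocate below:
  //
  //   [guard page][SecondaryHeader|chunk of Size bytes ....][guard page]
  //   ^ Header->MapBeg            ^ returned pointer
  //   |<--------------------- Header->MapSize --------------------->|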
  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
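    // Note: Alignment is currently unused. MapBeg is page aligned and
    // sizeof(SecondaryHeader) is a multiple of 16 (checked below), so the
    // returned pointer is always at least 16-byte aligned.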
    // The Scudo frontend prevents us from allocating more than
    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
    uptr MapSize = RoundUpTo(Size + sizeof(SecondaryHeader), PageSize);
    // Account for 2 guard pages, one before and one after the chunk.
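    // The whole range is initially reserved as inaccessible; only the region
    // between the guard pages is remapped accessible further down.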
    uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize + 2 * PageSize));
    CHECK_NE(MapBeg, ~static_cast<uptr>(0));
    // A page-aligned pointer is assumed after that, so check it now.
    CHECK(IsAligned(MapBeg, PageSize));
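    // Skip over the leading guard page, and map the usable region read-write.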
    MapBeg += PageSize;
    CHECK_EQ(MapBeg, reinterpret_cast<uptr>(MmapFixedOrDie(MapBeg, MapSize)));
    uptr MapEnd = MapBeg + MapSize;
    uptr Ptr = MapBeg + sizeof(SecondaryHeader);
    // TODO(kostyak): add a random offset to Ptr.
    CHECK_GT(Ptr + Size, MapBeg);
    // Ptr + Size can be equal to MapEnd if Size + sizeof(SecondaryHeader) is a
    // multiple of the page size, hence CHECK_LE and not CHECK_LT.
    CHECK_LE(Ptr + Size, MapEnd);
    SecondaryHeader *Header = getHeader(Ptr);
    Header->MapBeg = MapBeg - PageSize;
    Header->MapSize = MapSize + 2 * PageSize;
    // Account for the whole mapping, guard pages included, so that the stats
    // stay consistent with the Header->MapSize subtracted in Deallocate.
    Stats->Add(AllocatorStatAllocated, Header->MapSize);
    Stats->Add(AllocatorStatMapped, Header->MapSize);
    return reinterpret_cast<void *>(Ptr);
  }

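  // ReportAllocatorCannotReturnNull is NORETURN, hence the lack of return
  // statements after the calls below.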
  void *ReturnNullOrDieOnBadRequest() {
    if (atomic_load(&MayReturnNull, memory_order_acquire))
      return nullptr;
    ReportAllocatorCannotReturnNull(false);
  }

  void *ReturnNullOrDieOnOOM() {
    if (atomic_load(&MayReturnNull, memory_order_acquire))
      return nullptr;
    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool AllocatorMayReturnNull) {
    atomic_store(&MayReturnNull, AllocatorMayReturnNull, memory_order_release);
  }

  void Deallocate(AllocatorStats *Stats, void *Ptr) {
    SecondaryHeader *Header = getHeader(Ptr);
    Stats->Sub(AllocatorStatAllocated, Header->MapSize);
    Stats->Sub(AllocatorStatMapped, Header->MapSize);
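    // Unmap the whole mapping, including the guard pages.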
    UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
  }

  uptr TotalMemoryUsed() {
    UNIMPLEMENTED();
  }

  bool PointerIsMine(const void *Ptr) {
    UNIMPLEMENTED();
  }

  uptr GetActuallyAllocatedSize(void *Ptr) {
    SecondaryHeader *Header = getHeader(Ptr);
    // Deduct PageSize as MapEnd includes the trailing guard page.
    uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
    return MapEnd - reinterpret_cast<uptr>(Ptr);
  }

  void *GetMetaData(const void *Ptr) {
    UNIMPLEMENTED();
  }

  void *GetBlockBegin(const void *Ptr) {
    UNIMPLEMENTED();
  }

  void *GetBlockBeginFastLocked(void *Ptr) {
    UNIMPLEMENTED();
  }

  void PrintStats() {
    UNIMPLEMENTED();
  }

  void ForceLock() {
    UNIMPLEMENTED();
  }

  void ForceUnlock() {
    UNIMPLEMENTED();
  }

  void ForEachChunk(ForEachChunkCallback Callback, void *Arg) {
    UNIMPLEMENTED();
  }

 private:
  // A Secondary allocated chunk header contains the base of the mapping and
  // its size, which includes the guard pages. Currently, the base is always
  // one page before the header, but we might want to widen that gap in the
  // future based on the size of the allocation.
  struct SecondaryHeader {
    uptr MapBeg;
    uptr MapSize;
  };
  // The header size must be a multiple of 16 for the returned chunk pointer,
  // located sizeof(SecondaryHeader) bytes past a page boundary, to be at
  // least 16-byte aligned.
  COMPILER_CHECK((sizeof(SecondaryHeader) & 0xf) == 0);

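  // Returns the header of a chunk, which is stored immediately before Ptr.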
  SecondaryHeader *getHeader(uptr Ptr) {
    return reinterpret_cast<SecondaryHeader*>(Ptr - sizeof(SecondaryHeader));
  }
  SecondaryHeader *getHeader(const void *Ptr) {
    return getHeader(reinterpret_cast<uptr>(Ptr));
  }

  uptr PageSize;
  atomic_uint8_t MayReturnNull;
};

} // namespace __scudo

#endif  // SCUDO_ALLOCATOR_SECONDARY_H_