//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// Objects of this type should be used as local caches for SizeClassAllocator64
// or SizeClassAllocator32. Since the typical use of this class is to have one
// object per thread in TLS, it has to be POD.
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache
    : SizeClassAllocator::AllocatorCache {
};
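
// Illustrative per-thread usage sketch (names such as 'Allocator64',
// 'allocator', 'global_stats' and 'class_id' are placeholders for a concrete
// SizeClassAllocator64 instantiation, its instance, its global stats object
// and a valid size class; they are not part of this header):
//
//   static THREADLOCAL SizeClassAllocatorLocalCache<Allocator64> cache;
//   cache.Init(&global_stats);                  // Once, when the thread starts.
//   void *p = cache.Allocate(&allocator, class_id);
//   cache.Deallocate(&allocator, class_id, p);
//   cache.Destroy(&allocator, &global_stats);   // When the thread exits.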

// Cache used by SizeClassAllocator64.
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
  typedef SizeClassAllocator Allocator;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(c, allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    CHECK_GT(c->count, 0);
    CompactPtrT chunk = c->chunks[--c->count];
    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
        allocator->GetRegionBeginBySizeClass(class_id), chunk));
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(c, allocator, class_id, c->max_count / 2);
    CompactPtrT chunk = allocator->PointerToCompactPtr(
        allocator->GetRegionBeginBySizeClass(class_id),
        reinterpret_cast<uptr>(p));
    c->chunks[c->count++] = chunk;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(c, allocator, i, c->count);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  typedef typename Allocator::CompactPtrT CompactPtrT;

  struct PerClass {
    u32 count;        // Number of chunks currently cached for this class.
    u32 max_count;    // Cache capacity: 2 * MaxCachedHint(class_id).
    uptr class_size;  // Chunk size of this size class, in bytes.
    CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
      c->class_size = Allocator::ClassIdToSize(i);
    }
  }

  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                       uptr class_id) {
    InitCache();
    uptr num_requested_chunks = c->max_count / 2;
    if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                              num_requested_chunks)))
      return false;
    c->count = num_requested_chunks;
    return true;
  }

  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                      uptr count) {
    InitCache();
    CHECK_GE(c->count, count);
    uptr first_idx_to_drain = c->count - count;
    c->count -= count;
    allocator->ReturnToAllocator(&stats_, class_id,
                                 &c->chunks[first_idx_to_drain], count);
  }
};

// Cache used by SizeClassAllocator32.
template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
  typedef SizeClassAllocator Allocator;
  typedef typename Allocator::TransferBatch TransferBatch;

  void Init(AllocatorGlobalStats *s) {
    stats_.Init();
    if (s)
      s->Register(&stats_);
  }

  // Returns a TransferBatch suitable for class_id: a batch allocated from the
  // separate batch size class when one is used for this class, otherwise b.
  TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
                             TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      return (TransferBatch*)Allocate(allocator, batch_class_id);
    return b;
  }

  // Destroys TransferBatch b, deallocating it only if it was allocated from a
  // separate batch size class.
  void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
                    TransferBatch *b) {
    if (uptr batch_class_id = per_class_[class_id].batch_class_id)
      Deallocate(allocator, batch_class_id, b);
  }

  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
    Drain(allocator);
    if (s)
      s->Unregister(&stats_);
  }

  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    PerClass *c = &per_class_[class_id];
    if (UNLIKELY(c->count == 0)) {
      if (UNLIKELY(!Refill(allocator, class_id)))
        return nullptr;
    }
    stats_.Add(AllocatorStatAllocated, c->class_size);
    void *res = c->batch[--c->count];
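    // Prefetch the chunk that is expected to satisfy the next allocation.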
    PREFETCH(c->batch[c->count - 1]);
    return res;
  }

  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
    CHECK_NE(class_id, 0UL);
    CHECK_LT(class_id, kNumClasses);
    // If the first allocator call on a new thread is a deallocation, then
    // max_count will be zero, leading to a check failure.
    InitCache();
    PerClass *c = &per_class_[class_id];
    stats_.Sub(AllocatorStatAllocated, c->class_size);
    CHECK_NE(c->max_count, 0UL);
    if (UNLIKELY(c->count == c->max_count))
      Drain(allocator, class_id);
    c->batch[c->count++] = p;
  }

  void Drain(SizeClassAllocator *allocator) {
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      while (c->count > 0)
        Drain(allocator, i);
    }
  }

 private:
  typedef typename Allocator::SizeClassMapT SizeClassMap;
  static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;
  // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
  // allocated from the kBatchClassID size class (except for those that are
  // needed for kBatchClassID itself). The goal is to have TransferBatches in a
  // totally different region of RAM to improve security.
  static const bool kUseSeparateSizeClassForBatch =
      Allocator::kUseSeparateSizeClassForBatch;

  struct PerClass {
    uptr count;           // Number of chunks currently cached for this class.
    uptr max_count;       // Cache capacity: 2 * TransferBatch::MaxCached(class_id).
    uptr class_size;      // Chunk size of this size class, in bytes.
    uptr batch_class_id;  // Size class used to allocate TransferBatch objects for
                          // this class, or 0 if a batch fits within a chunk.
    void *batch[2 * TransferBatch::kMaxNumCached];
  };
  PerClass per_class_[kNumClasses];
  AllocatorStats stats_;

  void InitCache() {
    if (LIKELY(per_class_[1].max_count))
      return;
    const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
    for (uptr i = 0; i < kNumClasses; i++) {
      PerClass *c = &per_class_[i];
      uptr max_cached = TransferBatch::MaxCached(i);
      c->max_count = 2 * max_cached;
      c->class_size = Allocator::ClassIdToSize(i);
      // Precompute the class id to use to store batches for the current class
      // id. 0 means the class size is large enough to store a batch within one
      // of the chunks. If using a separate size class, it will always be
      // kBatchClassID, except for kBatchClassID itself.
      if (kUseSeparateSizeClassForBatch) {
        c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
      } else {
        c->batch_class_id = (c->class_size <
          TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
              batch_class_id : 0;
      }
    }
  }

  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
    if (UNLIKELY(!b))
      return false;
    CHECK_GT(b->Count(), 0);
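    // Copy the chunk pointers out of the batch into the local cache, then
    // release the batch itself (if it was separately allocated).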
    b->CopyToArray(c->batch);
    c->count = b->Count();
    DestroyBatch(class_id, allocator, b);
    return true;
  }

  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
    InitCache();
    PerClass *c = &per_class_[class_id];
    uptr cnt = Min(c->max_count / 2, c->count);
    uptr first_idx_to_drain = c->count - cnt;
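    // When batches for this class live inside the chunks themselves, reuse the
    // first chunk being drained as the TransferBatch storage.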
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
    // Failure to allocate a batch while releasing memory is non-recoverable.
    // TODO(alekseys): Figure out how to do it without allocating a new batch.
    if (UNLIKELY(!b))
      DieOnFailure::OnOOM();
    b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
                    &c->batch[first_idx_to_drain], cnt);
    c->count -= cnt;
    allocator->DeallocateBatch(&stats_, class_id, b);
  }
};