author    Alex Shlyapnikov <alekseys@google.com>  2017-06-22 00:02:37 +0000
committer Alex Shlyapnikov <alekseys@google.com>  2017-06-22 00:02:37 +0000
commit    712a9e568d79c2d7e424a60e8cdadf4d6b58e5fc (patch)
tree      d7bebe1079ff47be0ba3397ad164dce6fb10be1a /lib/sanitizer_common/sanitizer_allocator_local_cache.h
parent    ef55de50c1c83559a70bccfe6439c7b8284e2b75 (diff)
[Sanitizers] 32 bit allocator respects allocator_may_return_null flag
Summary:
Make SizeClassAllocator32 return nullptr when it encounters OOM, which
allows the entire sanitizer allocator to follow the
allocator_may_return_null=1 policy, even for small allocations
(LargeMmapAllocator is already fixed by D34243). A test for OOM in the
primary allocator will be added later, once SizeClassAllocator64 can
gracefully handle OOM too.

Reviewers: eugenis

Subscribers: kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34433

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@305972 91177308-0d34-0410-b5e6-96231b3b80d8
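For illustration, here is a minimal sketch of the caller-visible contract this patch works toward. It is not part of the patch: MaybeAllocate and the flag variable are hypothetical stand-ins for the sanitizer allocator entry point and the runtime flag.

```cpp
// Minimal sketch, assuming a hypothetical MaybeAllocate() entry point that
// mirrors the behavior this patch enables: with allocator_may_return_null=1,
// OOM surfaces to the caller as nullptr; with =0 the allocator still dies.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

static bool allocator_may_return_null = true;  // stand-in for the runtime flag

void *MaybeAllocate(size_t size) {
  void *p = malloc(size);  // stand-in for the size-class allocator path
  if (!p && !allocator_may_return_null) {
    fprintf(stderr, "allocator: out of memory, terminating\n");
    abort();               // analogous to DieOnFailure::OnOOM()
  }
  return p;                // nullptr reaches the caller only when allowed
}

int main() {
  if (void *p = MaybeAllocate(1u << 20)) {
    free(p);               // success path
  } else {
    fprintf(stderr, "allocation failed, caller keeps running\n");
  }
  return 0;
}
```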
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_local_cache.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_local_cache.h | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index b3729bf55..8fa62a3bf 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -144,8 +144,10 @@ struct SizeClassAllocator32LocalCache {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
PerClass *c = &per_class_[class_id];
- if (UNLIKELY(c->count == 0))
- Refill(allocator, class_id);
+ if (UNLIKELY(c->count == 0)) {
+ if (UNLIKELY(!Refill(allocator, class_id)))
+ return nullptr;
+ }
stats_.Add(AllocatorStatAllocated, c->class_size);
void *res = c->batch[--c->count];
PREFETCH(c->batch[c->count - 1]);
@@ -227,14 +229,17 @@ struct SizeClassAllocator32LocalCache {
Deallocate(allocator, batch_class_id, b);
}

- NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+ NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ if (UNLIKELY(!b))
+ return false;
CHECK_GT(b->Count(), 0);
b->CopyToArray(c->batch);
c->count = b->Count();
DestroyBatch(class_id, allocator, b);
+ return true;
}

NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
@@ -244,6 +249,10 @@ struct SizeClassAllocator32LocalCache {
uptr first_idx_to_drain = c->count - cnt;
TransferBatch *b = CreateBatch(
class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+ // Failure to allocate a batch while releasing memory is non recoverable.
+ // TODO(alekseys): Figure out how to do it without allocating a new batch.
+ if (UNLIKELY(!b))
+ DieOnFailure::OnOOM();
b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
&c->batch[first_idx_to_drain], cnt);
c->count -= cnt;
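A note on the Drain() hunk above: the Deallocate(allocator, batch_class_id, b) context line shows that TransferBatch headers for small size classes are themselves carved out of a size class, so CreateBatch() can hit OOM precisely while the cache is trying to return memory. The sketch below is simplified and illustrative, not the sanitizer API (Batch, BackendRelease, and Drain are invented names); it shows why that failure is treated as fatal: without a freshly allocated header there is nothing to carry the cached chunks back.

```cpp
// Simplified sketch of the allocate-to-free hazard: shipping cached items
// back to the backend requires allocating a header first, so the release
// path itself can fail under OOM, and there is no safe way to back out.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct Batch {
  void *items[64];
  size_t count;
};

// Hypothetical backend hook that takes ownership of a heap-allocated Batch,
// reclaiming both the carried chunks and the header itself.
void BackendRelease(Batch *b) {
  for (size_t i = 0; i < b->count; i++) free(b->items[i]);
  free(b);
}

void Drain(void **cached, size_t count) {  // assumes count <= 64
  Batch *b = static_cast<Batch *>(malloc(sizeof(Batch)));  // can OOM
  if (!b) {
    // No header, no way to return the chunks: the real code calls
    // DieOnFailure::OnOOM() here rather than leaking or corrupting state.
    fprintf(stderr, "OOM while draining cache\n");
    abort();
  }
  for (size_t i = 0; i < count; i++) b->items[i] = cached[i];
  b->count = count;
  BackendRelease(b);
}

int main() {
  void *cached[2] = {malloc(8), malloc(8)};
  Drain(cached, 2);  // hands both chunks back via a freshly allocated Batch
  return 0;
}
```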