author     Kostya Serebryany <kcc@google.com>  2016-07-21 18:47:53 +0000
committer  Kostya Serebryany <kcc@google.com>  2016-07-21 18:47:53 +0000
commit     52ee88fd9cb9eaadfb9e8a6b460b57c2e25cb95d (patch)
tree       53b75f77cfbcb754824b1bee3cc01a0934d72be1 /lib/sanitizer_common
parent     c8da6db25e5e82ea0ce79615cdffdc4966e2900f (diff)
[sanitizer] allocator: remove kPopulateSize and only use SizeClassMap::MaxCached; ensure that TransferBatch size is a power of two, refactor TransferBatch creation/destruction into separate functions.
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@276318 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_local_cache.h     28
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary32.h        5
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary64.h       10
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_size_class_map.h   7
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc      2
5 files changed, 28 insertions, 24 deletions
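
Note on the refactor: the CreateBatch()/DestroyBatch() helpers introduced below centralize a decision that was previously duplicated in Refill()/Drain() and in both primary allocators: small size classes allocate the TransferBatch separately from the allocator, while large size classes store the batch header in one of the chunks themselves. The following stand-alone C++ sketch models only that control flow; ToyBatch, RequiresSeparateBatch and the toy sizes are illustrative stand-ins, not compiler-rt code.

// Hedged sketch: a toy model of the "separate vs. in-chunk TransferBatch"
// decision that CreateBatch()/DestroyBatch() below centralize.
#include <cassert>
#include <cstdint>
#include <cstdlib>

struct ToyBatch {            // stand-in for SizeClassMap::TransferBatch
  ToyBatch *next;
  uintptr_t count;
  void *batch[62];
};

// Stand-in for SizeClassMap::SizeClassRequiresSeparateTransferBatch():
// chunks smaller than the batch header cannot embed it.
static bool RequiresSeparateBatch(size_t chunk_size) {
  return chunk_size < sizeof(ToyBatch);
}

// Mirrors CreateBatch(): small classes get a separately allocated batch,
// large classes reuse the first chunk (passed in as `candidate`).
static ToyBatch *CreateBatch(size_t chunk_size, void *candidate) {
  if (RequiresSeparateBatch(chunk_size))
    return static_cast<ToyBatch *>(malloc(sizeof(ToyBatch)));
  return static_cast<ToyBatch *>(candidate);
}

// Mirrors DestroyBatch(): only separately allocated batches are freed.
static void DestroyBatch(size_t chunk_size, ToyBatch *b) {
  if (RequiresSeparateBatch(chunk_size))
    free(b);
}

int main() {
  alignas(ToyBatch) char big_chunk[4096];
  ToyBatch *in_chunk = CreateBatch(sizeof(big_chunk), big_chunk);
  assert(static_cast<void *>(in_chunk) == big_chunk);  // embedded in the chunk
  DestroyBatch(sizeof(big_chunk), in_chunk);            // no-op

  ToyBatch *separate = CreateBatch(/*chunk_size=*/16, /*candidate=*/nullptr);
  assert(separate != nullptr);
  DestroyBatch(16, separate);                           // actually freed
  return 0;
}
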
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index c133729ce..2b7a842e2 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -88,6 +88,23 @@ struct SizeClassAllocatorLocalCache {
}
}
+ // Returns a Batch suitable for class_id.
+ // For small size classes allocates the batch from the allocator.
+ // For large size classes simply returns b.
+ Batch *CreateBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
+ if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
+ return (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
+ return b;
+ }
+
+ // Destroys Batch b.
+ // For small size classes deallocates b to the allocator.
+ // Does nothing for large size classes.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
+ if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
+ Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
+ }
+
NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
@@ -96,18 +113,13 @@ struct SizeClassAllocatorLocalCache {
for (uptr i = 0; i < b->count; i++)
c->batch[i] = b->batch[i];
c->count = b->count;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
+ DestroyBatch(class_id, allocator, b);
}
NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
- Batch *b;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)c->batch[0];
+ Batch *b = CreateBatch(class_id, allocator, (Batch*)c->batch[0]);
uptr cnt = Min(c->max_count / 2, c->count);
for (uptr i = 0; i < cnt; i++) {
b->batch[i] = c->batch[i];
@@ -119,5 +131,3 @@ struct SizeClassAllocatorLocalCache {
allocator->DeallocateBatch(&stats_, class_id, b);
}
};
-
-
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
index 7d4d027a7..4b4574c1e 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -231,10 +231,7 @@ class SizeClassAllocator32 {
Batch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (!b) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)i;
+ b = c->CreateBatch(class_id, this, (Batch*)i);
b->count = 0;
}
b->batch[b->count++] = (void*)i;
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h
index c99e977d0..13f352adb 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -219,9 +219,6 @@ class SizeClassAllocator64 {
uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
// kRegionSize must be >= 2^32.
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
- // Populate the free list with at most this number of bytes at once
- // or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 14;
// Call mmap for user memory with at least this size.
static const uptr kUserMapSize = 1 << 16;
// Call mmap for metadata memory with at least this size.
@@ -261,7 +258,7 @@ class SizeClassAllocator64 {
if (b)
return b;
uptr size = SizeClassMap::Size(class_id);
- uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
+ uptr count = SizeClassMap::MaxCached(class_id);
uptr beg_idx = region->allocated_user;
uptr end_idx = beg_idx + count * size;
uptr region_beg = SpaceBeg() + kRegionSize * class_id;
@@ -296,10 +293,7 @@ class SizeClassAllocator64 {
Die();
}
for (;;) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)(region_beg + beg_idx);
+ b = c->CreateBatch(class_id, this, (Batch*)(region_beg + beg_idx));
b->count = count;
for (uptr i = 0; i < count; i++)
b->batch[i] = (void*)(region_beg + beg_idx + i * size);
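
Note on the kPopulateSize removal above: MaxCached(class_id) presumably already bounds a batch both by element count and, through the size class map's third template parameter (a log2 byte budget, 1 << 16 for the default map), by total bytes, which would make the separate kPopulateSize threshold redundant. The sketch below illustrates that assumed cap; the real SizeClassMap::MaxCached() may differ in details.

// Hedged sketch of the cap that makes kPopulateSize unnecessary.
#include <algorithm>
#include <cstdint>

using uptr = uintptr_t;

// Assumed parameters of the default map, SizeClassMap<17, 126, 16>.
static const uptr kMaxNumCached = 126;
static const uptr kMaxCachedBytes = 1 << 16;

static uptr SketchMaxCached(uptr chunk_size) {
  // Cap by element count and by total cached bytes, but never below one.
  return std::max<uptr>(1, std::min<uptr>(kMaxNumCached,
                                          kMaxCachedBytes / chunk_size));
}
// Small chunks: count-limited (SketchMaxCached(16) == 126).
// Large chunks: byte-limited, degrading to 1 (SketchMaxCached(1 << 17) == 1),
// which is the behavior the old kPopulateSize check forced explicitly.
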
diff --git a/lib/sanitizer_common/sanitizer_allocator_size_class_map.h b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
index 612064ec2..9d0566a84 100644
--- a/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
+++ b/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -87,14 +87,17 @@ class SizeClassMap {
public:
static const uptr kMaxNumCached = kMaxNumCachedT;
+ COMPILER_CHECK(((kMaxNumCached + 2) & (kMaxNumCached + 1)) == 0);
// We transfer chunks between central and thread-local free lists in batches.
// For small size classes we allocate batches separately.
// For large size classes we use one of the chunks to store the batch.
+ // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
struct TransferBatch {
TransferBatch *next;
uptr count;
void *batch[kMaxNumCached];
};
+ COMPILER_CHECK((sizeof(TransferBatch) & (sizeof(TransferBatch) - 1)) == 0);
static const uptr kMaxSize = 1UL << kMaxSizeLog;
static const uptr kNumClasses =
@@ -180,7 +183,7 @@ class SizeClassMap {
}
};
-typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
-typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
+typedef SizeClassMap<17, 126, 16> DefaultSizeClassMap;
+typedef SizeClassMap<17, 62, 14> CompactSizeClassMap;
template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
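
Note on the 128 -> 126 and 64 -> 62 changes above: on a 64-bit target TransferBatch holds one next pointer, one uptr count and kMaxNumCached pointers, so sizeof(TransferBatch) == (kMaxNumCached + 2) * 8; for that to be a power of two, kMaxNumCached + 2 must itself be a power of two, which is exactly what the new COMPILER_CHECKs enforce. A stand-alone check of the arithmetic (plain static_asserts, not compiler-rt code):

#include <cstdint>

// Stand-in for SizeClassMap::TransferBatch with kMaxNumCached = 126.
struct TransferBatch126 {
  TransferBatch126 *next;
  uintptr_t count;
  void *batch[126];
};

// On a 64-bit target: (126 + 2) * 8 == 1024, a power of two.
static_assert(sizeof(void *) != 8 || sizeof(TransferBatch126) == 1024,
              "expected a 1 KiB batch on 64-bit targets");
static_assert((sizeof(TransferBatch126) & (sizeof(TransferBatch126) - 1)) == 0,
              "sizeof(TransferBatch) must be a power of two");

// With the old value of 128, the size would be (128 + 2) * 8 == 1040 bytes,
// which is not a power of two; hence 128 -> 126 (and likewise 64 -> 62).
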
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index 31eec19c3..76c00e4f4 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -781,7 +781,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
// In a world where regions are small and chunks are huge...
- typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
+ typedef SizeClassMap<63, 126, 16> SpecialSizeClassMap;
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
SpecialSizeClassMap> SpecialAllocator64;
const uptr kRegionSize =