author    Kostya Serebryany <kcc@google.com>  2016-08-06 01:24:11 +0000
committer Kostya Serebryany <kcc@google.com>  2016-08-06 01:24:11 +0000
commit    790fa40271e6c8b4e4c5994ad4b0303201639dab (patch)
tree      1810b1a4402561813b619cb959c28e016c3be71c /lib/sanitizer_common/sanitizer_allocator_local_cache.h
parent    bd3c4f9891018b0a82aa278d5d22e9eb5860694b (diff)
[sanitizer] allocator: move TransferBatch into SizeClassAllocator64/SizeClassAllocator32 because we actually need different implementations for the 64- and 32-bit case. NFC; the following patches will make the TransferBatch implementations differ
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@277899 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_local_cache.h')
-rw-r--r-- lib/sanitizer_common/sanitizer_allocator_local_cache.h | 56
1 file changed, 43 insertions(+), 13 deletions(-)
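
For context before the diff, a minimal sketch (not the actual implementation) of the shape this patch gives the allocators: TransferBatch becomes a nested type of SizeClassAllocator64/SizeClassAllocator32 so the two implementations can diverge in the follow-up patches. The local cache relies only on the small surface sketched here (Count(), Get(), SetFromArray(), plus the AllocateBatch()/DeallocateBatch() entry points visible in the diff); the field layout and the kMaxNumCached value are illustrative assumptions.

// Sketch only: the nested-TransferBatch shape this patch introduces.
typedef unsigned long uptr;  // stand-in for sanitizer_common's uptr
struct AllocatorStats;       // opaque here; defined in sanitizer_common

class SizeClassAllocator64 {  // same idea for SizeClassAllocator32
 public:
  struct TransferBatch {
    static const uptr kMaxNumCached = 64;  // assumed capacity
    void SetFromArray(void **array, uptr count) {
      count_ = count;
      for (uptr i = 0; i < count; i++) batch_[i] = array[i];
    }
    uptr Count() const { return count_; }
    void *Get(uptr i) const { return batch_[i]; }
   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };
  // Batches move chunks between the central and thread-local free lists:
  TransferBatch *AllocateBatch(AllocatorStats *stat, void *cache,
                               uptr class_id);
  void DeallocateBatch(AllocatorStats *stat, uptr class_id, TransferBatch *b);
  static uptr ClassIdToSize(uptr class_id);
};
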
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index 4fe99561f..a63a50693 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -20,6 +20,7 @@
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
void Init(AllocatorGlobalStats *s) {
@@ -37,7 +38,7 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
@@ -52,7 +53,7 @@ struct SizeClassAllocatorLocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
@@ -70,7 +71,6 @@ struct SizeClassAllocatorLocalCache {
// private:
typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
- typedef typename SizeClassMap::TransferBatch Batch;
struct PerClass {
uptr count;
uptr max_count;
@@ -88,27 +88,57 @@ struct SizeClassAllocatorLocalCache {
}
}
- // Returns a Batch suitable for class_id.
+ // TransferBatch class is declared in SizeClassAllocator.
+ // We transfer chunks between central and thread-local free lists in batches.
+ // For small size classes we allocate batches separately.
+ // For large size classes we may use one of the chunks to store the batch.
+ // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
+
+ // If kUseSeparateSizeClassForBatch is true,
+ // all TransferBatch objects are allocated from kBatchClassID
+ // size class (except for those that are needed for kBatchClassID itself).
+ // The goal is to have TransferBatches in a totally different region of RAM
+ // to improve security and allow more efficient RAM reclamation.
+ // This is experimental and may currently increase memory usage by up to 3%
+ // in extreme cases.
+ static const bool kUseSeparateSizeClassForBatch = false;
+
+ static uptr SizeClassForTransferBatch(uptr class_id) {
+ if (kUseSeparateSizeClassForBatch)
+ return class_id == SizeClassMap::kBatchClassID
+ ? 0
+ : SizeClassMap::kBatchClassID;
+ if (Allocator::ClassIdToSize(class_id) <
+ sizeof(TransferBatch) -
+ sizeof(uptr) * (SizeClassMap::kMaxNumCached -
+ SizeClassMap::MaxCached(class_id)))
+ return SizeClassMap::ClassID(sizeof(TransferBatch));
+ return 0;
+ }
+
+ // Returns a TransferBatch suitable for class_id.
// For small size classes allocates the batch from the allocator.
// For large size classes simply returns b.
- Batch *CreateBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
- if (uptr batch_class_id = SizeClassMap::SizeClassForTransferBatch(class_id))
- return (Batch*)Allocate(allocator, batch_class_id);
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
return b;
}
- // Destroys Batch b.
+ // Destroys TransferBatch b.
// For small size classes deallocates b to the allocator.
// Does nothing for large size classes.
- void DestroyBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
- if (uptr batch_class_id = SizeClassMap::SizeClassForTransferBatch(class_id))
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
Deallocate(allocator, batch_class_id, b);
}
NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
- Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
CHECK_GT(b->Count(), 0);
for (uptr i = 0; i < b->Count(); i++)
c->batch[i] = b->Get(i);
@@ -121,8 +151,8 @@ struct SizeClassAllocatorLocalCache {
PerClass *c = &per_class_[class_id];
uptr cnt = Min(c->max_count / 2, c->count);
uptr first_idx_to_drain = c->count - cnt;
- Batch *b =
- CreateBatch(class_id, allocator, (Batch *)c->batch[first_idx_to_drain]);
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
b->SetFromArray(&c->batch[first_idx_to_drain], cnt);
c->count -= cnt;
allocator->DeallocateBatch(&stats_, class_id, b);
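
To make the chunk-vs-separate-batch rule in SizeClassForTransferBatch() concrete, here is a standalone sketch of the decision with hypothetical numbers; the TransferBatch layout, kMaxNumCached, and the MaxCached() heuristic below are stand-ins for illustration, not the allocator's real configuration.

#include <cstdio>
#include <cstdint>

using uptr = uintptr_t;

static const uptr kMaxNumCached = 64;  // assumed slot count per batch

// Assumed layout: a small header followed by kMaxNumCached chunk slots.
struct TransferBatch {
  TransferBatch *next;
  uptr count;
  void *batch[kMaxNumCached];
};

// Stand-in for SizeClassMap::MaxCached(): how many chunks of this size
// a batch actually carries (illustrative heuristic only).
static uptr MaxCached(uptr chunk_size) {
  uptr n = 4096 / chunk_size;
  if (n < 1) n = 1;
  if (n > kMaxNumCached) n = kMaxNumCached;
  return n;
}

// True if a batch for this chunk size must be allocated separately, i.e.
// a chunk is too small to double as its own TransferBatch. The subtraction
// discounts the slots a batch for this class never uses, mirroring the
// expression in SizeClassForTransferBatch() above.
static bool NeedsSeparateBatch(uptr chunk_size) {
  uptr unused_slots = kMaxNumCached - MaxCached(chunk_size);
  uptr effective_size = sizeof(TransferBatch) - sizeof(uptr) * unused_slots;
  return chunk_size < effective_size;
}

int main() {
  const uptr sizes[] = {16, 32, 128, 1024, 4096};
  for (uptr size : sizes)
    printf("chunk %4zu bytes: %s\n", (size_t)size,
           NeedsSeparateBatch(size) ? "batch allocated separately"
                                    : "batch stored in a chunk");
  return 0;
}

With these assumed numbers, 16- to 128-byte chunks are too small to hold the batch header plus their slot array, so their batches come from the size class that fits sizeof(TransferBatch); 1024- and 4096-byte chunks can store the batch in a chunk itself, which is exactly the candidate that Drain() passes to CreateBatch() as b.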