summaryrefslogtreecommitdiff
path: root/lib/sanitizer_common/sanitizer_allocator_local_cache.h
diff options
context:
space:
mode:
authorAlex Shlyapnikov <alekseys@google.com>2017-04-13 16:49:16 +0000
committerAlex Shlyapnikov <alekseys@google.com>2017-04-13 16:49:16 +0000
commit365d041bb0cb543aa639c385eeaf31c33d802730 (patch)
tree8c64a6a6708584ec5992bf783eee38f91add2886 /lib/sanitizer_common/sanitizer_allocator_local_cache.h
parent546e70f188dbda325cea1af5675042981d0ba38a (diff)
Cache size per class size in SizeClassAllocatorXLocalCache.
Summary: Allocator::ClassIdToSize() is not free and calling it in every Allocate/Deallocate has noticeable impact on perf. Reapplying D31991 with the appropriate fixes. Reviewers: cryptoad. Subscribers: kubamracek, llvm-commits. Differential Revision: https://reviews.llvm.org/D32024 git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@300216 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_local_cache.h')
-rw-r--r--lib/sanitizer_common/sanitizer_allocator_local_cache.h12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index a36619446..d6c66604e 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -45,10 +45,10 @@ struct SizeClassAllocator64LocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(c, allocator, class_id);
+ stats_.Add(AllocatorStatAllocated, c->class_size);
CHECK_GT(c->count, 0);
CompactPtrT chunk = c->chunks[--c->count];
void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
@@ -62,8 +62,8 @@ struct SizeClassAllocator64LocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(c, allocator, class_id, c->max_count / 2);
@@ -85,6 +85,7 @@ struct SizeClassAllocator64LocalCache {
struct PerClass {
u32 count;
u32 max_count;
+ uptr class_size;
CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
};
PerClass per_class_[kNumClasses];
@@ -96,6 +97,7 @@ struct SizeClassAllocator64LocalCache {
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
+ c->class_size = Allocator::ClassIdToSize(i);
}
}
@@ -141,10 +143,10 @@ struct SizeClassAllocator32LocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
+ stats_.Add(AllocatorStatAllocated, c->class_size);
void *res = c->batch[--c->count];
PREFETCH(c->batch[c->count - 1]);
return res;
@@ -156,8 +158,8 @@ struct SizeClassAllocator32LocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
Drain(allocator, class_id);
@@ -177,6 +179,7 @@ struct SizeClassAllocator32LocalCache {
struct PerClass {
uptr count;
uptr max_count;
+ uptr class_size;
void *batch[2 * TransferBatch::kMaxNumCached];
};
PerClass per_class_[kNumClasses];
@@ -188,6 +191,7 @@ struct SizeClassAllocator32LocalCache {
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
c->max_count = 2 * TransferBatch::MaxCached(i);
+ c->class_size = Allocator::ClassIdToSize(i);
}
}