path: root/lib/sanitizer_common/sanitizer_allocator_local_cache.h
author    Diana Picus <diana.picus@linaro.org>    2017-04-13 07:39:04 +0000
committer Diana Picus <diana.picus@linaro.org>    2017-04-13 07:39:04 +0000
commit    a1687c29181cb02ce005689e09613af28de45de0 (patch)
tree      4f42d075ee3be00c16ed15fd7ad6120984c71265 /lib/sanitizer_common/sanitizer_allocator_local_cache.h
parent    4a814b2e509432c806ba7b0523bbed84923cab21 (diff)
Revert "Cache size per class size in SizeClassAllocatorXLocalCache."
This reverts commit r300107 because it broke the ARM and AArch64 buildbots.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@300180 91177308-0d34-0410-b5e6-96231b3b80d8
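For context, the change being reverted cached each size class's byte size in the thread-local PerClass entry so the stats update in Allocate()/Deallocate() could avoid recomputing it; this revert returns to looking the size up on every call. A minimal sketch of the two variants, using only the names visible in the diff below (the cached-size variant is the one removed here):

    // Variant introduced by r300107 (reverted by this commit): the size is
    // cached once per class when InitCache() runs ...
    struct PerClass {
      u32 count;
      u32 max_count;
      uptr class_size;  // filled from Allocator::ClassIdToSize(i) in InitCache()
      // ... cached chunks ...
    };
    // ... and charged to the stats from the cached field:
    //   stats_.Add(AllocatorStatAllocated, c->class_size);

    // Variant restored by this revert: recompute the size on every call.
    //   stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));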
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_local_cache.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_local_cache.h | 12
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index 1b72b17fa..a36619446 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -45,8 +45,8 @@ struct SizeClassAllocator64LocalCache {
   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
+    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
     PerClass *c = &per_class_[class_id];
-    stats_.Add(AllocatorStatAllocated, c->class_size);
     if (UNLIKELY(c->count == 0))
       Refill(c, allocator, class_id);
     CHECK_GT(c->count, 0);
@@ -62,8 +62,8 @@ struct SizeClassAllocator64LocalCache {
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
     InitCache();
+    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
     PerClass *c = &per_class_[class_id];
-    stats_.Sub(AllocatorStatAllocated, c->class_size);
     CHECK_NE(c->max_count, 0UL);
     if (UNLIKELY(c->count == c->max_count))
       Drain(c, allocator, class_id, c->max_count / 2);
@@ -85,7 +85,6 @@ struct SizeClassAllocator64LocalCache {
   struct PerClass {
     u32 count;
     u32 max_count;
-    uptr class_size;
     CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
   };
   PerClass per_class_[kNumClasses];
@@ -97,7 +96,6 @@ struct SizeClassAllocator64LocalCache {
     for (uptr i = 0; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
       c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
-      c->class_size = Allocator::ClassIdToSize(i);
     }
   }
@@ -143,8 +141,8 @@ struct SizeClassAllocator32LocalCache {
   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
+    stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
     PerClass *c = &per_class_[class_id];
-    stats_.Add(AllocatorStatAllocated, c->class_size);
     if (UNLIKELY(c->count == 0))
       Refill(allocator, class_id);
     void *res = c->batch[--c->count];
@@ -158,8 +156,8 @@ struct SizeClassAllocator32LocalCache {
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
     InitCache();
+    stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
     PerClass *c = &per_class_[class_id];
-    stats_.Sub(AllocatorStatAllocated, c->class_size);
     CHECK_NE(c->max_count, 0UL);
     if (UNLIKELY(c->count == c->max_count))
       Drain(allocator, class_id);
@@ -179,7 +177,6 @@ struct SizeClassAllocator32LocalCache {
   struct PerClass {
     uptr count;
     uptr max_count;
-    uptr class_size;
     void *batch[2 * TransferBatch::kMaxNumCached];
   };
   PerClass per_class_[kNumClasses];
@@ -191,7 +188,6 @@ struct SizeClassAllocator32LocalCache {
     for (uptr i = 0; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
       c->max_count = 2 * TransferBatch::MaxCached(i);
-      c->class_size = Allocator::ClassIdToSize(i);
     }
   }
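A self-contained, simplified model of the Allocate() fast path as it stands after this revert (illustrative only: the types, class sizes, and refill logic below are stand-ins, not the real sanitizer_common interfaces):

#include <cassert>
#include <cstdint>
#include <vector>

// Hypothetical stand-ins; the real allocator uses SizeClassMap and
// TransferBatch machinery instead of std::vector.
using uptr = uintptr_t;
static const uptr kNumClasses = 4;

// Mirrors the role of Allocator::ClassIdToSize(): map a class id to bytes.
static uptr ClassIdToSize(uptr class_id) {
  static const uptr kSizes[kNumClasses] = {0, 16, 32, 64};
  return kSizes[class_id];
}

struct LocalCacheModel {
  struct PerClass {
    std::vector<void *> chunks;  // cached free chunks for this size class
  };
  PerClass per_class_[kNumClasses];
  uptr allocated_stat = 0;  // plays the role of AllocatorStatAllocated

  void *Allocate(uptr class_id) {
    assert(class_id != 0 && class_id < kNumClasses);
    // After the revert, the size is recomputed on every allocation instead
    // of being read from a cached PerClass::class_size field.
    allocated_stat += ClassIdToSize(class_id);
    PerClass *c = &per_class_[class_id];
    if (c->chunks.empty())
      Refill(c, class_id);
    void *res = c->chunks.back();
    c->chunks.pop_back();
    return res;
  }

  // Stand-in for pulling a batch of chunks from the shared allocator.
  void Refill(PerClass *c, uptr class_id) {
    for (int i = 0; i < 4; i++)
      c->chunks.push_back(::operator new(ClassIdToSize(class_id)));
  }
};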