author    Kostya Kortchinsky <kostyak@google.com>  2017-08-28 15:20:02 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2017-08-28 15:20:02 +0000
commit    d74a5ec19dabe0cb290087d973d54291cfa68ad1 (patch)
tree      c342b9f4a8bfe87ff97ef333de4d02b5278dda68 /lib/sanitizer_common/sanitizer_allocator_local_cache.h
parent    5824d872dfe59e2a65e2b6a8e4d69b2d06d6d001 (diff)
[sanitizer] Re-introduce kUseSeparateSizeClassForBatch for the 32-bit Primary
Summary:
Currently, `TransferBatch` objects are located within the same memory regions
as "regular" chunks. This is not ideal for security: they make for an
interesting target to overwrite, and are not protected by the frontend
(namely, Scudo).

To solve this, we re-introduce `kUseSeparateSizeClassForBatch` for the 32-bit
Primary, allowing `TransferBatch` objects to end up in their own memory
region. Currently only Scudo would use this new feature; the default behavior
remains unchanged. The separate `kBatchClassID` was used for a brief period of
time previously, but was removed when the 64-bit Primary ended up using the
"free array".

Reviewers: alekseyshl, kcc, eugenis

Reviewed By: alekseyshl

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D37082

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@311891 91177308-0d34-0410-b5e6-96231b3b80d8
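To illustrate the placement logic this patch introduces, here is a minimal,
self-contained C++ sketch of the per-class decision that InitCache()
precomputes. The constant values and the BatchClassIdFor helper are
hypothetical stand-ins for illustration only, not the allocator's real API:

#include <cstdio>

// Hypothetical stand-ins for the allocator's real constants; the actual
// values come from SizeClassMap and TransferBatch and are assumptions here.
typedef unsigned long uptr;
static const uptr kBatchClassID = 52;    // assumed dedicated batch class id
static const uptr kBatchFitClassID = 9;  // assumed ClassID(sizeof(TransferBatch))

// Mirrors the decision precomputed per size class: which class id
// TransferBatch objects for class_id are allocated from. A result of 0
// means the batch is stored inline, within one of the class's own chunks.
uptr BatchClassIdFor(uptr class_id, bool use_separate_class_for_batch,
                     bool chunk_too_small_to_hold_batch) {
  if (use_separate_class_for_batch) {
    // All batches go to kBatchClassID, except those for kBatchClassID
    // itself, which stay inline to avoid self-recursion.
    return (class_id == kBatchClassID) ? 0 : kBatchClassID;
  }
  // Default behavior: small classes allocate batches from the class large
  // enough to hold a TransferBatch; large classes reuse one of their chunks.
  return chunk_too_small_to_hold_batch ? kBatchFitClassID : 0;
}

int main() {
  // With the separate batch class enabled (as Scudo would configure it),
  // a small class's batches land in the dedicated region...
  printf("class 3  -> batch class %lu\n", BatchClassIdFor(3, true, true));
  // ...while kBatchClassID's own batches remain inline (0).
  printf("class %lu -> batch class %lu\n", kBatchClassID,
         BatchClassIdFor(kBatchClassID, true, true));
  return 0;
}

The inline (0) case for kBatchClassID itself is what keeps the scheme from
needing a batch in order to allocate a batch.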
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_local_cache.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_local_cache.h  96
1 file changed, 51 insertions(+), 45 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index ec0742c20..1b3c2c0c1 100644
--- a/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -26,9 +26,6 @@ struct SizeClassAllocatorLocalCache
template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
typedef SizeClassAllocator Allocator;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
- typedef typename Allocator::SizeClassMapT SizeClassMap;
- typedef typename Allocator::CompactPtrT CompactPtrT;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
@@ -76,14 +73,18 @@ struct SizeClassAllocator64LocalCache {
}
void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
while (c->count > 0)
- Drain(c, allocator, class_id, c->count);
+ Drain(c, allocator, i, c->count);
}
}
- // private:
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
struct PerClass {
u32 count;
u32 max_count;
@@ -94,7 +95,7 @@ struct SizeClassAllocator64LocalCache {
AllocatorStats stats_;
void InitCache() {
- if (per_class_[1].max_count)
+ if (LIKELY(per_class_[1].max_count))
return;
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
@@ -130,7 +131,6 @@ template <class SizeClassAllocator>
struct SizeClassAllocator32LocalCache {
typedef SizeClassAllocator Allocator;
typedef typename Allocator::TransferBatch TransferBatch;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
void Init(AllocatorGlobalStats *s) {
stats_.Init();
@@ -138,6 +138,21 @@ struct SizeClassAllocator32LocalCache {
s->Register(&stats_);
}
+ // Returns a TransferBatch suitable for class_id.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ Deallocate(allocator, batch_class_id, b);
+ }
+
void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
Drain(allocator);
if (s)
@@ -173,66 +188,57 @@ struct SizeClassAllocator32LocalCache {
}
void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
while (c->count > 0)
- Drain(allocator, class_id);
+ Drain(allocator, i);
}
}
- // private:
- typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
+ // allocated from kBatchClassID size class (except for those that are needed
+ // for kBatchClassID itself). The goal is to have TransferBatches in a totally
+ // different region of RAM to improve security.
+ static const bool kUseSeparateSizeClassForBatch =
+ Allocator::kUseSeparateSizeClassForBatch;
+
struct PerClass {
uptr count;
uptr max_count;
uptr class_size;
- uptr class_id_for_transfer_batch;
+ uptr batch_class_id;
void *batch[2 * TransferBatch::kMaxNumCached];
};
PerClass per_class_[kNumClasses];
AllocatorStats stats_;
void InitCache() {
- if (per_class_[1].max_count)
+ if (LIKELY(per_class_[1].max_count))
return;
- // TransferBatch class is declared in SizeClassAllocator.
- uptr class_id_for_transfer_batch =
- SizeClassMap::ClassID(sizeof(TransferBatch));
+ const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
for (uptr i = 0; i < kNumClasses; i++) {
PerClass *c = &per_class_[i];
uptr max_cached = TransferBatch::MaxCached(i);
c->max_count = 2 * max_cached;
c->class_size = Allocator::ClassIdToSize(i);
- // We transfer chunks between central and thread-local free lists in
- // batches. For small size classes we allocate batches separately. For
- // large size classes we may use one of the chunks to store the batch.
- // sizeof(TransferBatch) must be a power of 2 for more efficient
- // allocation.
- c->class_id_for_transfer_batch = (c->class_size <
+ // Precompute the class id to use to store batches for the current class
+ // id. 0 means the class size is large enough to store a batch within one
+ // of the chunks. If using a separate size class, it will always be
+ // kBatchClassID, except for kBatchClassID itself.
+ if (kUseSeparateSizeClassForBatch) {
+ c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
+ } else {
+ c->batch_class_id = (c->class_size <
TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
- class_id_for_transfer_batch : 0;
+ batch_class_id : 0;
+ }
}
}
- // Returns a TransferBatch suitable for class_id.
- // For small size classes allocates the batch from the allocator.
- // For large size classes simply returns b.
- TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
- TransferBatch *b) {
- if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
- return (TransferBatch*)Allocate(allocator, batch_class_id);
- return b;
- }
-
- // Destroys TransferBatch b.
- // For small size classes deallocates b to the allocator.
- // Does nothing for large size classes.
- void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
- TransferBatch *b) {
- if (uptr batch_class_id = per_class_[class_id].class_id_for_transfer_batch)
- Deallocate(allocator, batch_class_id, b);
- }
-
NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];