author    Kostya Kortchinsky <kostyak@google.com>  2017-10-25 17:24:56 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2017-10-25 17:24:56 +0000
commit    e625e66b75c013d6fdb12955e8bc4be8f41a25c1 (patch)
tree      134f02b32279f6e43b184f990b12e1ed097525c2 /lib/sanitizer_common/sanitizer_allocator_primary32.h
parent    00b60424097c569e185a667969d054db7004cd0c (diff)
[sanitizer] Random shuffling of chunks for the 32-bit Primary Allocator
Summary:
The 64-bit primary has had random shuffling of chunks for a while; this implements it for the 32-bit primary. Scudo is currently the only user of `kRandomShuffleChunks`.

This change consists of a few modifications:
- move the random shuffling functions out of the 64-bit primary to `sanitizer_common.h`. Alternatively I could move them to `sanitizer_allocator.h` as they are only used in the allocator; I don't feel strongly either way;
- small change in the 64-bit primary to make the `rand_state` initialization `UNLIKELY`;
- addition of a `rand_state` in the 32-bit primary's `SizeClassInfo` and shuffling of chunks when populating the free list;
- enabling the `random_shuffle.cpp` test on platforms using the 32-bit primary for Scudo.

Some comments on why the shuffling is done that way. Initially I just implemented a `Shuffle` function in the `TransferBatch`, which was simpler, but I came to realize this wasn't good enough: for chunks of 10000 bytes, for example, with a `CompactSizeClassMap`, a batch holds only 1 chunk, meaning shuffling the batch has no effect, while a region is usually 1MB, e.g. 104 chunks of that size. So I decided to "stage" the newly gathered chunks in a temporary array that would be shuffled prior to placing the chunks in batches. The result is looping twice through n_chunks even if shuffling is not enabled, but I didn't notice any significant performance impact. A standalone sketch of this staging pattern follows at the end of the diff below.

Reviewers: alekseyshl

Reviewed By: alekseyshl

Subscribers: srhines, llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D39244

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@316596 91177308-0d34-0410-b5e6-96231b3b80d8
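The shuffling helpers moved into `sanitizer_common.h` amount to a Fisher-Yates shuffle driven by a small caller-owned `rand_state`, as used by the `RandomShuffle(ptr, count, &sci->rand_state)` calls in the diff. The snippet below is a minimal standalone sketch of that idea, not the exact compiler-rt implementation: the linear congruential constants, the `Rand` helper, and the `main` driver are illustrative assumptions.

```
#include <cstdint>
#include <cstdio>
#include <utility>

// Illustrative linear congruential step; the real sanitizer PRNG may differ.
static inline uint32_t Rand(uint32_t *state) {
  *state = *state * 1103515245 + 12345;
  return (*state >> 16) & 0x7fff;
}

// Fisher-Yates shuffle seeded by a caller-owned rand_state, mirroring the
// RandomShuffle(pointers_array, count, &sci->rand_state) call sites.
template <typename T>
static void RandomShuffle(T *a, uint32_t n, uint32_t *rand_state) {
  if (n <= 1) return;
  for (uint32_t i = n - 1; i > 0; i--)
    std::swap(a[i], a[Rand(rand_state) % (i + 1)]);
}

int main() {
  uint32_t chunks[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  uint32_t rand_state = 0x1234abcd;  // in the patch, seeded from ASLR ^ time
  RandomShuffle(chunks, 8, &rand_state);
  for (uint32_t c : chunks) std::printf("%u ", c);
  std::printf("\n");
  return 0;
}
```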
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_primary32.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary32.h  54
1 file changed, 44 insertions, 10 deletions
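The new `rand_state` added to `SizeClassInfo` in the diff below is seeded lazily, the first time that class's free list is populated, from the `SizeClassInfo` address (randomized by ASLR when built as PIE) combined with the current time. Here is a hedged standalone sketch of that seeding idiom; compiler-rt's `NanoTime()` is approximated with `std::chrono`, and the struct layout is simplified.

```
#include <chrono>
#include <cstdint>
#include <cstdio>

struct SizeClassInfo {
  uint32_t rand_state = 0;  // 0 means "not yet seeded"
};

// Lazily seed the per-class PRNG state from the object's address (randomized
// by ASLR when built as PIE) XOR-ed with a time source. This is the cold
// path, hence the UNLIKELY annotation in the patch.
static void MaybeSeed(SizeClassInfo *sci) {
  if (sci->rand_state == 0) {
    uint64_t nanos =
        std::chrono::steady_clock::now().time_since_epoch().count();
    sci->rand_state =
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(sci) ^ nanos);
  }
}

int main() {
  SizeClassInfo sci;
  MaybeSeed(&sci);
  std::printf("rand_state = 0x%08x\n", sci.rand_state);
  return 0;
}
```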
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
index 8abf2f9f5..6c682aff6 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -268,7 +268,8 @@ class SizeClassAllocator32 {
struct SizeClassInfo {
SpinMutex mutex;
IntrusiveList<TransferBatch> free_list;
- char padding[kCacheLineSize - sizeof(uptr) -
+ u32 rand_state;
+ char padding[kCacheLineSize - 2 * sizeof(uptr) -
sizeof(IntrusiveList<TransferBatch>)];
};
COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
@@ -301,29 +302,62 @@ class SizeClassAllocator32 {
return &size_class_info_array[class_id];
}
+ bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
+ TransferBatch **current_batch, uptr max_count,
+ uptr *pointers_array, uptr count) {
+ // If using a separate class for batches, we do not need to shuffle it.
+ if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
+ class_id != SizeClassMap::kBatchClassID))
+ RandomShuffle(pointers_array, count, &sci->rand_state);
+ TransferBatch *b = *current_batch;
+ for (uptr i = 0; i < count; i++) {
+ if (!b) {
+ b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
+ if (UNLIKELY(!b))
+ return false;
+ b->Clear();
+ }
+ b->Add((void*)pointers_array[i]);
+ if (b->Count() == max_count) {
+ sci->free_list.push_back(b);
+ b = nullptr;
+ }
+ }
+ *current_batch = b;
+ return true;
+ }
+
bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
SizeClassInfo *sci, uptr class_id) {
uptr size = ClassIdToSize(class_id);
uptr reg = AllocateRegion(stat, class_id);
if (UNLIKELY(!reg))
return false;
+ if (kRandomShuffleChunks)
+ if (UNLIKELY(sci->rand_state == 0))
+ // The random state is initialized from ASLR (PIE) and time.
+ sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = TransferBatch::MaxCached(class_id);
CHECK_GT(max_count, 0);
TransferBatch *b = nullptr;
+ const uptr kShuffleArraySize = 48;
+ uptr shuffle_array[kShuffleArraySize];
+ uptr count = 0;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
- if (!b) {
- b = c->CreateBatch(class_id, this, (TransferBatch*)i);
- if (UNLIKELY(!b))
+ shuffle_array[count++] = i;
+ if (count == kShuffleArraySize) {
+ if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+ shuffle_array, count)))
return false;
- b->Clear();
- }
- b->Add((void*)i);
- if (b->Count() == max_count) {
- sci->free_list.push_back(b);
- b = nullptr;
+ count = 0;
}
}
+ if (count) {
+ if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+ shuffle_array, count)))
+ return false;
+ }
if (b) {
CHECK_GT(b->Count(), 0);
sci->free_list.push_back(b);
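For readers outside the allocator, the following is a hedged standalone model of the staging pattern introduced by this patch: chunk addresses are gathered into a fixed-size stage array, shuffled, and then flushed into fixed-capacity batches, carrying a partially filled batch across flushes. `Batch`, `kMaxPerBatch`, `FlushStage`, and the region parameters are illustrative stand-ins, not the compiler-rt types.

```
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

namespace {

constexpr uint32_t kStageSize = 48;   // mirrors kShuffleArraySize in the patch
constexpr uint32_t kMaxPerBatch = 4;  // stand-in for TransferBatch::MaxCached()

// Illustrative PRNG + Fisher-Yates shuffle (see the sketch above the diff).
uint32_t Rand(uint32_t *state) {
  *state = *state * 1103515245 + 12345;
  return (*state >> 16) & 0x7fff;
}

void RandomShuffle(uintptr_t *a, uint32_t n, uint32_t *rand_state) {
  if (n <= 1) return;
  for (uint32_t i = n - 1; i > 0; i--)
    std::swap(a[i], a[Rand(rand_state) % (i + 1)]);
}

// Toy stand-in for a TransferBatch: a small, fixed-capacity group of chunks.
struct Batch {
  uintptr_t chunks[kMaxPerBatch];
  uint32_t count = 0;
};

// Flush a (possibly shuffled) stage array into batches, continuing from the
// partially filled batch carried over from the previous flush, the way
// PopulateBatches continues from *current_batch.
void FlushStage(const uintptr_t *stage, uint32_t count, Batch *current,
                std::vector<Batch> *free_list) {
  for (uint32_t i = 0; i < count; i++) {
    current->chunks[current->count++] = stage[i];
    if (current->count == kMaxPerBatch) {
      free_list->push_back(*current);
      current->count = 0;
    }
  }
}

}  // namespace

int main() {
  const uintptr_t region_beg = 0x100000;
  const uint32_t chunk_size = 0x40, n_chunks = 104;
  uint32_t rand_state = 0xdeadbeef;  // would be seeded lazily from ASLR ^ time

  uintptr_t stage[kStageSize];
  uint32_t staged = 0;
  Batch current;
  std::vector<Batch> free_list;

  // Walk the region, staging chunk addresses and flushing a shuffled stage
  // array whenever it fills up -- the shape of the new PopulateFreeList loop.
  for (uint32_t i = 0; i < n_chunks; i++) {
    stage[staged++] = region_beg + i * chunk_size;
    if (staged == kStageSize) {
      RandomShuffle(stage, staged, &rand_state);
      FlushStage(stage, staged, &current, &free_list);
      staged = 0;
    }
  }
  if (staged) {  // leftover chunks that did not fill the stage array
    RandomShuffle(stage, staged, &rand_state);
    FlushStage(stage, staged, &current, &free_list);
  }
  if (current.count)  // keep the final partially filled batch as well
    free_list.push_back(current);

  std::printf("batches: %zu, first chunk of first batch: 0x%lx\n",
              free_list.size(), (unsigned long)free_list[0].chunks[0]);
  return 0;
}
```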