path: root/lib/sanitizer_common/sanitizer_allocator_primary32.h
author    Kostya Kortchinsky <kostyak@google.com>  2017-06-14 17:32:26 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2017-06-14 17:32:26 +0000
commit    6cd1865e1d8bc8fd1cb62fe4ef51c8c908385609 (patch)
tree      ea136a4ed6ae9d22a95f3ec6ae62593a126beb7a /lib/sanitizer_common/sanitizer_allocator_primary32.h
parent    fccfd4523bbad39dfaf5e1a9dbf7491abbc80a22 (diff)
[sanitizer] Reverting D34152
Summary:
This broke thread_local_quarantine_pthread_join.cc on some architectures,
due to the overhead of the stashed regions. Reverting while figuring out
the best way to deal with it.

Reviewers: alekseyshl

Reviewed By: alekseyshl

Subscribers: llvm-commits, kubamracek

Differential Revision: https://reviews.llvm.org/D34213

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@305404 91177308-0d34-0410-b5e6-96231b3b80d8
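For context, here is a minimal standalone sketch of the mechanism the reverted patch exploited (an illustration under assumed values, not the upstream MmapAlignedOrDie): to obtain kRegionSize-aligned memory, one over-maps by the alignment and trims the padding; when the kernel happens to return an already-aligned block on the first try, the trailing kRegionSize bytes form a complete spare region, which D34152 stashed for reuse and which this revert goes back to unmapping.

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

static const uintptr_t kRegionSize = 1 << 20;  // assumed value, for illustration only

int main() {
  // Over-map by the alignment so an aligned region always fits somewhere inside.
  uintptr_t map_size = kRegionSize + kRegionSize;
  void *p = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return 1;
  uintptr_t raw = (uintptr_t)p;
  uintptr_t region = (raw + kRegionSize - 1) & ~(kRegionSize - 1);
  if (region != raw)  // trim the leading padding
    munmap((void *)raw, region - raw);
  uintptr_t end = raw + map_size;
  uintptr_t spare = region + kRegionSize;
  // The trailing remainder is exactly one whole region iff the map came back
  // aligned: D34152 stashed it; the reverted-to behavior always unmaps it.
  if (end > spare) munmap((void *)spare, end - spare);
  printf("region at %#lx\n", (unsigned long)region);
  return 0;
}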
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_primary32.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_primary32.h  58
1 file changed, 8 insertions, 50 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
index bfedbc0a7..e13510ba3 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -24,7 +24,7 @@ template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
// be returned by MmapOrDie().
//
// Region:
-// a result of an allocation of kRegionSize bytes aligned on kRegionSize.
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
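A quick sketch of the arithmetic behind that comment, assuming a 32-bit address space and an illustrative kRegionSize of 2^20 (both are assumptions for this example; only the power-of-two property matters):

#include <cstdint>
#include <cstdio>

static const uintptr_t kRegionSize = 1 << 20;                      // assumed
static const uintptr_t kNumPossibleRegions = (1ULL << 32) / kRegionSize;

static uintptr_t ComputeRegionId(uintptr_t mem) { return mem / kRegionSize; }

int main() {
  uintptr_t addr = 0x1234ABCDu;
  // Each address falls in exactly one of kNumPossibleRegions slots, so a
  // flat ByteMap indexed by region id can record every region's size class.
  printf("region id %#lx of %#lx possible\n",
         (unsigned long)ComputeRegionId(addr),   // 0x123
         (unsigned long)kNumPossibleRegions);    // 0x1000
  return 0;
}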
@@ -106,7 +106,6 @@ class SizeClassAllocator32 {
void Init(s32 release_to_os_interval_ms) {
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
- num_stashed_regions = 0;
}
s32 ReleaseToOSIntervalMs() const {
@@ -276,49 +275,15 @@ class SizeClassAllocator32 {
return mem & ~(kRegionSize - 1);
}
- // Allocates a region of kRegionSize bytes, aligned on kRegionSize. If we get
- // more than one region back (in the event the allocation is aligned on the
- // first try), attempt to store the second region into a stash. If the stash
- // is full, just unmap the superfluous memory.
- uptr AllocateRegionSlow(AllocatorStats *stat) {
- uptr map_size = kRegionSize;
- uptr padding_chunk;
- uptr region = reinterpret_cast<uptr>(
- MmapAlignedOrDie(kRegionSize, kRegionSize, "SizeClassAllocator32",
- &padding_chunk));
- if (padding_chunk) {
- // We have an extra region, attempt to stash it.
- CHECK_EQ(padding_chunk, region + kRegionSize);
- bool trim_extra = true;
- {
- SpinMutexLock l(&regions_stash_mutex);
- if (num_stashed_regions < kMaxStashedRegions) {
- regions_stash[num_stashed_regions++] = padding_chunk;
- map_size = 2 * kRegionSize;
- trim_extra = false;
- }
- }
- if (trim_extra)
- UnmapOrDie((void*)padding_chunk, kRegionSize);
- }
- MapUnmapCallback().OnMap(region, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- return region;
- }
-
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
- uptr region = 0;
- {
- SpinMutexLock l(&regions_stash_mutex);
- if (num_stashed_regions > 0)
- region = regions_stash[--num_stashed_regions];
- }
- if (!region)
- region = AllocateRegionSlow(stat);
- CHECK(IsAligned(region, kRegionSize));
- possible_regions.set(ComputeRegionId(region), static_cast<u8>(class_id));
- return region;
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
}
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
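The surviving ComputeRegionBeg in the hunk above rounds an address down to its region start by clearing the low bits; a short worked example, again with an assumed kRegionSize of 2^20:

#include <cstdint>
#include <cstdio>

static const uintptr_t kRegionSize = 1 << 20;  // assumed, power of two

static uintptr_t ComputeRegionBeg(uintptr_t mem) {
  return mem & ~(kRegionSize - 1);  // clear low bits: round down to region start
}

int main() {
  uintptr_t mem = 0x1234ABCDu;
  // 0x1234abcd -> 0x12300000; the CHECK_EQ in AllocateRegion applies the same
  // mask to assert that a freshly mapped region is kRegionSize-aligned.
  printf("%#lx -> region beg %#lx\n",
         (unsigned long)mem, (unsigned long)ComputeRegionBeg(mem));
  return 0;
}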
@@ -351,13 +316,6 @@ class SizeClassAllocator32 {
}
}
- // Unless several threads request regions simultaneously from different size
- // classes, the stash rarely contains more than 1 entry.
- static const uptr kMaxStashedRegions = 8;
- SpinMutex regions_stash_mutex;
- uptr num_stashed_regions;
- uptr regions_stash[kMaxStashedRegions];
-
ByteMap possible_regions;
SizeClassInfo size_class_info_array[kNumClasses];
};
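For readers tracing what was removed: the fields deleted in the last hunk implement a spin-lock-guarded, fixed-capacity LIFO. A self-contained sketch of that pattern follows (simplified; sanitizer_common's real SpinMutex and integer types differ):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Minimal spin lock for the sketch; sanitizer_common has its own SpinMutex.
class SpinLock {
 public:
  void lock() { while (flag_.test_and_set(std::memory_order_acquire)) {} }
  void unlock() { flag_.clear(std::memory_order_release); }
 private:
  std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
};

// Fixed-capacity LIFO guarded by the lock, mirroring the removed
// regions_stash/num_stashed_regions pair.
template <typename T, size_t kCapacity>
class RegionStash {
 public:
  bool Push(T v) {          // false when full: caller unmaps the surplus
    lock_.lock();
    bool ok = size_ < kCapacity;
    if (ok) stash_[size_++] = v;
    lock_.unlock();
    return ok;
  }
  bool Pop(T *out) {        // false when empty: caller takes the slow path
    lock_.lock();
    bool ok = size_ > 0;
    if (ok) *out = stash_[--size_];
    lock_.unlock();
    return ok;
  }
 private:
  SpinLock lock_;
  size_t size_ = 0;
  T stash_[kCapacity];
};

int main() {
  RegionStash<uintptr_t, 8> stash;  // kMaxStashedRegions was 8 upstream
  stash.Push(0x12300000u);
  uintptr_t r;
  if (stash.Pop(&r)) { /* reuse r instead of mapping a fresh region */ }
  return 0;
}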