diff options
author | Dean Michael Berris <dberris@google.com> | 2018-07-18 01:53:39 +0000 |
---|---|---|
committer | Dean Michael Berris <dberris@google.com> | 2018-07-18 01:53:39 +0000 |
commit | 1d40550d23737645acb8d9f4fd90472b52328906 (patch) | |
tree | 8a46145b7b54ff6ec7d8cf75963a36465b2da312 /lib/xray/tests/unit/segmented_array_test.cc | |
parent | f8e9626b112c22014212116b8d53dcc0d2c657d1 (diff) |
[XRay][compiler-rt] Simplify Allocator Implementation
Summary:
This change simplifies the XRay Allocator implementation to self-manage
an mmap'ed memory segment instead of using the internal allocator
implementation in sanitizer_common.
Through the benchmarks in D48879, and by profiling those benchmarks, we've
found that using the internal allocator in sanitizer_common introduces a
bottleneck on allocating memory through a central spinlock. This change
allows thread-local allocators to eliminate contention on the
centralized allocator.
To get the most benefit from this approach, we also use a managed
allocator for the chunk elements used by the segmented array
implementation. This gives us the chance to amortize the cost of
allocating memory when creating these internal segmented array data
structures.
We also took the opportunity to remove the preallocation argument from
the allocator API, simplifying the usage of the allocator throughout the
profiling implementation.
In this change we also tweak some of the flag values to reduce the
amount of maximum memory we use/need for each thread, when requesting
memory through mmap.
Depends on D48956.
Reviewers: kpw, eizan
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D49217
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@337342 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/xray/tests/unit/segmented_array_test.cc')
-rw-r--r-- | lib/xray/tests/unit/segmented_array_test.cc | 57 |
1 file changed, 38 insertions, 19 deletions
diff --git a/lib/xray/tests/unit/segmented_array_test.cc b/lib/xray/tests/unit/segmented_array_test.cc index 539162d7d..b48d481ff 100644 --- a/lib/xray/tests/unit/segmented_array_test.cc +++ b/lib/xray/tests/unit/segmented_array_test.cc @@ -12,27 +12,29 @@ struct TestData { TestData(s64 F, s64 S) : First(F), Second(S) {} }; -TEST(SegmentedArrayTest, Construction) { - Array<TestData> Data; - (void)Data; -} - -TEST(SegmentedArrayTest, ConstructWithAllocator) { +TEST(SegmentedArrayTest, ConstructWithAllocators) { using AllocatorType = typename Array<TestData>::AllocatorType; - AllocatorType A(1 << 4, 0); - Array<TestData> Data(A); + AllocatorType A(1 << 4); + ChunkAllocator CA(1 << 4); + Array<TestData> Data(A, CA); (void)Data; } TEST(SegmentedArrayTest, ConstructAndPopulate) { - Array<TestData> data; + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + ChunkAllocator CA(1 << 4); + Array<TestData> data(A, CA); ASSERT_NE(data.Append(TestData{0, 0}), nullptr); ASSERT_NE(data.Append(TestData{1, 1}), nullptr); ASSERT_EQ(data.size(), 2u); } TEST(SegmentedArrayTest, ConstructPopulateAndLookup) { - Array<TestData> data; + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + ChunkAllocator CA(1 << 4); + Array<TestData> data(A, CA); ASSERT_NE(data.Append(TestData{0, 1}), nullptr); ASSERT_EQ(data.size(), 1u); ASSERT_EQ(data[0].First, 0); @@ -40,7 +42,10 @@ TEST(SegmentedArrayTest, ConstructPopulateAndLookup) { } TEST(SegmentedArrayTest, PopulateWithMoreElements) { - Array<TestData> data; + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 24); + ChunkAllocator CA(1 << 20); + Array<TestData> data(A, CA); static const auto kMaxElements = 100u; for (auto I = 0u; I < kMaxElements; ++I) { ASSERT_NE(data.Append(TestData{I, I + 1}), nullptr); } @@ -53,14 +58,20 @@ TEST(SegmentedArrayTest, PopulateWithMoreElements) { } TEST(SegmentedArrayTest, AppendEmplace) { - Array<TestData> data; + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + ChunkAllocator CA(1 << 4); + Array<TestData> data(A, CA); ASSERT_NE(data.AppendEmplace(1, 1), nullptr); ASSERT_EQ(data[0].First, 1); ASSERT_EQ(data[0].Second, 1); } TEST(SegmentedArrayTest, AppendAndTrim) { - Array<TestData> data; + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + ChunkAllocator CA(1 << 4); + Array<TestData> data(A, CA); ASSERT_NE(data.AppendEmplace(1, 1), nullptr); ASSERT_EQ(data.size(), 1u); data.trim(1); @@ -69,7 +80,10 @@ TEST(SegmentedArrayTest, AppendAndTrim) { } TEST(SegmentedArrayTest, IteratorAdvance) { - Array<TestData> data; + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + ChunkAllocator CA(1 << 4); + Array<TestData> data(A, CA); ASSERT_TRUE(data.empty()); ASSERT_EQ(data.begin(), data.end()); auto I0 = data.begin(); @@ -88,7 +102,10 @@ TEST(SegmentedArrayTest, IteratorAdvance) { } TEST(SegmentedArrayTest, IteratorRetreat) { - Array<TestData> data; + using AllocatorType = typename Array<TestData>::AllocatorType; + AllocatorType A(1 << 4); + ChunkAllocator CA(1 << 4); + Array<TestData> data(A, CA); ASSERT_TRUE(data.empty()); ASSERT_EQ(data.begin(), data.end()); ASSERT_NE(data.AppendEmplace(1, 1), nullptr); @@ -108,8 +125,9 @@ TEST(SegmentedArrayTest, IteratorRetreat) { TEST(SegmentedArrayTest, IteratorTrimBehaviour) { using AllocatorType = typename Array<TestData>::AllocatorType; - AllocatorType A(1 << 10, 0); - Array<TestData> Data(A); + AllocatorType A(1 << 20); + ChunkAllocator CA(1 << 10); + Array<TestData> Data(A, CA); ASSERT_TRUE(Data.empty()); auto I0Begin = Data.begin(), I0End = Data.end(); // Add enough elements in Data to have more than one chunk. 
@@ -160,8 +178,9 @@ struct ShadowStackEntry { TEST(SegmentedArrayTest, SimulateStackBehaviour) { using AllocatorType = typename Array<ShadowStackEntry>::AllocatorType; - AllocatorType A(1 << 10, 0); - Array<ShadowStackEntry> Data(A); + AllocatorType A(1 << 10); + ChunkAllocator CA(1 << 10); + Array<ShadowStackEntry> Data(A, CA); static uint64_t Dummy = 0; constexpr uint64_t Max = 9; |