author    Alex Shlyapnikov <alekseys@google.com>  2017-06-26 22:54:10 +0000
committer Alex Shlyapnikov <alekseys@google.com>  2017-06-26 22:54:10 +0000
commit    9d35abce5aacd116a1f2a5ac5541934f88dbfa54 (patch)
tree      ac6fd909c0bf81545d5c88b0eb365267b1b85002 /lib/sanitizer_common/tests
parent    f3180f887ff3b17f3c5dc654d3c1726e90bb64e4 (diff)
[Sanitizers] 64 bit allocator respects allocator_may_return_null flag
Summary:
Make SizeClassAllocator64 return nullptr when it encounters OOM, which allows
the entire sanitizer's allocator to follow allocator_may_return_null=1 policy
(LargeMmapAllocator: D34243, SizeClassAllocator64: D34433).

Reviewers: eugenis

Subscribers: srhines, kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34540

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@306342 91177308-0d34-0410-b5e6-96231b3b80d8
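For context, allocator_may_return_null=1 is the runtime flag that tells the sanitizer allocator to report unsatisfiable requests to the caller instead of aborting the process. A minimal sketch of the user-visible behavior this commit extends to SizeClassAllocator64 (the oversized request below is illustrative and not taken from this patch):

// Sketch only, not part of the patch. Run with
// ASAN_OPTIONS=allocator_may_return_null=1: an unsatisfiable request is
// expected to come back as a null pointer rather than terminating the
// process, so the program can detect the failure and continue.
#include <cstdio>
#include <cstdlib>

int main() {
  // Illustrative oversized request; the exact failure threshold depends on
  // the platform and the sanitizer configuration.
  void *p = malloc((size_t)1 << 48);
  if (!p) {
    fprintf(stderr, "allocation failed, continuing without it\n");
    return 0;
  }
  free(p);
  return 0;
}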
Diffstat (limited to 'lib/sanitizer_common/tests')
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_allocator_test.cc  34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
index f256d8776..0def8ee0f 100644
--- a/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
@@ -436,30 +436,31 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
 }
-template<class Allocator>
-void FailInAssertionOnOOM() {
-  Allocator a;
+// Don't test OOM conditions on Win64 because it causes other tests on the same
+// machine to OOM.
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
+TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
+  Allocator64 a;
   a.Init(kReleaseToOSIntervalNever);
-  SizeClassAllocatorLocalCache<Allocator> cache;
+  SizeClassAllocatorLocalCache<Allocator64> cache;
   memset(&cache, 0, sizeof(cache));
   cache.Init(0);
   AllocatorStats stats;
   stats.Init();
+
   const size_t kNumChunks = 128;
   uint32_t chunks[kNumChunks];
+  bool allocation_failed = false;
   for (int i = 0; i < 1000000; i++) {
-    a.GetFromAllocator(&stats, 52, chunks, kNumChunks);
+    if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
+      allocation_failed = true;
+      break;
+    }
   }
+  EXPECT_EQ(allocation_failed, true);
   a.TestOnlyUnmap();
 }
-
-// Don't test OOM conditions on Win64 because it causes other tests on the same
-// machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
-TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
-  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
-}
 #endif
 TEST(SanitizerCommon, LargeMmapAllocator) {
@@ -970,9 +971,9 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
   ASSERT_LT(2 * kAllocationSize, kRegionSize);
   ASSERT_GT(3 * kAllocationSize, kRegionSize);
-  cache.Allocate(a, kClassID);
-  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
-               "The process has exhausted");
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
+  EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
   const uptr Class2 = 100;
   const uptr Size2 = SpecialSizeClassMap::Size(Class2);
@@ -980,11 +981,12 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
   char *p[7];
   for (int i = 0; i < 7; i++) {
     p[i] = (char*)cache.Allocate(a, Class2);
+    EXPECT_NE(p[i], nullptr);
     fprintf(stderr, "p[%d] %p s = %lx\n", i, (void*)p[i], Size2);
     p[i][Size2 - 1] = 42;
     if (i) ASSERT_LT(p[i - 1], p[i]);
   }
-  EXPECT_DEATH(cache.Allocate(a, Class2), "The process has exhausted");
+  EXPECT_EQ(cache.Allocate(a, Class2), nullptr);
   cache.Deallocate(a, Class2, p[0]);
   cache.Drain(a);
   ASSERT_EQ(p[6][Size2 - 1], 42);
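The reworked SizeClassAllocator64Overflow test in the first hunk drives GetFromAllocator until it reports failure, rather than expecting a fatal "Out of memory" message. A self-contained sketch of that calling pattern against a toy allocator (the names below are illustrative and are not the sanitizer_common API):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Toy allocator with a fixed chunk budget that reports OOM to the caller
// instead of aborting, mirroring the contract the test now checks.
struct ToyAllocator {
  uint32_t next = 0;
  static constexpr uint32_t kCapacity = 1 << 20;

  bool GetChunks(uint32_t *chunks, size_t n) {
    if (next + n > kCapacity) return false;  // out of budget: signal failure
    for (size_t i = 0; i < n; i++) chunks[i] = next++;
    return true;
  }
};

int main() {
  ToyAllocator a;
  const size_t kNumChunks = 128;
  uint32_t chunks[kNumChunks];
  bool allocation_failed = false;
  // Same shape as the test: keep allocating until the allocator reports OOM.
  for (int i = 0; i < 1000000; i++) {
    if (!a.GetChunks(chunks, kNumChunks)) {
      allocation_failed = true;
      break;
    }
  }
  fprintf(stderr, "allocation_failed = %d\n", allocation_failed);
  return allocation_failed ? 0 : 1;
}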