author     Alex Shlyapnikov <alekseys@google.com>  2017-06-13 23:57:24 +0000
committer  Alex Shlyapnikov <alekseys@google.com>  2017-06-13 23:57:24 +0000
commit     2716b49e6801990c55b0dcbfcfbed120f6a364d8 (patch)
tree       ea136a4ed6ae9d22a95f3ec6ae62593a126beb7a
parent     5cf6036d46c149520f75ca93289e967277baa930 (diff)
[ASan] Move rss_limit_is_exceeded_ flag to ASan.
Summary:
Move the OOM decision based on RSS limits out of generic allocator to
ASan allocator, where it makes more sense at the moment.

Reviewers: eugenis

Subscribers: kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34180

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@305342 91177308-0d34-0410-b5e6-96231b3b80d8
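For readers unfamiliar with the pattern being moved here, the sketch below is a standalone illustration, not the sanitizer sources: the front-end allocator owns a relaxed atomic flag, the soft-RSS-limit callback flips it, and the allocation path checks it up front instead of delegating that check to the combined allocator. FrontEndAllocator and SoftRssLimitExceededCallback are hypothetical names, and std::atomic stands in for the runtime's own atomic_uint8_t helpers.

// Standalone sketch of the flag-ownership pattern in this patch.
// Names are hypothetical; the real runtime uses sanitizer-internal atomics.
#include <atomic>
#include <cstdlib>

class FrontEndAllocator {
 public:
  bool RssLimitExceeded() const {
    return rss_limit_exceeded_.load(std::memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    rss_limit_exceeded_.store(limit_exceeded, std::memory_order_relaxed);
  }

  void *Allocate(size_t size) {
    // The OOM decision is made here, in the front end, before any work
    // is handed to the underlying allocator.
    if (RssLimitExceeded())
      return nullptr;  // stand-in for ReturnNullOrDieOnOOM()
    return std::malloc(size);
  }

 private:
  std::atomic<bool> rss_limit_exceeded_{false};
};

// The RSS-monitoring callback only has to flip the flag on the front end.
void SoftRssLimitExceededCallback(FrontEndAllocator &a, bool exceeded) {
  a.SetRssLimitExceeded(exceeded);
}

Relaxed ordering suffices because the flag is an independent boolean hint and no other data is published through it, which is also why this patch drops the acquire/release pair used by the old flag in CombinedAllocator.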
-rw-r--r--  lib/asan/asan_allocator.cc                            21
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_combined.h   19
2 files changed, 21 insertions, 19 deletions
diff --git a/lib/asan/asan_allocator.cc b/lib/asan/asan_allocator.cc
index 7010b6023..1ded7794c 100644
--- a/lib/asan/asan_allocator.cc
+++ b/lib/asan/asan_allocator.cc
@@ -235,6 +235,8 @@ struct Allocator {
   AllocatorCache fallback_allocator_cache;
   QuarantineCache fallback_quarantine_cache;
 
+  atomic_uint8_t rss_limit_exceeded;
+
   // ------------------- Options --------------------------
   atomic_uint16_t min_redzone;
   atomic_uint16_t max_redzone;
@@ -268,6 +270,14 @@ struct Allocator {
     SharedInitCode(options);
   }
 
+  bool RssLimitExceeded() {
+    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+  }
+
+  void SetRssLimitExceeded(bool limit_exceeded) {
+    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+  }
+
   void RePoisonChunk(uptr chunk) {
     // This could be a user-facing chunk (with redzones), or some internal
     // housekeeping chunk, like TransferBatch. Start by assuming the former.
@@ -363,6 +373,8 @@ struct Allocator {
                  AllocType alloc_type, bool can_fill) {
     if (UNLIKELY(!asan_inited))
       AsanInitFromRtl();
+    if (RssLimitExceeded())
+      return allocator.ReturnNullOrDieOnOOM();
     Flags &fl = *flags();
     CHECK(stack);
     const uptr min_alignment = SHADOW_GRANULARITY;
@@ -400,16 +412,15 @@ struct Allocator {
     AsanThread *t = GetCurrentThread();
     void *allocated;
-    bool check_rss_limit = true;
     if (t) {
       AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
       allocated =
-          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+          allocator.Allocate(cache, needed_size, 8, false);
     } else {
       SpinMutexLock l(&fallback_mutex);
       AllocatorCache *cache = &fallback_allocator_cache;
       allocated =
-          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
+          allocator.Allocate(cache, needed_size, 8, false);
     }
 
     if (!allocated) return allocator.ReturnNullOrDieOnOOM();
@@ -866,8 +877,8 @@ void asan_mz_force_unlock() {
   instance.ForceUnlock();
 }
 
-void AsanSoftRssLimitExceededCallback(bool exceeded) {
-  instance.allocator.SetRssLimitIsExceeded(exceeded);
+void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
+  instance.SetRssLimitExceeded(limit_exceeded);
 }
 
 } // namespace __asan
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index 19e1ae9b9..2c2390b3d 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -43,12 +43,12 @@ class CombinedAllocator {
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
-                 bool cleared = false, bool check_rss_limit = false) {
+                 bool cleared = false) {
     // Returning 0 on malloc(0) may break a lot of code.
     if (size == 0)
       size = 1;
-    if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
-    if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+    if (size + alignment < size)
+      return ReturnNullOrDieOnBadRequest();
     uptr original_size = size;
     // If alignment requirements are to be fulfilled by the frontend allocator
     // rather than by the primary or secondary, passing an alignment lower than
@@ -89,7 +89,8 @@ class CombinedAllocator {
   }
 
   void *ReturnNullOrDieOnOOM() {
-    if (MayReturnNull()) return nullptr;
+    if (MayReturnNull())
+      return nullptr;
     ReportAllocatorCannotReturnNull(true);
   }
@@ -106,15 +107,6 @@ class CombinedAllocator {
     primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
   }
 
-  bool RssLimitIsExceeded() {
-    return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
-  }
-
-  void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
-    atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
-                 memory_order_release);
-  }
-
   void Deallocate(AllocatorCache *cache, void *p) {
     if (!p) return;
     if (primary_.PointerIsMine(p))
@@ -228,6 +220,5 @@ class CombinedAllocator {
   SecondaryAllocator secondary_;
   AllocatorGlobalStats stats_;
   atomic_uint8_t may_return_null_;
-  atomic_uint8_t rss_limit_is_exceeded_;
 };
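As a usage-level sketch (not part of this change), the relocated check is observable through the existing common sanitizer flags: with ASAN_OPTIONS containing soft_rss_limit_mb and allocator_may_return_null=1, allocations made after the background RSS monitor reports the limit as exceeded are expected to fail with null. The numbers and the program below are illustrative assumptions, not a test from this patch.

// Hypothetical probe program; build with -fsanitize=address and run as:
//   ASAN_OPTIONS=soft_rss_limit_mb=256:allocator_may_return_null=1 ./probe
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <vector>

int main() {
  std::vector<void *> blocks;
  for (int i = 0; i < 4096; i++) {     // bound the attempts (~4 GiB max)
    void *p = std::malloc(1 << 20);    // 1 MiB per allocation
    if (!p) {
      // Once the RSS monitor flips the flag, the ASan allocator front end
      // short-circuits and malloc returns null (with may_return_null set).
      std::printf("allocation %d failed after the soft RSS limit\n", i);
      break;
    }
    std::memset(p, 0xab, 1 << 20);     // touch the pages so RSS grows
    blocks.push_back(p);
  }
  for (void *p : blocks) std::free(p);
  return 0;
}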