author     Alex Shlyapnikov <alekseys@google.com>  2017-06-20 21:23:02 +0000
committer  Alex Shlyapnikov <alekseys@google.com>  2017-06-20 21:23:02 +0000
commit     c13d2469d55325c30566bff7db800053e1bca2ca (patch)
tree       d8d05cdb139941b1de5b28255537d1bd25e8d3f5 /lib/sanitizer_common/sanitizer_allocator_secondary.h
parent     4f2195f82b6124970cefd89e4ae0e179e7b99381 (diff)
[Sanitizers] Move cached allocator_may_return_null flag to sanitizer_allocator
Summary:
Move the cached allocator_may_return_null flag to sanitizer_allocator.cc and
provide an API to consolidate and unify the behavior of all specific
allocators.

Make all sanitizers that use CombinedAllocator follow the
AllocatorReturnNullOrDieOnOOM() rules, so they behave the same way when OOM
happens.

When OOM happens, turn the allocator_out_of_memory flag on regardless of the
allocator_may_return_null flag value (it used not to be set when
allocator_may_return_null == true).

release_to_os_interval_ms and rss_limit_exceeded will likely be moved to
sanitizer_allocator.cc too (later).

Reviewers: eugenis

Subscribers: srhines, kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34310

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@305858 91177308-0d34-0410-b5e6-96231b3b80d8
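The patch replaces per-allocator may_return_null state with a compile-time
failure-handler policy. A minimal, self-contained sketch of that pattern,
assuming a simplified stand-in for ReturnNullOrDieOnFailure (ToyFailureHandler,
ToyLargeAllocator, and the malloc-based body are illustrative, not the actual
sanitizer_common code):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for ReturnNullOrDieOnFailure: failure paths are
    // static hooks on a policy class, so the allocator template carries no
    // per-instance may_return_null state.
    struct ToyFailureHandler {
      static bool may_return_null;  // the real flag is cached centrally
      static void *OnBadRequest() {
        if (may_return_null) return nullptr;
        std::fprintf(stderr, "allocator: invalid allocation request\n");
        std::abort();
      }
      static void *OnOOM() {
        // Per the commit message, the real OnOOM() also raises an
        // out-of-memory flag regardless of may_return_null.
        if (may_return_null) return nullptr;
        std::fprintf(stderr, "allocator: out of memory\n");
        std::abort();
      }
    };
    bool ToyFailureHandler::may_return_null = true;

    // The allocator is parameterized on the handler, mirroring the new
    // FailureHandlerT template parameter in the diff below.
    template <class FailureHandler = ToyFailureHandler>
    struct ToyLargeAllocator {
      void *Allocate(std::size_t size, std::size_t alignment) {
        std::size_t map_size = size + alignment;
        if (map_size < size)                     // overflow: a bad request
          return FailureHandler::OnBadRequest();
        void *p = std::malloc(map_size);
        if (!p)                                  // mapping failed: OOM
          return FailureHandler::OnOOM();
        return p;
      }
    };

Swapping the policy class changes the failure behavior of every call site at
once, which is the consolidation the commit message describes.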
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_secondary.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_secondary.h | 35
1 file changed, 9 insertions(+), 26 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_secondary.h b/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 2c69f47ec..261dfb5e1 100644
--- a/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -17,17 +17,19 @@
// This class can (de)allocate only large chunks of memory using mmap/unmap.
// The main purpose of this allocator is to cover large and rare allocation
// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+ class FailureHandlerT = ReturnNullOrDieOnFailure>
class LargeMmapAllocator {
public:
- void InitLinkerInitialized(bool may_return_null) {
+ typedef FailureHandlerT FailureHandler;
+
+ void InitLinkerInitialized() {
page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
- void Init(bool may_return_null) {
+ void Init() {
internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
+ InitLinkerInitialized();
}
void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
@@ -37,11 +39,11 @@ class LargeMmapAllocator {
map_size += alignment;
// Overflow.
if (map_size < size)
- return ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
if (!map_beg)
- return ReturnNullOrDieOnOOM();
+ return FailureHandler::OnOOM();
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;
@@ -75,24 +77,6 @@ class LargeMmapAllocator {
return reinterpret_cast<void*>(res);
}
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull()) return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
void Deallocate(AllocatorStats *stat, void *p) {
Header *h = GetHeader(p);
{
@@ -278,7 +262,6 @@ class LargeMmapAllocator {
struct Stats {
uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
} stats;
- atomic_uint8_t may_return_null_;
SpinMutex mutex_;
};
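For context, a sketch of roughly what the consolidated state moved into
sanitizer_allocator.cc looks like, reconstructed from the commit message and
the methods removed above; treat the exact names, initializers, and memory
orders as an approximation rather than a quote of the file:

    // Sketch: one cached flag shared by all allocators, replacing the
    // per-allocator atomic deleted from LargeMmapAllocator above.
    static atomic_uint8_t allocator_may_return_null = {0};

    bool AllocatorMayReturnNull() {
      return atomic_load(&allocator_may_return_null, memory_order_relaxed) != 0;
    }

    void SetAllocatorMayReturnNull(bool may_return_null) {
      atomic_store(&allocator_may_return_null, may_return_null,
                   memory_order_relaxed);
    }

    // Sketch of the shared failure-handler policy the diff plugs in as
    // FailureHandlerT. OnOOM() raises allocator_out_of_memory
    // unconditionally, which is the behavioral change called out in the
    // commit message (it used not to be set when may_return_null == true).
    static atomic_uint8_t allocator_out_of_memory = {0};

    struct ReturnNullOrDieOnFailure {
      static void *OnBadRequest() {
        if (AllocatorMayReturnNull()) return nullptr;
        ReportAllocatorCannotReturnNull(/*out_of_memory=*/false);  // noreturn
      }
      static void *OnOOM() {
        atomic_store(&allocator_out_of_memory, 1, memory_order_relaxed);
        if (AllocatorMayReturnNull()) return nullptr;
        ReportAllocatorCannotReturnNull(/*out_of_memory=*/true);  // noreturn
      }
    };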