author    Alex Shlyapnikov <alekseys@google.com>  2017-06-20 21:23:02 +0000
committer Alex Shlyapnikov <alekseys@google.com>  2017-06-20 21:23:02 +0000
commit    c13d2469d55325c30566bff7db800053e1bca2ca (patch)
tree      d8d05cdb139941b1de5b28255537d1bd25e8d3f5 /lib/sanitizer_common/sanitizer_allocator_combined.h
parent    4f2195f82b6124970cefd89e4ae0e179e7b99381 (diff)
[Sanitizers] Move cached allocator_may_return_null flag to sanitizer_allocator
Summary:
Move the cached allocator_may_return_null flag to sanitizer_allocator.cc and
provide an API to consolidate and unify the behavior of all specific
allocators. Make all sanitizers that use CombinedAllocator follow the
AllocatorReturnNullOrDieOnOOM() rules, so that they behave the same way when
OOM happens.

When OOM happens, the allocator_out_of_memory flag is now turned on regardless
of the allocator_may_return_null flag value (it used to be left unset when
allocator_may_return_null == true).

release_to_os_interval_ms and rss_limit_exceeded will likely be moved to
sanitizer_allocator.cc too (later).

Reviewers: eugenis

Subscribers: srhines, kubamracek, llvm-commits

Differential Revision: https://reviews.llvm.org/D34310

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@305858 91177308-0d34-0410-b5e6-96231b3b80d8
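The diff below only requires that the secondary allocator expose a
FailureHandler type with two static hooks, OnBadRequest() and OnOOM(). As a
rough, illustrative sketch of such a policy (the struct name, the messages,
and the plain bool flags are placeholders for this note, not the actual
compiler-rt implementation the commit message describes), it might look like:

  #include <cstdio>
  #include <cstdlib>

  // Sketch of a failure-handler policy with the interface the diff expects:
  // one hook for invalid requests, one for out-of-memory. The bool flags
  // stand in for the state the commit message says now lives in
  // sanitizer_allocator.cc.
  struct SketchFailureHandler {
    static bool may_return_null;
    static bool out_of_memory;

    // Called for malformed requests, e.g. the size + alignment overflow check.
    static void *OnBadRequest() {
      if (may_return_null)
        return nullptr;
      std::fprintf(stderr, "allocator: invalid allocation request\n");
      std::abort();
    }

    // Called when an allocation cannot be satisfied. Per the commit message,
    // the out-of-memory flag is set regardless of may_return_null.
    static void *OnOOM() {
      out_of_memory = true;
      if (may_return_null)
        return nullptr;
      std::fprintf(stderr, "allocator: out of memory\n");
      std::abort();
    }
  };

  bool SketchFailureHandler::may_return_null = true;
  bool SketchFailureHandler::out_of_memory = false;

CombinedAllocator then re-exports the policy via
typedef typename SecondaryAllocator::FailureHandler FailureHandler; and calls
it on overflow and OOM, as the diff below shows.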
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_combined.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator_combined.h | 49
1 file changed, 12 insertions(+), 37 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index 90ccb1771..efd25cadf 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,22 +24,18 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
- primary_.Init(release_to_os_interval_ms);
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
+ typedef typename SecondaryAllocator::FailureHandler FailureHandler;
- void InitLinkerInitialized(
- bool may_return_null, s32 release_to_os_interval_ms) {
- secondary_.InitLinkerInitialized(may_return_null);
+ void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.InitLinkerInitialized();
stats_.InitLinkerInitialized();
- InitCommon(may_return_null, release_to_os_interval_ms);
}
- void Init(bool may_return_null, s32 release_to_os_interval_ms) {
- secondary_.Init(may_return_null);
+ void Init(s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.Init();
stats_.Init();
- InitCommon(may_return_null, release_to_os_interval_ms);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
@@ -47,7 +43,7 @@ class CombinedAllocator {
if (size == 0)
size = 1;
if (size + alignment < size)
- return ReturnNullOrDieOnBadRequest();
+ return FailureHandler::OnBadRequest();
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
@@ -55,44 +51,24 @@ class CombinedAllocator {
// alignment check.
if (alignment > 8)
size = RoundUpTo(size, alignment);
- void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
// The primary allocator should return a 2^x aligned allocation when
// requested 2^x bytes, hence using the rounded up 'size' when being
// serviced by the primary (this is no longer true when the primary is
// using a non-fixed base address). The secondary takes care of the
// alignment without such requirement, and allocating 'size' would use
// extraneous memory, so we employ 'original_size'.
- if (from_primary)
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
res = cache->Allocate(&primary_, primary_.ClassID(size));
else
res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (!res)
+ return FailureHandler::OnOOM();
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
return res;
}
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDieOnBadRequest() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull(false);
- }
-
- void *ReturnNullOrDieOnOOM() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull(true);
- }
-
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
s32 ReleaseToOSIntervalMs() const {
return primary_.ReleaseToOSIntervalMs();
}
@@ -213,6 +189,5 @@ class CombinedAllocator {
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
};
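For frontends, the visible change is that Init() no longer takes
may_return_null and Allocate() routes every failure through FailureHandler. A
self-contained toy of that shape (MiniCombinedAllocator, MayReturnNullHandler,
and the nothrow operator new backing store are made up for illustration; none
of this is compiler-rt code):

  #include <cstdio>
  #include <new>

  // Toy policy that always permits returning null on failure.
  struct MayReturnNullHandler {
    static void *OnBadRequest() { return nullptr; }
    static void *OnOOM() { return nullptr; }
  };

  // Toy allocator wrapper: only the release-to-OS interval is threaded
  // through Init(); the null-or-die policy lives in FailureHandler.
  template <class FailureHandler>
  struct MiniCombinedAllocator {
    void Init(long release_to_os_interval_ms) {
      release_to_os_interval_ms_ = release_to_os_interval_ms;
    }

    // Alignment is only used for the overflow check here; the toy backing
    // store ignores it.
    void *Allocate(unsigned long size, unsigned long alignment) {
      if (size + alignment < size)          // overflow: bad request
        return FailureHandler::OnBadRequest();
      void *res = ::operator new(size, std::nothrow);
      if (!res)
        return FailureHandler::OnOOM();     // OOM: unified handling
      return res;
    }

    long release_to_os_interval_ms_ = -1;
  };

  int main() {
    MiniCombinedAllocator<MayReturnNullHandler> allocator;
    allocator.Init(-1);
    void *p = allocator.Allocate(64, 8);
    std::printf("allocation %s\n", p ? "succeeded" : "returned null");
    ::operator delete(p);
    return 0;
  }

Swapping MayReturnNullHandler for a policy that reports and dies changes the
failure behavior without touching the allocator itself, which is the
consolidation point of this patch.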