path: root/lib/sanitizer_common/sanitizer_allocator_combined.h
author     Evgeniy Stepanov <eugeni.stepanov@gmail.com>  2016-11-29 00:22:50 +0000
committer  Evgeniy Stepanov <eugeni.stepanov@gmail.com>  2016-11-29 00:22:50 +0000
commit     9eaa3ef462b338fa2426930e2d3072b43291abcf (patch)
tree       267a7e011a8126b7f32027505beb5cb4bb92c136 /lib/sanitizer_common/sanitizer_allocator_combined.h
parent     ca0aa08d646e22e1639217be0461cef96dcb83a3 (diff)
Return memory to OS right after free (not in the async thread).
Summary:
In order to avoid starting a separate thread to return unused memory to the
system, try to return it right after free. (The background thread interferes
with process startup on Android: Zygote waits for all threads to exit before
fork, but this thread never exits.)

Reviewers: eugenis

Subscribers: cryptoad, filcab, danalbert, kubabrecka, llvm-commits

Patch by Aleksey Shlyapnikov.

Differential Revision: https://reviews.llvm.org/D27003

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@288091 91177308-0d34-0410-b5e6-96231b3b80d8
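To make the approach concrete, here is a minimal self-contained sketch (not the actual compiler-rt implementation) of how release-to-OS work can be rate-limited on the free path itself, which is what removes the need for a dedicated background thread. The class name, the timing scheme, and ReleaseUnusedPages() are illustrative assumptions:

// Hypothetical sketch: rate-limited release-to-OS on the free path.
// All names here are illustrative; none are real compiler-rt APIs.
#include <atomic>
#include <chrono>
#include <cstdint>

class IntervalReleaser {
 public:
  explicit IntervalReleaser(int32_t interval_ms)
      : interval_ms_(interval_ms), last_release_ns_(0) {}

  // Called at the end of a deallocation: releases unused pages back to
  // the OS at most once per interval, so no dedicated thread is needed.
  void MaybeReleaseToOS() {
    if (interval_ms_ < 0) return;  // negative interval disables release
    uint64_t now = NowNs();
    uint64_t last = last_release_ns_.load(std::memory_order_relaxed);
    if (now - last < uint64_t(interval_ms_) * 1000000ull) return;
    // Only the thread that wins the compare-exchange pays the release cost.
    if (last_release_ns_.compare_exchange_strong(last, now,
                                                 std::memory_order_relaxed))
      ReleaseUnusedPages();
  }

 private:
  static uint64_t NowNs() {
    return std::chrono::duration_cast<std::chrono::nanoseconds>(
               std::chrono::steady_clock::now().time_since_epoch())
        .count();
  }
  // Placeholder for the platform-specific release (e.g. madvise() over
  // runs of free pages); elided to keep the sketch self-contained.
  void ReleaseUnusedPages() {}

  int32_t interval_ms_;
  std::atomic<uint64_t> last_release_ns_;
};

With a scheme like this, the common-case overhead on free is one relaxed atomic load plus a clock read, and only the thread that wins the compare-exchange performs the actual page release.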
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator_combined.h')
-rw-r--r--   lib/sanitizer_common/sanitizer_allocator_combined.h   23
1 file changed, 15 insertions(+), 8 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index 029d551e9..de96e2768 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -24,21 +24,22 @@ template <class PrimaryAllocator, class AllocatorCache,
           class SecondaryAllocator>  // NOLINT
 class CombinedAllocator {
  public:
-  void InitCommon(bool may_return_null) {
-    primary_.Init();
+  void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
+    primary_.Init(release_to_os_interval_ms);
     atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
   }
 
-  void InitLinkerInitialized(bool may_return_null) {
+  void InitLinkerInitialized(
+      bool may_return_null, s32 release_to_os_interval_ms) {
     secondary_.InitLinkerInitialized(may_return_null);
     stats_.InitLinkerInitialized();
-    InitCommon(may_return_null);
+    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
-  void Init(bool may_return_null) {
+  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
     secondary_.Init(may_return_null);
     stats_.Init();
-    InitCommon(may_return_null);
+    InitCommon(may_return_null, release_to_os_interval_ms);
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
@@ -83,6 +84,14 @@ class CombinedAllocator {
     atomic_store(&may_return_null_, may_return_null, memory_order_release);
   }
 
+  s32 ReleaseToOSIntervalMs() const {
+    return primary_.ReleaseToOSIntervalMs();
+  }
+
+  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+  }
+
   bool RssLimitIsExceeded() {
     return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
   }
@@ -193,8 +202,6 @@
     primary_.ForceUnlock();
   }
 
-  void ReleaseToOS() { primary_.ReleaseToOS(); }
-
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.
   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
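For context, a hedged sketch of what a call site looks like after this change. MockCombinedAllocator stands in for a concrete CombinedAllocator instantiation so the example is self-contained, and the interval values are arbitrary:

// Hypothetical call site mirroring the new Init/SetReleaseToOSIntervalMs
// signatures in the diff above. MockCombinedAllocator and the constants
// are placeholders, not names from compiler-rt.
#include <cstdint>
#include <cstdio>
typedef int32_t s32;  // compiler-rt's s32 is a 32-bit signed integer

struct MockCombinedAllocator {
  void Init(bool may_return_null, s32 release_to_os_interval_ms) {
    std::printf("Init(may_return_null=%d, interval=%d ms)\n",
                (int)may_return_null, release_to_os_interval_ms);
  }
  void SetReleaseToOSIntervalMs(s32 interval_ms) {
    std::printf("SetReleaseToOSIntervalMs(%d)\n", interval_ms);
  }
};

static MockCombinedAllocator allocator;

int main() {
  // The release interval is now threaded through Init at startup ...
  allocator.Init(/*may_return_null=*/false,
                 /*release_to_os_interval_ms=*/5000);
  // ... and can be adjusted later via the new setter.
  allocator.SetReleaseToOSIntervalMs(1000);
  return 0;
}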