author     Alex Shlyapnikov <alekseys@google.com>    2017-10-23 17:12:07 +0000
committer  Alex Shlyapnikov <alekseys@google.com>    2017-10-23 17:12:07 +0000
commit     a21db44b446dc6e7db373e6cc4c57c9cdbd768c9 (patch)
tree       acc1755d97ff247ff926634642d1d8ef5cba3fe1 /lib/sanitizer_common
parent     e378f8791d2d42e4c3f26033ba20a1bf4b43d2d5 (diff)
[Sanitizers] New sanitizer API to purge allocator quarantine.
Summary:
Purging the allocator quarantine and returning memory to the OS between
fuzzer iterations might be desired: the quarantine is unlikely to catch
bugs in the code under fuzz, while reducing RSS might significantly
prolong the fuzzing session.
Reviewers: cryptoad
Subscribers: kubamracek, llvm-commits
Differential Revision: https://reviews.llvm.org/D39153
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@316347 91177308-0d34-0410-b5e6-96231b3b80d8
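
For illustration, a minimal sketch of how a fuzz target might call the new
interface between iterations. Only __sanitizer_purge_allocator comes from this
patch; the LLVMFuzzerTestOneInput entry point and the kPurgeInterval cadence
are illustrative assumptions, not part of the change.

#include <stddef.h>
#include <stdint.h>

// Provided by the sanitizer runtime when the patched allocator is linked in;
// declared weak here so the target still links and runs unsanitized.
extern "C" __attribute__((weak)) void __sanitizer_purge_allocator();

// Hypothetical libFuzzer target: drain the quarantine and return memory to
// the OS every kPurgeInterval iterations to keep RSS down during long runs.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  static uint64_t iteration = 0;
  constexpr uint64_t kPurgeInterval = 1000;  // illustrative cadence
  // ... exercise the code under fuzz with (data, size) ...
  if (++iteration % kPurgeInterval == 0 && &__sanitizer_purge_allocator)
    __sanitizer_purge_allocator();
  return 0;
}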
Diffstat (limited to 'lib/sanitizer_common')
6 files changed, 50 insertions, 20 deletions
diff --git a/lib/sanitizer_common/sanitizer_allocator_combined.h b/lib/sanitizer_common/sanitizer_allocator_combined.h
index efd25cadf..0d8a2a174 100644
--- a/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -77,6 +77,10 @@ class CombinedAllocator {
     primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
   }
 
+  void ForceReleaseToOS() {
+    primary_.ForceReleaseToOS();
+  }
+
   void Deallocate(AllocatorCache *cache, void *p) {
     if (!p) return;
     if (primary_.PointerIsMine(p))
diff --git a/lib/sanitizer_common/sanitizer_allocator_interface.h b/lib/sanitizer_common/sanitizer_allocator_interface.h
index 13910e719..2f5ce3151 100644
--- a/lib/sanitizer_common/sanitizer_allocator_interface.h
+++ b/lib/sanitizer_common/sanitizer_allocator_interface.h
@@ -39,6 +39,9 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
 __sanitizer_free_hook(void *ptr);
 
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_purge_allocator();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
 __sanitizer_print_memory_profile(uptr top_percent,
                                  uptr max_number_of_contexts);
 }  // extern "C"
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary32.h b/lib/sanitizer_common/sanitizer_allocator_primary32.h
index 8f8a71d67..8abf2f9f5 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -120,6 +120,10 @@ class SizeClassAllocator32 {
     // This is empty here. Currently only implemented in 64-bit allocator.
   }
 
+  void ForceReleaseToOS() {
+    // Currently implemented in 64-bit allocator only.
+  }
+
   void *MapWithCallback(uptr size) {
     void *res = MmapOrDie(size, "SizeClassAllocator32");
     MapUnmapCallback().OnMap((uptr)res, size);
diff --git a/lib/sanitizer_common/sanitizer_allocator_primary64.h b/lib/sanitizer_common/sanitizer_allocator_primary64.h
index 931e01c28..1635efe83 100644
--- a/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -92,6 +92,13 @@ class SizeClassAllocator64 {
                  memory_order_relaxed);
   }
 
+  void ForceReleaseToOS() {
+    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+      BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+      MaybeReleaseToOS(class_id, true /*force*/);
+    }
+  }
+
   static bool CanAllocate(uptr size, uptr alignment) {
     return size <= SizeClassMap::kMaxSize &&
            alignment <= SizeClassMap::kMaxSize;
@@ -116,7 +123,7 @@ class SizeClassAllocator64 {
     region->num_freed_chunks = new_num_freed_chunks;
     region->stats.n_freed += n_chunks;
 
-    MaybeReleaseToOS(class_id);
+    MaybeReleaseToOS(class_id, false /*force*/);
   }
 
   NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
@@ -786,7 +793,7 @@ class SizeClassAllocator64 {
 
   // Attempts to release RAM occupied by freed chunks back to OS. The region is
   // expected to be locked.
-  void MaybeReleaseToOS(uptr class_id) {
+  void MaybeReleaseToOS(uptr class_id, bool force) {
     RegionInfo *region = GetRegionInfo(class_id);
     const uptr chunk_size = ClassIdToSize(class_id);
     const uptr page_size = GetPageSizeCached();
@@ -799,12 +806,16 @@ class SizeClassAllocator64 {
       return;  // Nothing new to release.
     }
 
-    s32 interval_ms = ReleaseToOSIntervalMs();
-    if (interval_ms < 0)
-      return;
+    if (!force) {
+      s32 interval_ms = ReleaseToOSIntervalMs();
+      if (interval_ms < 0)
+        return;
 
-    if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > NanoTime())
-      return;  // Memory was returned recently.
+      if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
+          NanoTime()) {
+        return;  // Memory was returned recently.
+      }
+    }
 
     MemoryMapper memory_mapper(*this, class_id);
diff --git a/lib/sanitizer_common/sanitizer_common_interface.inc b/lib/sanitizer_common/sanitizer_common_interface.inc
index 550427c90..2568df3a5 100644
--- a/lib/sanitizer_common/sanitizer_common_interface.inc
+++ b/lib/sanitizer_common/sanitizer_common_interface.inc
@@ -34,6 +34,7 @@ INTERFACE_FUNCTION(__sanitizer_get_heap_size)
 INTERFACE_FUNCTION(__sanitizer_get_ownership)
 INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
 INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_purge_allocator)
 INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
 INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
 INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
diff --git a/lib/sanitizer_common/sanitizer_quarantine.h b/lib/sanitizer_common/sanitizer_quarantine.h
index db38867ce..886445a27 100644
--- a/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/lib/sanitizer_common/sanitizer_quarantine.h
@@ -87,15 +87,14 @@ class Quarantine {
     // is zero (it allows us to perform just one atomic read per Put() call).
     CHECK((size == 0 && cache_size == 0) || cache_size != 0);
 
-    atomic_store(&max_size_, size, memory_order_relaxed);
-    atomic_store(&min_size_, size / 10 * 9,
-                 memory_order_relaxed);  // 90% of max size.
-    atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
+    atomic_store_relaxed(&max_size_, size);
+    atomic_store_relaxed(&min_size_, size / 10 * 9);  // 90% of max size.
+    atomic_store_relaxed(&max_cache_size_, cache_size);
   }
 
-  uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
+  uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
   uptr GetCacheSize() const {
-    return atomic_load(&max_cache_size_, memory_order_relaxed);
+    return atomic_load_relaxed(&max_cache_size_);
   }
 
   void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
@@ -117,7 +116,16 @@ class Quarantine {
       cache_.Transfer(c);
     }
     if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
-      Recycle(cb);
+      Recycle(atomic_load_relaxed(&min_size_), cb);
+  }
+
+  void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
+    {
+      SpinMutexLock l(&cache_mutex_);
+      cache_.Transfer(c);
+    }
+    recycle_mutex_.Lock();
+    Recycle(0, cb);
   }
 
   void PrintStats() const {
@@ -139,9 +147,8 @@ class Quarantine {
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(Callback cb) {
+  void NOINLINE Recycle(uptr min_size, Callback cb) {
     Cache tmp;
-    uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
     {
       SpinMutexLock l(&cache_mutex_);
       // Go over the batches and merge partially filled ones to
@@ -201,7 +208,7 @@ class QuarantineCache {
 
   // Total memory used, including internal accounting.
   uptr Size() const {
-    return atomic_load(&size_, memory_order_relaxed);
+    return atomic_load_relaxed(&size_);
   }
 
   // Memory used for internal accounting.
@@ -225,7 +232,7 @@ class QuarantineCache {
     list_.append_back(&from_cache->list_);
     SizeAdd(from_cache->Size());
 
-    atomic_store(&from_cache->size_, 0, memory_order_relaxed);
+    atomic_store_relaxed(&from_cache->size_, 0);
   }
 
   void EnqueueBatch(QuarantineBatch *b) {
@@ -296,10 +303,10 @@ class QuarantineCache {
   atomic_uintptr_t size_;
 
   void SizeAdd(uptr add) {
-    atomic_store(&size_, Size() + add, memory_order_relaxed);
+    atomic_store_relaxed(&size_, Size() + add);
   }
   void SizeSub(uptr sub) {
-    atomic_store(&size_, Size() - sub, memory_order_relaxed);
+    atomic_store_relaxed(&size_, Size() - sub);
   }
 };
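
A note on the quarantine change: both paths now funnel into a single
Recycle(min_size, cb). The periodic path from Put() keeps roughly 90% of the
maximum quarantine size resident, while DrainAndRecycle() passes zero to empty
the quarantine completely. A simplified model of that control flow follows;
the toy type and batch size are illustrative stand-ins for the real Cache and
Callback machinery, not code from this patch.

#include <cstdint>
using uptr = uintptr_t;

// Toy model of Quarantine's two Recycle() entry points after this patch.
struct ToyQuarantine {
  uptr max_size = 100;           // from Init(); 0 disables the quarantine
  uptr min_size = 100 / 10 * 9;  // 90% watermark, as computed in Init()
  uptr cur_size = 0;             // bytes currently quarantined

  // Periodic path (Put): recycle only down to the 90% watermark.
  void MaybeRecycle() {
    if (cur_size > max_size) Recycle(min_size);
  }

  // Forced path (__sanitizer_purge_allocator -> DrainAndRecycle):
  // keep nothing, releasing all quarantined memory.
  void Purge() { Recycle(0); }

  void Recycle(uptr keep_size) {
    // Pop quarantined batches until at most keep_size bytes remain.
    while (cur_size > keep_size) {
      uptr batch = cur_size < 10 ? cur_size : 10;  // toy batch size
      cur_size -= batch;
    }
  }
};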