From d2e59ebfdaa0a5c9ed5a5324889b25172ce1e97a Mon Sep 17 00:00:00 2001
From: Evgeniy Stepanov
Date: Wed, 30 Nov 2016 20:41:59 +0000
Subject: Release memory to OS only when the requested range covers the entire
 page

Summary:
The current code was sometimes attempting to release huge chunks of memory
due to undesired RoundUp/RoundDown interaction when the requested range is
fully contained within one memory page.

Reviewers: eugenis

Subscribers: kubabrecka, llvm-commits

Patch by Aleksey Shlyapnikov.

Differential Revision: https://reviews.llvm.org/D27228

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@288271 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/msan/msan_allocator.cc | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'lib/msan')

diff --git a/lib/msan/msan_allocator.cc b/lib/msan/msan_allocator.cc
index a82e89845..6c389f008 100644
--- a/lib/msan/msan_allocator.cc
+++ b/lib/msan/msan_allocator.cc
@@ -33,9 +33,12 @@ struct MsanMapUnmapCallback {
 
     // We are about to unmap a chunk of user memory.
     // Mark the corresponding shadow memory as not needed.
-    ReleaseMemoryToOS(MEM_TO_SHADOW(p), size);
-    if (__msan_get_track_origins())
-      ReleaseMemoryToOS(MEM_TO_ORIGIN(p), size);
+    uptr shadow_p = MEM_TO_SHADOW(p);
+    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
+    if (__msan_get_track_origins()) {
+      uptr origin_p = MEM_TO_ORIGIN(p);
+      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
+    }
   }
 };
 
-- 
cgit v1.2.3
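
Editor's note: the failure mode described in the summary can be shown outside the
sanitizer runtime. The program below is a hypothetical, standalone sketch, not
compiler-rt code; it assumes the pre-patch path rounded the start of the shadow
range up and the end down to a page boundary and then subtracted, which wraps to
an enormous unsigned size whenever both addresses fall inside the same page. The
RoundUpTo/RoundDownTo helpers are reimplemented here so the demo is
self-contained.

// illustrate_page_release.cc -- hypothetical standalone demo, not compiler-rt code.
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Same rounding idiom as sanitizer_common's RoundUpTo/RoundDownTo helpers,
// reimplemented locally for this demo.
static uptr RoundUpTo(uptr x, uptr boundary) {
  return (x + boundary - 1) & ~(boundary - 1);
}
static uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

int main() {
  const uptr kPageSize = 4096;

  // A range that lies entirely inside one 4K page: [0x1100, 0x1200).
  uptr beg = 0x1100, end = 0x1200;

  // To release only whole pages, the start is rounded up and the end is
  // rounded down to a page boundary.
  uptr page_beg = RoundUpTo(beg, kPageSize);    // 0x2000
  uptr page_end = RoundDownTo(end, kPageSize);  // 0x1000

  // Buggy pattern (assumed pre-patch behavior): computing the size without
  // checking the order first. Here page_end < page_beg, so the unsigned
  // subtraction wraps around and the "size" becomes huge.
  uptr buggy_size = page_end - page_beg;
  printf("buggy size to release: %zu bytes\n", (size_t)buggy_size);

  // Fixed pattern (what a ReleaseMemoryPagesToOS-style API must do):
  // release pages only when at least one full page is actually covered.
  if (page_beg < page_end)
    printf("release [0x%zx, 0x%zx)\n", (size_t)page_beg, (size_t)page_end);
  else
    printf("range lies within a single page; nothing to release\n");
  return 0;
}

This is also why the patched callback passes an explicit [begin, end) range to
ReleaseMemoryPagesToOS instead of a base and a size: the callee can round both
ends itself and simply do nothing when no full page is covered.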