summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorReid Kleckner <rnk@google.com>2016-03-10 20:47:26 +0000
committerReid Kleckner <rnk@google.com>2016-03-10 20:47:26 +0000
commitb2c744f7f7dad3e6d7bd9c485c323b8bd0cddb5d (patch)
tree200116bca62e66104fcdb875a5970dd728119840 /lib
parent2a381f5e3fa29e4edcf638fc5b40f35724077eb0 (diff)
[Windows] Fix UnmapOrDie and MmapAlignedOrDie
Now ASan can return virtual memory to the underlying OS. Portable sanitizer runtime code needs to be aware that UnmapOrDie cannot unmap part of previous mapping. In particular, this required changing how we implement MmapAlignedOrDie on Windows, which is what Allocator32 uses. The new code first attempts to allocate memory of the given size, and if it is appropriately aligned, returns early. If not, it frees the memory and attempts to reserve size + alignment bytes. In this region there must be an aligned address. We then free the oversized mapping and request a new mapping at the aligned address immediately after. However, a thread could allocate that virtual address in between our free and allocation, so we have to retry if that allocation fails. The existing thread creation stress test managed to trigger this condition, so the code isn't totally untested. Reviewers: samsonov Differential Revision: http://reviews.llvm.org/D17431 git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@263160 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib')
-rw-r--r-- lib/sanitizer_common/sanitizer_common.cc | 21
-rw-r--r-- lib/sanitizer_common/sanitizer_posix.cc  | 20
-rw-r--r-- lib/sanitizer_common/sanitizer_win.cc    | 77
3 files changed, 95 insertions, 23 deletions
diff --git a/lib/sanitizer_common/sanitizer_common.cc b/lib/sanitizer_common/sanitizer_common.cc
index ce91dc460..576875f22 100644
--- a/lib/sanitizer_common/sanitizer_common.cc
+++ b/lib/sanitizer_common/sanitizer_common.cc
@@ -230,27 +230,6 @@ void SortArray(uptr *array, uptr size) {
InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}
-// We want to map a chunk of address space aligned to 'alignment'.
-// We do it by mapping a bit more and then unmapping redundant pieces.
-// We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
-// uptr PageSize = GetPageSizeCached();
- CHECK(IsPowerOfTwo(size));
- CHECK(IsPowerOfTwo(alignment));
- uptr map_size = size + alignment;
- uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
- uptr map_end = map_res + map_size;
- uptr res = map_res;
- if (res & (alignment - 1)) // Not aligned.
- res = (map_res + alignment) & ~(alignment - 1);
- uptr end = res + size;
- if (res != map_res)
- UnmapOrDie((void*)map_res, res - map_res);
- if (end != map_end)
- UnmapOrDie((void*)end, map_end - end);
- return (void*)res;
-}
-
const char *StripPathPrefix(const char *filepath,
const char *strip_path_prefix) {
if (!filepath) return nullptr;
diff --git a/lib/sanitizer_common/sanitizer_posix.cc b/lib/sanitizer_common/sanitizer_posix.cc
index a383b2158..5bcc86b3b 100644
--- a/lib/sanitizer_common/sanitizer_posix.cc
+++ b/lib/sanitizer_common/sanitizer_posix.cc
@@ -139,6 +139,26 @@ void UnmapOrDie(void *addr, uptr size) {
DecreaseTotalMmap(size);
}
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping redundant pieces.
+// We probably can do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = size + alignment;
+ uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+ uptr map_end = map_res + map_size;
+ uptr res = map_res;
+ if (res & (alignment - 1)) // Not aligned.
+ res = (map_res + alignment) & ~(alignment - 1);
+ uptr end = res + size;
+ if (res != map_res)
+ UnmapOrDie((void*)map_res, res - map_res);
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
+ return (void*)res;
+}
+
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
uptr PageSize = GetPageSizeCached();
uptr p = internal_mmap(nullptr,
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index 779481246..b0424aad4 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -97,7 +97,17 @@ void UnmapOrDie(void *addr, uptr size) {
if (!size || !addr)
return;
- if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
+ // Make sure that this API is only used to unmap an entire previous mapping.
+ // Windows cannot unmap part of a previous mapping. Unfortunately,
+ // we can't check that size matches the original size because mbi.RegionSize
+ // doesn't describe the size of the full allocation if some of the pages were
+ // protected.
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));
+ CHECK(mbi.AllocationBase == addr &&
+ "Windows cannot unmap part of a previous mapping");
+
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
Report("ERROR: %s failed to "
"deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
SanitizerToolName, size, size, addr, GetLastError());
@@ -105,6 +115,61 @@ void UnmapOrDie(void *addr, uptr size) {
}
}
+// We want to map a chunk of address space aligned to 'alignment'.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+
+ // Windows will align our allocations to at least 64K.
+ alignment = Max(alignment, GetMmapGranularity());
+
+ uptr mapped_addr =
+ (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (!mapped_addr)
+ ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+
+ // If we got it right on the first try, return. Otherwise, unmap it and go to
+ // the slow path.
+ if (IsAligned(mapped_addr, alignment))
+ return (void*)mapped_addr;
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // If we didn't get an aligned address, overallocate, find an aligned address,
+ // unmap, and try to allocate at that aligned address.
+ int retries = 0;
+ const int kMaxRetries = 10;
+ for (; retries < kMaxRetries &&
+ (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
+ retries++) {
+ // Overallocate size + alignment bytes.
+ mapped_addr =
+ (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
+ if (!mapped_addr)
+ ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
+ GetLastError());
+
+ // Find the aligned address.
+ uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
+
+ // Free the overallocation.
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // Attempt to allocate exactly the number of bytes we need at the aligned
+ // address. This may fail for a number of reasons, in which case we continue
+ // the loop.
+ mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ }
+
+ // Fail if we can't make this work quickly.
+ if (retries == kMaxRetries && mapped_addr == 0)
+ ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+
+ return (void *)mapped_addr;
+}
+
void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
@@ -119,7 +184,15 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
}
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
- return MmapFixedNoReserve(fixed_addr, size);
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0) {
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
+ }
+ return p;
}
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {