author    Petr Hosek <phosek@chromium.org>    2017-10-11 19:17:35 +0000
committer Petr Hosek <phosek@chromium.org>    2017-10-11 19:17:35 +0000
commit    c37be534a252a7cb8e2ab04b15256232394dd7a9 (patch)
tree      145e8b2d2e1d28c3e0125e6f33eb77fd7bbabb71 /lib/sanitizer_common/sanitizer_win.cc
parent    f5ee4a4c0f97f94f71d007a18480ca4b6c023075 (diff)
[sanitizer] Introduce ReservedAddressRange to sanitizer_common
In Fuchsia, MmapNoAccess/MmapFixedOrDie are implemented using a global VMAR, which means that MmapNoAccess can only be called once. This works for the sanitizer allocator but *not* for the Scudo allocator. Hence, this changeset introduces a new ReservedAddressRange object to serve as the new API for these calls. In this changeset, the object still calls into the old Mmap implementations. The next two changesets will convert the sanitizer and Scudo allocators to use the new APIs, respectively. (ReservedAddressRange will replace the SecondaryHeader in Scudo.) Finally, a last changeset will update the Fuchsia implementation.

Patch by Julia Hansbrough

Differential Revision: https://reviews.llvm.org/D38759

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@315493 91177308-0d34-0410-b5e6-96231b3b80d8
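For context, a minimal sketch of how a caller might exercise the new object, based only on the signatures added in this patch; the ExampleUsage wrapper and kSize constant are illustrative and not part of the commit:

  // Sketch only: assumes the ReservedAddressRange declaration from
  // sanitizer_common.h introduced by this patch.
  #include "sanitizer_common.h"

  using namespace __sanitizer;

  void ExampleUsage() {
    static const uptr kSize = 1 << 20;  // Arbitrary size for the sketch.
    ReservedAddressRange range;
    // Reserve an inaccessible region; fixed_addr == 0 lets the OS choose.
    uptr base = range.Init(kSize, "example", 0);
    // Commit one page at the start of the reservation as read/write.
    range.Map(base, GetPageSizeCached(), /*tolerate_enomem=*/false);
    // Release the whole reservation; Unmap only accepts ranges that start
    // at the beginning or end at the end of the reservation.
    range.Unmap(base, kSize);
  }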
Diffstat (limited to 'lib/sanitizer_common/sanitizer_win.cc')
-rw-r--r--  lib/sanitizer_common/sanitizer_win.cc | 37
1 file changed, 37 insertions(+), 0 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index f1a74d3f2..762ba46cc 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -235,6 +235,31 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
return p;
}
+// Uses fixed_addr for now.
+// Will use offset instead once we've implemented this function for real.
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size,
+ bool tolerate_enomem) {
+ if (tolerate_enomem) {
+ return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
+ }
+  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ void* addr_as_void = reinterpret_cast<void*>(addr);
+ uptr base_as_uptr = reinterpret_cast<uptr>(base_);
+ // Only unmap at the beginning or end of the range.
+ CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
+ true);
+ // Detect overflows.
+ CHECK_LE(size, (base_as_uptr + size_) - addr);
+ UnmapOrDie(reinterpret_cast<void*>(addr), size);
+ if (addr_as_void == base_) {
+ base_ = reinterpret_cast<void*>(addr + size);
+ }
+ size_ = size_ - size;
+}
+
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_COMMIT, PAGE_READWRITE);
@@ -252,6 +277,18 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type);
}
+uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
+ if (fixed_addr) {
+ base_ = MmapFixedNoAccess(fixed_addr, size, name);
+ } else {
+ base_ = MmapNoAccess(size);
+ }
+ size_ = size;
+ name_ = name;
+ return reinterpret_cast<uptr>(base_);
+}
+
+
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
(void)name; // unsupported
void *res = VirtualAlloc((LPVOID)fixed_addr, size,
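For illustration, a worked trace of the Unmap bookkeeping above, with made-up numbers that are not part of the patch:

  // Suppose Init reserved base_ == 0x10000 with size_ == 0x4000.
  // Unmap(0x10000, 0x1000): addr matches base_, so the first CHECK passes;
  //   base_ advances to 0x11000 and size_ shrinks to 0x3000.
  // Unmap(0x13000, 0x1000): 0x13000 + 0x1000 == 0x11000 + 0x3000, so the
  //   end-of-range arm passes; base_ is unchanged and size_ shrinks to 0x2000.
  // Unmap(0x11800, 0x800): an interior unmap; neither arm holds, so the
  //   CHECK fires -- the range must stay contiguous.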