-rw-r--r--  lib/sanitizer_common/sanitizer_common.h               14
-rw-r--r--  lib/sanitizer_common/sanitizer_fuchsia.cc             31
-rw-r--r--  lib/sanitizer_common/sanitizer_posix_libcdep.cc       36
-rw-r--r--  lib/sanitizer_common/sanitizer_win.cc                 37
-rw-r--r--  lib/sanitizer_common/tests/sanitizer_common_test.cc   62
5 files changed, 180 insertions, 0 deletions
diff --git a/lib/sanitizer_common/sanitizer_common.h b/lib/sanitizer_common/sanitizer_common.h
index ee5eca516..14b92d9c4 100644
--- a/lib/sanitizer_common/sanitizer_common.h
+++ b/lib/sanitizer_common/sanitizer_common.h
@@ -128,6 +128,20 @@ void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
+class ReservedAddressRange {
+ public:
+ uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
+ uptr Map(uptr fixed_addr, uptr size, bool tolerate_enomem = false);
+ void Unmap(uptr addr, uptr size);
+ void *base() const { return base_; }
+ uptr size() const { return size_; }
+
+ private:
+ void* base_;
+ uptr size_;
+ const char* name_;
+};
+
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
/*out*/uptr *stats, uptr stats_size);
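
For orientation, a minimal caller-side sketch of the new interface follows. It is not part of the patch: the function name ExampleUse, the 1 MiB reservation size, and the "example" mapping name are illustrative assumptions; ReservedAddressRange, uptr, GetPageSizeCached and the __sanitizer namespace are existing names from the tree.

  // Sketch only: reserve address space up front, commit one page on demand,
  // then give the page back by shrinking the range from the front.
  using namespace __sanitizer;

  void ExampleUse() {
    ReservedAddressRange range;
    uptr base = range.Init(1 << 20, "example");  // reserve 1 MiB, no access yet
    uptr page = GetPageSizeCached();
    uptr mapped = range.Map(base, page);         // commit the first page (dies on failure)
    // ... use [mapped, mapped + page) ...
    range.Unmap(mapped, page);                   // allowed: unmap at the beginning of the range
  }
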
diff --git a/lib/sanitizer_common/sanitizer_fuchsia.cc b/lib/sanitizer_common/sanitizer_fuchsia.cc
index 21fabddb1..00a8aabbf 100644
--- a/lib/sanitizer_common/sanitizer_fuchsia.cc
+++ b/lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -236,6 +236,37 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
+uptr ReservedAddressRange::Init(uptr init_size, const char *name,
+                                uptr fixed_addr) {
+  base_ = MmapNoAccess(init_size);
+  size_ = init_size;
+ name_ = name;
+ return reinterpret_cast<uptr>(base_);
+}
+
+// Uses fixed_addr for now.
+// Will use offset instead once we've implemented this function for real.
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
+                               bool tolerate_enomem) {
+  // tolerate_enomem is not honored here yet; MmapFixedOrDie aborts on failure.
+  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, map_size));
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ void* addr_as_void = reinterpret_cast<void*>(addr);
+ uptr base_as_uptr = reinterpret_cast<uptr>(base_);
+ // Only unmap at the beginning or end of the range.
+ CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
+ true);
+ // Detect overflows.
+ CHECK_LE(size, (base_as_uptr + size_) - addr);
+ UnmapOrDie(reinterpret_cast<void*>(addr), size);
+ if (addr_as_void == base_) {
+ base_ = reinterpret_cast<void*>(addr + size);
+ }
+ size_ = size_ - size;
+}
+
// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
// Instead of doing exactly what they say, we make MmapNoAccess actually
// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie
diff --git a/lib/sanitizer_common/sanitizer_posix_libcdep.cc b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
index 4214d91c5..b6b09b5e4 100644
--- a/lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -337,6 +337,42 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
return (void *)p;
}
+uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
+ if (fixed_addr) {
+ base_ = MmapFixedNoAccess(fixed_addr, size, name);
+ } else {
+ base_ = MmapNoAccess(size);
+ }
+ size_ = size;
+ name_ = name;
+ return reinterpret_cast<uptr>(base_);
+}
+
+// Uses fixed_addr for now.
+// Will use offset instead once we've implemented this function for real.
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size,
+ bool tolerate_enomem) {
+ if (tolerate_enomem) {
+ return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
+ }
+ return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ void* addr_as_void = reinterpret_cast<void*>(addr);
+ uptr base_as_uptr = reinterpret_cast<uptr>(base_);
+ // Only unmap at the beginning or end of the range.
+ CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
+ true);
+ // Detect overflows.
+ CHECK_LE(size, (base_as_uptr + size_) - addr);
+ UnmapOrDie(reinterpret_cast<void*>(addr), size);
+ if (addr_as_void == base_) {
+ base_ = reinterpret_cast<void*>(addr + size);
+ }
+ size_ = size_ - size;
+}
+
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
int fd = name ? GetNamedMappingFd(name, size) : -1;
unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
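
On POSIX the tolerate_enomem flag picks between the two existing fixed-mapping primitives: MmapFixedOrDie aborts on any failure, while MmapFixedOrDieOnFatalError is only fatal for errors other than out-of-memory. A hypothetical caller sketch (TryCommit is not in the patch; it assumes an out-of-memory failure comes back as a value other than the requested address):

  // Sketch: commit part of a reservation, surviving out-of-memory.
  bool TryCommit(ReservedAddressRange &range, uptr addr, uptr size) {
    uptr got = range.Map(addr, size, /*tolerate_enomem=*/true);
    // On success a MAP_FIXED mapping lands at the requested address; anything
    // else is treated as an allocation failure the caller can recover from.
    return got == addr;
  }
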
diff --git a/lib/sanitizer_common/sanitizer_win.cc b/lib/sanitizer_common/sanitizer_win.cc
index f1a74d3f2..5bbdff951 100644
--- a/lib/sanitizer_common/sanitizer_win.cc
+++ b/lib/sanitizer_common/sanitizer_win.cc
@@ -235,6 +235,31 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
return p;
}
+// Uses fixed_addr for now.
+// Will use offset instead once we've implemented this function for real.
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size,
+ bool tolerate_enomem) {
+ if (tolerate_enomem) {
+ return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
+ }
+ return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ void* addr_as_void = reinterpret_cast<void*>(addr);
+ uptr base_as_uptr = reinterpret_cast<uptr>(base_);
+ // Only unmap at the beginning or end of the range.
+ CHECK_EQ((addr_as_void == base_) || (addr + size == base_as_uptr + size_),
+ true);
+ // Detect overflows.
+ CHECK_LE(size, (base_as_uptr + size_) - addr);
+ UnmapOrDie(reinterpret_cast<void*>(addr), size);
+ if (addr_as_void == base_) {
+ base_ = reinterpret_cast<void*>(addr + size);
+ }
+ size_ = size_ - size;
+}
+
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
void *p = VirtualAlloc((LPVOID)fixed_addr, size,
MEM_COMMIT, PAGE_READWRITE);
@@ -252,6 +277,18 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type);
}
+uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
+ if (fixed_addr) {
+ base_ = MmapFixedNoAccess(fixed_addr, size, name);
+ } else {
+ base_ = MmapNoAccess(size);
+ }
+ size_ = size;
+ name_ = name;
+ return reinterpret_cast<uptr>(base_);
+}
+
+
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
(void)name; // unsupported
void *res = VirtualAlloc((LPVOID)fixed_addr, size,
diff --git a/lib/sanitizer_common/tests/sanitizer_common_test.cc b/lib/sanitizer_common/tests/sanitizer_common_test.cc
index 9c62b4593..5efce86ce 100644
--- a/lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ b/lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -320,4 +320,66 @@ TEST(SanitizerCommon, GetRandom) {
}
#endif
+TEST(SanitizerCommon, ReservedAddressRangeInit) {
+ uptr init_size = 0xffff;
+ ReservedAddressRange address_range;
+ uptr res = address_range.Init(init_size);
+ CHECK_NE(res, (void*)-1);
+ UnmapOrDie((void*)res, init_size);
+ // Should be able to map into the same space now.
+ ReservedAddressRange address_range2;
+ uptr res2 = address_range2.Init(init_size, nullptr, res);
+ CHECK_EQ(res, res2);
+
+ // TODO(flowerhack): Once this is switched to the "real" implementation
+ // (rather than passing through to MmapNoAccess*), enforce and test "no
+ // double initializations allowed"
+}
+
+TEST(SanitizerCommon, ReservedAddressRangeMap) {
+ constexpr uptr init_size = 0xffff;
+ ReservedAddressRange address_range;
+ uptr res = address_range.Init(init_size);
+ CHECK_NE(res, (void*) -1);
+
+ // Valid mappings should succeed.
+ CHECK_EQ(res, address_range.Map(res, init_size));
+
+ // Valid mappings should be readable.
+ unsigned char buffer[init_size];
+  memcpy(buffer, reinterpret_cast<void *>(res), sizeof(buffer));
+
+ // Invalid mappings should fail.
+ EXPECT_DEATH(address_range.Map(res, 0), ".*");
+
+ // TODO(flowerhack): Once this is switched to the "real" implementation, make
+ // sure you can only mmap into offsets in the Init range.
+}
+
+TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
+ uptr PageSize = GetPageSizeCached();
+ uptr init_size = PageSize * 4;
+ ReservedAddressRange address_range;
+ uptr base_addr = address_range.Init(init_size);
+ CHECK_NE(base_addr, (void*)-1);
+ CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));
+
+ // Unmapping at the beginning should succeed.
+ address_range.Unmap(base_addr, PageSize);
+ CHECK_EQ(base_addr + PageSize, address_range.base());
+ CHECK_EQ(init_size - PageSize, address_range.size());
+
+ // Unmapping at the end should succeed.
+ uptr old_size = address_range.size();
+ void* old_base = address_range.base();
+ uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
+ address_range.size() - PageSize;
+ address_range.Unmap(new_start, PageSize);
+ CHECK_EQ(old_size - PageSize, address_range.size());
+ CHECK_EQ(old_base, address_range.base());
+
+ // Unmapping in the middle of the ReservedAddressRange should fail.
+ EXPECT_DEATH(address_range.Unmap(base_addr + 0xf, 0xff), ".*");
+}
+
} // namespace __sanitizer