Diffstat (limited to 'libsanitizer/asan/asan_rtl.cc')
 -rw-r--r--  libsanitizer/asan/asan_rtl.cc | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 74 insertions(+), 15 deletions(-)
diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc
index 50b02453a2b0..38009d2905f2 100644
--- a/libsanitizer/asan/asan_rtl.cc
+++ b/libsanitizer/asan/asan_rtl.cc
@@ -30,6 +30,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
+uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
@@ -84,8 +85,8 @@ void ShowStatsAndAbort() {
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetPageSizeCached()), 0);
- CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size, name);
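
The switch from GetPageSizeCached() to GetMmapGranularity() in these CHECKs matters on Windows, where VirtualAlloc reservations are aligned to a 64 KiB allocation granularity even though pages are 4 KiB. A minimal sketch of the distinction the new CHECKs enforce; the constants are illustrative assumptions, not values queried from any particular OS:

    // Why page size and mmap granularity can differ: the constants below are
    // illustrative assumptions (4 KiB pages, Windows' 64 KiB allocation
    // granularity), not values queried from an OS.
    #include <cassert>
    #include <cstdint>

    using uptr = uintptr_t;

    constexpr uptr kPageSize = 4 << 10;          // assumed page size
    constexpr uptr kMmapGranularity = 64 << 10;  // assumed Windows granularity

    // Mirrors the new CHECKs: both bounds of the inclusive [beg, end] range
    // must sit on mmap-granularity boundaries, not merely page boundaries.
    bool IsValidShadowRange(uptr beg, uptr end) {
      return beg % kMmapGranularity == 0 && (end + 1) % kMmapGranularity == 0;
    }

    int main() {
      assert(IsValidShadowRange(0x10000, 0x2ffff));    // 64 KiB aligned: ok
      assert(!IsValidShadowRange(kPageSize, 0x2ffff)); // page-aligned only: rejected
    }
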
@@ -261,6 +262,7 @@ static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
// __asan_report_* functions are noreturn, so we need a switch to prevent
// the compiler from removing any of them.
+ // clang-format off
switch (fake_condition) {
case 1: __asan_report_load1(0); break;
case 2: __asan_report_load2(0); break;
@@ -300,7 +302,14 @@ static NOINLINE void force_interface_symbols() {
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
+ case 40: __asan_set_shadow_00(0, 0); break;
+ case 41: __asan_set_shadow_f1(0, 0); break;
+ case 42: __asan_set_shadow_f2(0, 0); break;
+ case 43: __asan_set_shadow_f3(0, 0); break;
+ case 44: __asan_set_shadow_f5(0, 0); break;
+ case 45: __asan_set_shadow_f8(0, 0); break;
}
+ // clang-format on
}
static void asan_atexit() {
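
The volatile switch above is a linker keep-alive trick: because the compiler cannot prove fake_condition stays 0, every case, and therefore every referenced interface symbol, must survive optimization. A standalone sketch of the pattern, with hypothetical keep_me_* functions standing in for the __asan_report_* and __asan_set_shadow_* entry points:

    // Sketch of the keep-alive pattern; keep_me_1/keep_me_2 are hypothetical
    // stand-ins for the real interface functions.
    extern "C" void keep_me_1() {}
    extern "C" void keep_me_2() {}

    void force_symbols() {
      volatile int fake_condition = 0;  // opaque to the optimizer
      // The compiler cannot prove which case runs, so every call, and thus
      // every referenced symbol, must be kept even though none is ever taken.
      switch (fake_condition) {
        case 1: keep_me_1(); break;
        case 2: keep_me_2(); break;
      }
    }

    int main() { force_symbols(); }
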
@@ -318,26 +327,39 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= SHADOW_GRANULARITY * GetPageSizeCached() - 1;
+ kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
#endif // !ASAN_FIXED_MAPPING
- CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
+ CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}
static void ProtectGap(uptr addr, uptr size) {
- if (!flags()->protect_shadow_gap)
+ if (!flags()->protect_shadow_gap) {
+ // The shadow gap is unprotected, so there is a chance that someone
+ // is actually using this memory. Which means it needs a shadow...
+ uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
+ uptr GapShadowEnd =
+ RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
+ if (Verbosity())
+ Printf("protect_shadow_gap=0:"
+ " not protecting shadow gap, allocating gap's shadow\n"
+ "|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
+ GapShadowEnd);
+ ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
+ "unprotected gap shadow");
return;
- void *res = MmapNoAccess(addr, size, "shadow gap");
+ }
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
- uptr step = GetPageSizeCached();
+ uptr step = GetMmapGranularity();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
- void *res = MmapNoAccess(addr, size, "shadow gap");
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
}
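
The new protect_shadow_gap=0 branch works because anything that executes inside the gap still needs shadow memory, and that shadow's address comes from ASan's usual mapping: one shadow byte per eight application bytes, plus a platform offset. A compile-time sketch, assuming the common x86_64 Linux offset:

    // A sketch of the classic ASan shadow mapping; the offset is the common
    // x86_64 Linux value and is an assumption here, not queried from anywhere.
    #include <cstdint>

    using uptr = uintptr_t;

    constexpr uptr kShadowScale = 3;            // 2^3 = 8 app bytes per shadow byte
    constexpr uptr kShadowOffset = 0x7fff8000;  // assumed x86_64 Linux offset

    constexpr uptr MemToShadow(uptr addr) {
      return (addr >> kShadowScale) + kShadowOffset;
    }

    // Page-rounding helpers matching the RoundDownTo/RoundUpTo calls above;
    // ProtectGap hands the resulting inclusive range to ReserveShadowMemoryRange.
    constexpr uptr RoundDownTo(uptr x, uptr b) { return x & ~(b - 1); }
    constexpr uptr RoundUpTo(uptr x, uptr b) { return RoundDownTo(x + b - 1, b); }

    static_assert(MemToShadow(0x40000000) == 0x87ff8000, "mapping sanity check");

    int main() {}
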
@@ -413,10 +435,13 @@ static void AsanInitInternal() {
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
+ AvoidCVE_2016_2143();
SetCanPoisonMemory(flags()->poison_heap);
SetMallocContextSize(common_flags()->malloc_context_size);
+ InitializePlatformExceptionHandlers();
+
InitializeHighMemEnd();
// Make sure we are not statically linked.
@@ -429,7 +454,6 @@ static void AsanInitInternal() {
__sanitizer_set_report_path(common_flags()->log_path);
- // Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
@@ -448,7 +472,30 @@ static void AsanInitInternal() {
ReplaceSystemMalloc();
+ // Set the shadow memory address to uninitialized.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+
uptr shadow_start = kLowShadowBeg;
+ // Detect if a dynamic shadow address must be used and find an available
+ // location when necessary. When a dynamic address is used, the macro
+ // |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address|, which
+ // is |kDefaultShadowSentinel|.
+ if (shadow_start == kDefaultShadowSentinel) {
+ __asan_shadow_memory_dynamic_address = 0;
+ CHECK_EQ(0, kLowShadowBeg);
+
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ }
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __asan_shadow_memory_dynamic_address = shadow_start;
+
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
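
When kLowShadowBeg is the sentinel, the runtime must discover a hole large enough for the entire shadow, aligned to 8 * granularity so the start stays compatible with the shadow scale. A deliberately simplified sketch of what a FindAvailableMemoryRange-style scan might do; the MappedRegion list stands in for parsing the OS memory map and is an assumption, not the runtime's actual implementation:

    #include <cstdint>
    #include <vector>

    using uptr = uintptr_t;

    struct MappedRegion { uptr beg, end; };  // an existing mapping [beg, end)

    // Returns the first aligned hole of at least |size| bytes between the
    // (address-sorted) existing mappings, or 0 if none is found.
    uptr FindGap(const std::vector<MappedRegion> &maps, uptr size,
                 uptr alignment, uptr left_padding) {
      uptr prev_end = 0;
      for (const MappedRegion &r : maps) {
        uptr start = (prev_end + left_padding + alignment - 1) & ~(alignment - 1);
        if (start + size <= r.beg) return start;  // the hole is big enough
        prev_end = r.end;
      }
      return 0;
    }

    int main() {
      std::vector<MappedRegion> maps = {{0x10000, 0x20000}, {0x900000, 0xa00000}};
      return FindGap(maps, 0x100000, 0x10000, 0x1000) == 0x30000 ? 0 : 1;
    }
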
@@ -537,12 +584,12 @@ static void AsanInitInternal() {
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();
-#if CAN_SANITIZE_LEAKS
- __lsan::InitCommonLsan();
- if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
- Atexit(__lsan::DoLeakCheck);
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::InitCommonLsan();
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ Atexit(__lsan::DoLeakCheck);
+ }
}
-#endif // CAN_SANITIZE_LEAKS
#if CAN_SANITIZE_UB
__ubsan::InitAsPlugin();
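
Replacing #if CAN_SANITIZE_LEAKS with a plain if on the (compile-time constant) macro means the LSan calls are parsed and type-checked on every target, then eliminated as dead code where leak checking is off. A sketch of the pattern; FEATURE_ENABLED is a hypothetical stand-in for CAN_SANITIZE_LEAKS:

    #define FEATURE_ENABLED 0  // the sanitizer headers define it as 0 or 1

    void InitFeature() {}  // must still parse (and here, link) on every target

    void Init() {
      // Unlike #if, the body is compiled and type-checked unconditionally,
      // then dropped as dead code when FEATURE_ENABLED is 0.
      if (FEATURE_ENABLED) {
        InitFeature();
      }
    }

    int main() { Init(); }
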
@@ -550,6 +597,15 @@ static void AsanInitInternal() {
InitializeSuppressions();
+ if (CAN_SANITIZE_LEAKS) {
+ // LateInitialize() calls dlsym, which can allocate an error string buffer
+ // in the TLS. Let's ignore the allocation to avoid reporting a leak.
+ __lsan::ScopedInterceptorDisabler disabler;
+ Symbolizer::LateInitialize();
+ } else {
+ Symbolizer::LateInitialize();
+ }
+
VReport(1, "AddressSanitizer Init done\n");
}
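
The disabler here is an RAII guard: the dlsym allocation is ignored for exactly the scope of LateInitialize(), with no cleanup path to forget. A minimal sketch of the idea; disable_counter is a hypothetical thread-local, not LSan's real bookkeeping:

    #include <cassert>

    thread_local int disable_counter = 0;  // hypothetical per-thread ignore depth

    struct ScopedDisabler {
      // While an instance is alive, allocations on this thread are treated as
      // intentionally ignored, so a later leak check will not report them.
      ScopedDisabler() { ++disable_counter; }
      ~ScopedDisabler() { --disable_counter; }
    };

    void LateInit() {
      ScopedDisabler disabler;  // covers the whole scope, including early returns
      // ... dlsym() may allocate a TLS error-string buffer here ...
    }

    int main() {
      LateInit();
      assert(disable_counter == 0);  // balanced on every path out of LateInit
    }
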
@@ -579,6 +635,9 @@ static AsanInitializer asan_initializer;
using namespace __asan; // NOLINT
void NOINLINE __asan_handle_no_return() {
+ if (asan_init_is_running)
+ return;
+
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();
@@ -603,7 +662,7 @@ void NOINLINE __asan_handle_no_return() {
"stack top: %p; bottom %p; size: %p (%zd)\n"
"False positive error reports may follow\n"
"For details see "
- "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
+ "https://github.com/google/sanitizers/issues/189\n",
top, bottom, top - bottom, top - bottom);
return;
}
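
__asan_handle_no_return unpoisons everything from the current stack pointer to the top of the thread's stack, so a longjmp or exception that skips poisoned frames does not leave stale redzones behind; the new early return avoids touching thread state while ASan is still initializing. A usage sketch, assuming a build with -fsanitize=address so the runtime provides the symbol (instrumented builds insert the call before longjmp automatically; the explicit call only illustrates the contract):

    #include <csetjmp>

    extern "C" void __asan_handle_no_return();

    static jmp_buf env;

    static void deep() {
      char buffer[64];           // gets redzones in an instrumented build
      (void)buffer;
      __asan_handle_no_return(); // unpoison [SP, stack top) before flying away
      longjmp(env, 1);           // skips deep()'s frame without unwinding it
    }

    int main() {
      if (setjmp(env) == 0) deep();
      return 0;
    }
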