From 4f20a94a11e63df059a5d315b30b8ddf22b10959 Mon Sep 17 00:00:00 2001
From: Evgeniy Stepanov <eugenis@google.com>
Date: Wed, 13 Dec 2017 01:16:34 +0000
Subject: [hwasan] Inline instrumentation & fixed shadow.

Summary:
This brings CPU overhead on bzip2 down from 5.5x to 2x.

Reviewers: kcc, alekseyshl

Subscribers: kubamracek, hiraditya, llvm-commits

Differential Revision: https://reviews.llvm.org/D41137

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@320538 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/hwasan/hwasan.cc                   |  36 ++++----
 lib/hwasan/hwasan.h                    |   8 +-
 lib/hwasan/hwasan_interface_internal.h |   8 --
 lib/hwasan/hwasan_linux.cc             | 157 ++++++++++++++++++++++----------
 4 files changed, 128 insertions(+), 81 deletions(-)

diff --git a/lib/hwasan/hwasan.cc b/lib/hwasan/hwasan.cc
index 40012c130..fcc40eb90 100644
--- a/lib/hwasan/hwasan.cc
+++ b/lib/hwasan/hwasan.cc
@@ -238,10 +238,11 @@ void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
   *p = x;
 }
 
+template <unsigned X>
 __attribute__((always_inline))
 static void SigIll() {
 #if defined(__aarch64__)
-  asm("hlt #0x1\n\t");
+  asm("hlt %0\n\t" ::"n"(X));
 #elif defined(__x86_64__) || defined(__i386__)
   asm("ud2\n\t");
 #else
@@ -251,15 +252,16 @@ static void SigIll() {
   // __builtin_unreachable();
 }
 
+template <bool IsStore, unsigned LogSize>
 __attribute__((always_inline, nodebug))
 static void CheckAddress(uptr p) {
   tag_t ptr_tag = GetTagFromPointer(p);
   uptr ptr_raw = p & ~kAddressTagMask;
   tag_t mem_tag = *(tag_t *)MEM_TO_SHADOW(ptr_raw);
-  if (ptr_tag != mem_tag)
-    SigIll();
+  if (UNLIKELY(ptr_tag != mem_tag)) SigIll<0x100 + 0x10 * IsStore + LogSize>();
 }
 
+template <bool IsStore>
 __attribute__((always_inline, nodebug))
 static void CheckAddressSized(uptr p, uptr sz) {
   CHECK_NE(0, sz);
@@ -268,22 +270,22 @@ static void CheckAddressSized(uptr p, uptr sz) {
   tag_t *shadow_first = (tag_t *)MEM_TO_SHADOW(ptr_raw);
   tag_t *shadow_last = (tag_t *)MEM_TO_SHADOW(ptr_raw + sz - 1);
   for (tag_t *t = shadow_first; t <= shadow_last; ++t)
-    if (ptr_tag != *t) SigIll();
+    if (UNLIKELY(ptr_tag != *t)) SigIll<0x100 + 0x10 * IsStore + 0xf>();
 }
 
-void __hwasan_load(uptr p, uptr sz) { CheckAddressSized(p, sz); }
-void __hwasan_load1(uptr p) { CheckAddress(p); }
-void __hwasan_load2(uptr p) { CheckAddress(p); }
-void __hwasan_load4(uptr p) { CheckAddress(p); }
-void __hwasan_load8(uptr p) { CheckAddress(p); }
-void __hwasan_load16(uptr p) { CheckAddress(p); }
-
-void __hwasan_store(uptr p, uptr sz) { CheckAddressSized(p, sz); }
-void __hwasan_store1(uptr p) { CheckAddress(p); }
-void __hwasan_store2(uptr p) { CheckAddress(p); }
-void __hwasan_store4(uptr p) { CheckAddress(p); }
-void __hwasan_store8(uptr p) { CheckAddress(p); }
-void __hwasan_store16(uptr p) { CheckAddress(p); }
+void __hwasan_load(uptr p, uptr sz) { CheckAddressSized<false>(p, sz); }
+void __hwasan_load1(uptr p) { CheckAddress<false, 0>(p); }
+void __hwasan_load2(uptr p) { CheckAddress<false, 1>(p); }
+void __hwasan_load4(uptr p) { CheckAddress<false, 2>(p); }
+void __hwasan_load8(uptr p) { CheckAddress<false, 3>(p); }
+void __hwasan_load16(uptr p) { CheckAddress<false, 4>(p); }
+
+void __hwasan_store(uptr p, uptr sz) { CheckAddressSized<true>(p, sz); }
+void __hwasan_store1(uptr p) { CheckAddress<true, 0>(p); }
+void __hwasan_store2(uptr p) { CheckAddress<true, 1>(p); }
+void __hwasan_store4(uptr p) { CheckAddress<true, 2>(p); }
+void __hwasan_store8(uptr p) { CheckAddress<true, 3>(p); }
+void __hwasan_store16(uptr p) { CheckAddress<true, 4>(p); }
 
 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
 extern "C" {
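As context for the checks above: the template argument baked into each HLT is the 0x1XY code that GetAccessInfo() below decodes (X = 1 for store, 0 for load; Y = log2 of the access size, 0xf for variable size). A minimal standalone sketch of that mapping, illustrative only and not part of the patch:

    #include <cstdio>

    // Mirrors the immediate computed by CheckAddress/CheckAddressSized above:
    // 0x100 + 0x10 * IsStore + LogSize, with LogSize == 0xf meaning the access
    // size is passed separately (in register X1 on AArch64).
    constexpr unsigned EncodeAccess(bool is_store, unsigned log_size) {
      return 0x100 + 0x10 * is_store + log_size;
    }

    int main() {
      std::printf("load1  -> hlt #0x%x\n", EncodeAccess(false, 0));   // 0x100
      std::printf("load16 -> hlt #0x%x\n", EncodeAccess(false, 4));   // 0x104
      std::printf("store8 -> hlt #0x%x\n", EncodeAccess(true, 3));    // 0x113
      std::printf("storeN -> hlt #0x%x\n", EncodeAccess(true, 0xf));  // 0x11f
    }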
diff --git a/lib/hwasan/hwasan.h b/lib/hwasan/hwasan.h
index 8ced45e32..bcf5282dc 100644
--- a/lib/hwasan/hwasan.h
+++ b/lib/hwasan/hwasan.h
@@ -37,12 +37,8 @@ const uptr kShadowScale = 4;
 const uptr kShadowAlignment = 1UL << kShadowScale;
 
 #define MEM_TO_SHADOW_OFFSET(mem) ((uptr)(mem) >> kShadowScale)
-#define MEM_TO_SHADOW(mem) \
-  (((uptr)(mem) >> kShadowScale) + \
-   __hwasan_shadow_memory_dynamic_address_internal)
-#define SHADOW_TO_MEM(shadow) \
-  (((uptr)(shadow) - __hwasan_shadow_memory_dynamic_address_internal) \
-   << kShadowScale)
+#define MEM_TO_SHADOW(mem) ((uptr)(mem) >> kShadowScale)
+#define SHADOW_TO_MEM(shadow) ((uptr)(shadow) << kShadowScale)
 
 #define MEM_IS_APP(mem) true
 
diff --git a/lib/hwasan/hwasan_interface_internal.h b/lib/hwasan/hwasan_interface_internal.h
index 07f73ce22..08b77534e 100644
--- a/lib/hwasan/hwasan_interface_internal.h
+++ b/lib/hwasan/hwasan_interface_internal.h
@@ -31,14 +31,6 @@ using __sanitizer::u32;
 using __sanitizer::u16;
 using __sanitizer::u8;
 
-SANITIZER_INTERFACE_ATTRIBUTE
-extern uptr __hwasan_shadow_memory_dynamic_address;
-
-// Hidden alias for internal access.
-__attribute__((visibility("hidden")))
-extern uptr __hwasan_shadow_memory_dynamic_address_internal;
-
-
 SANITIZER_INTERFACE_ATTRIBUTE
 void __hwasan_load(uptr, uptr);
 SANITIZER_INTERFACE_ATTRIBUTE
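With the dynamic base gone, the shadow lookup in the new MEM_TO_SHADOW above reduces to a single shift: every 16-byte granule of application memory owns one tag byte at mem >> 4. A small standalone illustration of the arithmetic (the address value is made up):

    #include <cstdint>
    #include <cstdio>

    // Same arithmetic as the new MEM_TO_SHADOW / SHADOW_TO_MEM macros above.
    constexpr unsigned kShadowScale = 4;  // one shadow (tag) byte per 16 bytes
    constexpr std::uint64_t MemToShadow(std::uint64_t mem) {
      return mem >> kShadowScale;
    }
    constexpr std::uint64_t ShadowToMem(std::uint64_t shadow) {
      return shadow << kShadowScale;
    }

    int main() {
      std::uint64_t p = 0x400000000;  // hypothetical application address
      std::printf("mem 0x%llx -> shadow 0x%llx\n", (unsigned long long)p,
                  (unsigned long long)MemToShadow(p));
      // All 16 bytes of a granule share one shadow cell:
      std::printf("same cell: %d\n", (int)(MemToShadow(p) == MemToShadow(p + 15)));
      // The inverse mapping recovers the granule base:
      std::printf("granule base: 0x%llx\n",
                  (unsigned long long)ShadowToMem(MemToShadow(p)));
    }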
diff --git a/lib/hwasan/hwasan_linux.cc b/lib/hwasan/hwasan_linux.cc
index c07f9928a..9b8613171 100644
--- a/lib/hwasan/hwasan_linux.cc
+++ b/lib/hwasan/hwasan_linux.cc
@@ -32,18 +32,91 @@
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_procmaps.h"
 
-uptr __hwasan_shadow_memory_dynamic_address;
+namespace __hwasan {
 
-__attribute__((alias("__hwasan_shadow_memory_dynamic_address")))
-extern uptr __hwasan_shadow_memory_dynamic_address_internal;
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
+  CHECK_EQ((beg % GetMmapGranularity()), 0);
+  CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+  uptr size = end - beg + 1;
+  DecreaseTotalMmap(size);  // Don't count the shadow against mmap_limit_mb.
+  void *res = MmapFixedNoReserve(beg, size, name);
+  if (res != (void *)beg) {
+    Report(
+        "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+        "Perhaps you're using ulimit -v\n",
+        size);
+    Abort();
+  }
+  if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size);
+  if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
+}
 
-namespace __hwasan {
+static void ProtectGap(uptr addr, uptr size) {
+  void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+  if (addr == (uptr)res) return;
+  // A few pages at the start of the address space cannot be protected.
+  // But we really want to protect as much as possible, to prevent this memory
+  // from being returned as a result of a non-FIXED mmap().
+  if (addr == 0) {
+    uptr step = GetMmapGranularity();
+    while (size > step) {
+      addr += step;
+      size -= step;
+      void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+      if (addr == (uptr)res) return;
+    }
+  }
+
+  Report(
+      "ERROR: Failed to protect the shadow gap. "
+      "HWASan cannot proceed correctly. ABORTING.\n");
+  DumpProcessMap();
+  Die();
+}
 
 bool InitShadow() {
   const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
-  uptr shadow_size = MEM_TO_SHADOW_OFFSET(maxVirtualAddress) + 1;
-  __hwasan_shadow_memory_dynamic_address =
-      reinterpret_cast<uptr>(MmapNoReserveOrDie(shadow_size, "shadow"));
+
+  // LowMem covers as much of the first 4GB as possible.
+  const uptr kLowMemEnd = 1UL << 32;
+  const uptr kLowShadowEnd = kLowMemEnd >> kShadowScale;
+  const uptr kLowShadowStart = kLowShadowEnd >> kShadowScale;
+
+  // HighMem covers the upper part of the address space.
+  const uptr kHighShadowEnd = (maxVirtualAddress >> kShadowScale) + 1;
+  const uptr kHighShadowStart = Max(kLowMemEnd, kHighShadowEnd >> kShadowScale);
+  CHECK(kHighShadowStart < kHighShadowEnd);
+
+  const uptr kHighMemStart = kHighShadowStart << kShadowScale;
+  CHECK(kHighShadowEnd <= kHighMemStart);
+
+  if (Verbosity()) {
+    Printf("|| `[%p, %p]` || HighMem    ||\n", (void *)kHighMemStart,
+           (void *)maxVirtualAddress);
+    if (kHighMemStart > kHighShadowEnd)
+      Printf("|| `[%p, %p]` || ShadowGap3 ||\n", (void *)kHighShadowEnd,
+             (void *)kHighMemStart);
+    Printf("|| `[%p, %p]` || HighShadow ||\n", (void *)kHighShadowStart,
+           (void *)kHighShadowEnd);
+    if (kHighShadowStart > kLowMemEnd)
+      Printf("|| `[%p, %p]` || ShadowGap2 ||\n", (void *)kLowMemEnd,
+             (void *)kHighShadowStart);
+    Printf("|| `[%p, %p]` || LowMem     ||\n", (void *)kLowShadowEnd,
+           (void *)kLowMemEnd);
+    Printf("|| `[%p, %p]` || LowShadow  ||\n", (void *)kLowShadowStart,
+           (void *)kLowShadowEnd);
+    Printf("|| `[%p, %p]` || ShadowGap1 ||\n", (void *)0,
+           (void *)kLowShadowStart);
+  }
+
+  ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd - 1, "low shadow");
+  ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd - 1, "high shadow");
+  ProtectGap(0, kLowShadowStart);
+  if (kHighShadowStart > kLowMemEnd)
+    ProtectGap(kLowMemEnd, kHighShadowStart - kLowMemEnd);
+  if (kHighMemStart > kHighShadowEnd)
+    ProtectGap(kHighShadowEnd, kHighMemStart - kHighShadowEnd);
+
   return true;
 }
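The region math in InitShadow() above can be checked in isolation; the sketch below recomputes the boundaries with the same formulas, assuming a 48-bit virtual address space (the VA size is an assumption for illustration, not taken from the patch):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Same formulas as InitShadow above; 48-bit VA space is an assumption.
      const unsigned long long kShadowScale = 4;
      const unsigned long long kMaxAddr = (1ULL << 48) - 1;
      const unsigned long long kLowMemEnd = 1ULL << 32;
      const unsigned long long kLowShadowEnd = kLowMemEnd >> kShadowScale;
      const unsigned long long kLowShadowStart = kLowShadowEnd >> kShadowScale;
      const unsigned long long kHighShadowEnd = (kMaxAddr >> kShadowScale) + 1;
      const unsigned long long kHighShadowStart =
          std::max(kLowMemEnd, kHighShadowEnd >> kShadowScale);
      const unsigned long long kHighMemStart = kHighShadowStart << kShadowScale;
      std::printf("low shadow:  [0x%llx, 0x%llx)\n", kLowShadowStart, kLowShadowEnd);
      std::printf("high shadow: [0x%llx, 0x%llx)\n", kHighShadowStart, kHighShadowEnd);
      std::printf("high mem:    [0x%llx, 0x%llx]\n", kHighMemStart, kMaxAddr);
    }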
@@ -105,45 +178,28 @@ struct AccessInfo {
 
 #if defined(__aarch64__)
 static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
+  // The access type is encoded in the HLT immediate as 0x1XY,
+  // where X is 1 for store, 0 for load.
+  // Valid values of Y are 0 to 4, which are interpreted as log2(access_size),
+  // and 0xF, which means that the access size is stored in the X1 register.
+  // The access address is always in the X0 register.
   AccessInfo ai;
   uptr pc = (uptr)info->si_addr;
-
-  struct {
-    uptr addr;
-    unsigned size;
-    bool is_store;
-  } handlers[] = {
-      {(uptr)&__hwasan_load1, 1, false},   {(uptr)&__hwasan_load2, 2, false},
-      {(uptr)&__hwasan_load4, 4, false},   {(uptr)&__hwasan_load8, 8, false},
-      {(uptr)&__hwasan_load16, 16, false}, {(uptr)&__hwasan_load, 0, false},
-      {(uptr)&__hwasan_store1, 1, true},   {(uptr)&__hwasan_store2, 2, true},
-      {(uptr)&__hwasan_store4, 4, true},   {(uptr)&__hwasan_store8, 8, true},
-      {(uptr)&__hwasan_store16, 16, true}, {(uptr)&__hwasan_store, 0, true}};
-  int best = -1;
-  uptr best_distance = 0;
-  for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); ++i) {
-    uptr handler = handlers[i].addr;
-    // Don't accept pc == handler: HLT is never the first instruction.
-    if (pc <= handler) continue;
-    uptr distance = pc - handler;
-    if (distance > 256) continue;
-    if (best == -1 || best_distance > distance) {
-      best = i;
-      best_distance = distance;
-    }
-  }
-
-  // Not ours.
-  if (best == -1)
-    return AccessInfo{0, 0, false, false};
-
-  ai.is_store = handlers[best].is_store;
-  ai.is_load = !handlers[best].is_store;
-  ai.size = handlers[best].size;
-
+  unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
+  if ((code & 0xff00) != 0x100)
+    return AccessInfo{0, 0, false, false};  // Not ours.
+  bool is_store = code & 0x10;
+  unsigned size_log = code & 0xf;
+  if (size_log > 4 && size_log != 0xf)
+    return AccessInfo{0, 0, false, false};  // Not ours.
+
+  ai.is_store = is_store;
+  ai.is_load = !is_store;
   ai.addr = uc->uc_mcontext.regs[0];
-  if (ai.size == 0)
+  if (size_log == 0xf)
     ai.size = uc->uc_mcontext.regs[1];
+  else
+    ai.size = 1U << size_log;
   return ai;
 }
 #else
@@ -152,11 +208,11 @@ static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
 }
 #endif
 
-static void HwasanOnSIGILL(int signo, siginfo_t *info, ucontext_t *uc) {
+static bool HwasanOnSIGILL(int signo, siginfo_t *info, ucontext_t *uc) {
   SignalContext sig{info, uc};
   AccessInfo ai = GetAccessInfo(info, uc);
   if (!ai.is_store && !ai.is_load)
-    return;
+    return false;
 
   InternalScopedBuffer<BufferedStackTrace> stack_buffer(1);
   BufferedStackTrace *stack = stack_buffer.data();
@@ -169,8 +225,9 @@ static void HwasanOnSIGILL(int signo, siginfo_t *info, ucontext_t *uc) {
   ++hwasan_report_count;
 
   if (flags()->halt_on_error)
     Die();
-  else
-    uc->uc_mcontext.pc += 4;
+
+  uc->uc_mcontext.pc += 4;
+  return true;
 }
 
 static void OnStackUnwind(const SignalContext &sig, const void *,
@@ -181,11 +238,11 @@ static void OnStackUnwind(const SignalContext &sig, const void *,
 
 void HwasanOnDeadlySignal(int signo, void *info, void *context) {
   // Probably a tag mismatch.
-  // FIXME: detect pc range in __hwasan_load* or __hwasan_store*.
   if (signo == SIGILL)
-    HwasanOnSIGILL(signo, (siginfo_t *)info, (ucontext_t *)context);
-  else
-    HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
+    if (HwasanOnSIGILL(signo, (siginfo_t *)info, (ucontext_t *)context))
+      return;
+
+  HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
 }
-- 
cgit v1.2.3
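For reference, GetAccessInfo() above depends on the AArch64 encoding of HLT #imm16, which stores the immediate in bits [20:5] of the instruction word. A standalone round trip of the decode expression (illustrative sketch, not part of the runtime):

    #include <cstdint>
    #include <cstdio>

    // AArch64 "HLT #imm16" is 0xD4400000 with the immediate in bits [20:5].
    std::uint32_t EncodeHlt(std::uint16_t imm) {
      return 0xD4400000u | (std::uint32_t(imm) << 5);
    }

    int main() {
      std::uint32_t insn = EncodeHlt(0x113);  // as emitted for an 8-byte store
      unsigned code = (insn >> 5) & 0xffff;   // same decode as GetAccessInfo
      if ((code & 0xff00) == 0x100) {         // a hwasan tag-mismatch trap
        bool is_store = code & 0x10;
        unsigned size_log = code & 0xf;
        std::printf("store=%d size=%u\n", (int)is_store, 1u << size_log);
      }
    }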