diff options
| author | Kuba Mracek &lt;mracek@apple.com&gt; | 2017-11-29 19:27:25 +0000 |
|---|---|---|
| committer | Kuba Mracek &lt;mracek@apple.com&gt; | 2017-11-29 19:27:25 +0000 |
commit | 0a7288064854a3ff716d928a0ab72fdd2bbddb72 (patch) | |
tree | 81d7453cf3a8ab393d237a36826baf7546699664 /lib/xray | |
parent | 17c071e3dcbb1ee6d107a12096ec26750fb816c0 (diff) |
[sanitizer] Refactor how assembly files are handled
This renames ASM_TSAN_SYMBOL and ASM_TSAN_SYMBOL_INTERCEPTOR to just ASM_SYMBOL and ASM_SYMBOL_INTERCEPTOR, because they can be useful in more places than just TSan. Also introduce a CMake function to add ASM sources to a target.
Differential Revision: https://reviews.llvm.org/D40143
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@319339 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/xray')
-rw-r--r--  lib/xray/CMakeLists.txt            |  8
-rw-r--r--  lib/xray/xray_trampoline_x86_64.S  | 32
2 files changed, 17 insertions, 23 deletions
diff --git a/lib/xray/CMakeLists.txt b/lib/xray/CMakeLists.txt
index dcf0efc72..fd00f3a40 100644
--- a/lib/xray/CMakeLists.txt
+++ b/lib/xray/CMakeLists.txt
@@ -66,13 +66,7 @@ set(XRAY_COMMON_RUNTIME_OBJECT_LIBS
 if (APPLE)
   set(XRAY_LINK_LIBS ${SANITIZER_COMMON_LINK_LIBS})
-  set(XRAY_ASM_SOURCES xray_trampoline_x86_64.S)
-
-  if (${CMAKE_GENERATOR} STREQUAL "Xcode")
-    enable_language(ASM)
-  else()
-    set_source_files_properties(${XRAY_ASM_SOURCES} PROPERTIES LANGUAGE C)
-  endif()
+  add_asm_sources(XRAY_ASM_SOURCES xray_trampoline_x86_64.S)

   add_weak_symbols("sanitizer_common" WEAK_SYMBOL_LINK_FLAGS)
   add_weak_symbols("xray" WEAK_SYMBOL_LINK_FLAGS)
diff --git a/lib/xray/xray_trampoline_x86_64.S b/lib/xray/xray_trampoline_x86_64.S
index 0ae85c78a..350afd926 100644
--- a/lib/xray/xray_trampoline_x86_64.S
+++ b/lib/xray/xray_trampoline_x86_64.S
@@ -87,16 +87,16 @@
 //===----------------------------------------------------------------------===//

-	.globl ASM_TSAN_SYMBOL(__xray_FunctionEntry)
+	.globl ASM_SYMBOL(__xray_FunctionEntry)
 	.align 16, 0x90
 	ASM_TYPE_FUNCTION(__xray_FunctionEntry)
-ASM_TSAN_SYMBOL(__xray_FunctionEntry):
+ASM_SYMBOL(__xray_FunctionEntry):
 	CFI_STARTPROC
 	SAVE_REGISTERS

 	// This load has to be atomic, it's concurrent with __xray_patch().
 	// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
-	movq	ASM_TSAN_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq	%rax, %rax
 	je	.Ltmp0
@@ -113,10 +113,10 @@ ASM_TSAN_SYMBOL(__xray_FunctionEntry):
 //===----------------------------------------------------------------------===//

-	.globl ASM_TSAN_SYMBOL(__xray_FunctionExit)
+	.globl ASM_SYMBOL(__xray_FunctionExit)
 	.align 16, 0x90
 	ASM_TYPE_FUNCTION(__xray_FunctionExit)
-ASM_TSAN_SYMBOL(__xray_FunctionExit):
+ASM_SYMBOL(__xray_FunctionExit):
 	CFI_STARTPROC
 	// Save the important registers first. Since we're assuming that this
 	// function is only jumped into, we only preserve the registers for
@@ -128,7 +128,7 @@ ASM_TSAN_SYMBOL(__xray_FunctionExit):
 	movupd	%xmm1, 16(%rsp)
 	movq	%rax, 8(%rsp)
 	movq	%rdx, 0(%rsp)
-	movq	ASM_TSAN_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq %rax,%rax
 	je	.Ltmp2
@@ -151,14 +151,14 @@ ASM_TSAN_SYMBOL(__xray_FunctionExit):
 //===----------------------------------------------------------------------===//

-	.globl ASM_TSAN_SYMBOL(__xray_FunctionTailExit)
+	.globl ASM_SYMBOL(__xray_FunctionTailExit)
 	.align 16, 0x90
 	ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
-ASM_TSAN_SYMBOL(__xray_FunctionTailExit):
+ASM_SYMBOL(__xray_FunctionTailExit):
 	CFI_STARTPROC
 	SAVE_REGISTERS

-	movq	ASM_TSAN_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq %rax,%rax
 	je	.Ltmp4
@@ -175,20 +175,20 @@ ASM_TSAN_SYMBOL(__xray_FunctionTailExit):
 //===----------------------------------------------------------------------===//

-	.globl ASM_TSAN_SYMBOL(__xray_ArgLoggerEntry)
+	.globl ASM_SYMBOL(__xray_ArgLoggerEntry)
 	.align 16, 0x90
 	ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
-ASM_TSAN_SYMBOL(__xray_ArgLoggerEntry):
+ASM_SYMBOL(__xray_ArgLoggerEntry):
 	CFI_STARTPROC
 	SAVE_REGISTERS

 	// Again, these function pointer loads must be atomic; MOV is fine.
-	movq	ASM_TSAN_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
+	movq	ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
 	testq	%rax, %rax
 	jne	.Larg1entryLog

 	// If [arg1 logging handler] not set, defer to no-arg logging.
-	movq	ASM_TSAN_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+	movq	ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
 	testq	%rax, %rax
 	je	.Larg1entryFail
@@ -212,17 +212,17 @@ ASM_TSAN_SYMBOL(__xray_ArgLoggerEntry):
 //===----------------------------------------------------------------------===//

-	.global ASM_TSAN_SYMBOL(__xray_CustomEvent)
+	.global ASM_SYMBOL(__xray_CustomEvent)
 	.align 16, 0x90
 	ASM_TYPE_FUNCTION(__xray_CustomEvent)
-ASM_TSAN_SYMBOL(__xray_CustomEvent):
+ASM_SYMBOL(__xray_CustomEvent):
 	CFI_STARTPROC
 	SAVE_REGISTERS

 	// We take two arguments to this trampoline, which should be in rdi and rsi
 	// already. We also make sure that we stash %rax because we use that register
 	// to call the logging handler.
-	movq	ASM_TSAN_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
+	movq	ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
 	testq %rax,%rax
 	je .LcustomEventCleanup