author    Jens Wiklander <jens.wiklander@linaro.org>  2015-12-06 11:01:54 +0100
committer Pascal Brand <pascal.brand@st.com>          2016-02-10 06:38:36 +0100
commit    923c1f343db556196600a1788c437715083264f8 (patch)
tree      a6573f73668c47c70bc94e6edf83e55b178772b7
parent    a1989c09670c506604b60b16d54281a633c5eb41 (diff)
core: stack unwinding
Adds support for stack unwinding, currently only done for fatal aborts
from kernel mode if CFG_CORE_UNWIND = y.

The ARMv7/Aarch32 implementation uses -funwind-tables to generate frame
unwinding information, which is quite large: enabling stack unwinding
currently consumes ~8 KiB. The code to parse the frame unwind
information is imported from FreeBSD.

The Aarch64 implementation takes advantage of the frame pointer and has
minimal overhead. The core code to unwind the stack is imported from
FreeBSD.

Reviewed-by: Jerome Forissier <jerome.forissier@linaro.org>
Tested-by: Jerome Forissier <jerome.forissier@linaro.org> (HiKey 32/64)
Reviewed-by: Pascal Brand <pascal.brand@linaro.org>
Tested-by: Jens Wiklander <jens.wiklander@linaro.org> (QEMU, FVP)
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
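For orientation before the diff: the intended use is a loop that prints one
line per frame until unwind_stack() reports it cannot go further, as the
abort handler in this patch does. A minimal Aarch64 sketch, assuming only the
declarations added below (print_kernel_stack is a hypothetical name):

	#include <inttypes.h>
	#include <string.h>

	#include <kernel/unwind.h>
	#include <trace.h>

	static void print_kernel_stack(uint64_t pc, uint64_t fp)
	{
		struct unwind_state state;

		memset(&state, 0, sizeof(state));
		state.pc = pc;	/* instruction where execution stopped */
		state.fp = fp;	/* frame pointer (x29) at that point */

		do
			EMSG_RAW("pc 0x%016" PRIx64, state.pc);
		while (unwind_stack(&state));
	}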
-rw-r--r--  core/arch/arm/arm.mk                               |   3
-rw-r--r--  core/arch/arm/include/kernel/thread.h              |   2
-rw-r--r--  core/arch/arm/include/kernel/unwind.h              |  68
-rw-r--r--  core/arch/arm/kernel/abort.c                       |  59
-rw-r--r--  core/arch/arm/kernel/generic_entry_a32.S           |  31
-rw-r--r--  core/arch/arm/kernel/kern.ld.S                     |   6
-rw-r--r--  core/arch/arm/kernel/misc_a32.S                    |  11
-rw-r--r--  core/arch/arm/kernel/proc_a32.S                    |  13
-rw-r--r--  core/arch/arm/kernel/ssvce_a32.S                   |  25
-rw-r--r--  core/arch/arm/kernel/sub.mk                        |   5
-rw-r--r--  core/arch/arm/kernel/thread.c                      |   8
-rw-r--r--  core/arch/arm/kernel/thread_a32.S                  |  78
-rw-r--r--  core/arch/arm/kernel/tz_ssvce_pl310_a32.S          |  17
-rw-r--r--  core/arch/arm/kernel/unwind_arm32.c                | 377
-rw-r--r--  core/arch/arm/kernel/unwind_arm64.c                |  49
-rw-r--r--  core/arch/arm/kernel/vfp_a32.S                     |  40
-rw-r--r--  core/arch/arm/plat-ls/ls_core_pos.S                |   3
-rw-r--r--  core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S  |   3
-rw-r--r--  core/arch/arm/plat-stm/asc.S                       |   9
-rw-r--r--  core/arch/arm/plat-stm/tz_a9init.S                 |  13
-rw-r--r--  core/arch/arm/plat-sunxi/entry.S                   |   4
-rw-r--r--  core/arch/arm/plat-sunxi/kern.ld.S                 |   6
-rw-r--r--  core/arch/arm/plat-sunxi/smp_boot.S                |   6
-rw-r--r--  core/arch/arm/plat-sunxi/smp_fixup.S               |   9
-rw-r--r--  core/arch/arm/plat-vexpress/juno_core_pos_a32.S    |   3
-rw-r--r--  core/arch/arm/sm/sm_a32.S                          |  19
-rw-r--r--  core/arch/arm/tee/arch_svc_a32.S                   |   8
-rw-r--r--  mk/config.mk                                       |   6
-rw-r--r--  scripts/mem_usage.awk                              |   3
29 files changed, 878 insertions(+), 6 deletions(-)
diff --git a/core/arch/arm/arm.mk b/core/arch/arm/arm.mk
index 836a00b4..92e900a5 100644
--- a/core/arch/arm/arm.mk
+++ b/core/arch/arm/arm.mk
@@ -75,6 +75,9 @@ CROSS_COMPILE_core ?= $(CROSS_COMPILE32)
core-platform-cppflags += $(arm32-platform-cppflags)
core-platform-cflags += $(arm32-platform-cflags)
core-platform-cflags += $(arm32-platform-cflags-no-hard-float)
+ifeq ($(CFG_CORE_UNWIND),y)
+core-platform-cflags += -funwind-tables
+endif
core-platform-cflags += $(arm32-platform-cflags-generic)
core-platform-aflags += $(core_arm32-platform-aflags)
core-platform-aflags += $(arm32-platform-aflags)
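For context: -funwind-tables makes the compiler emit ARM EHABI unwind data
for every function, index entries in .ARM.exidx plus, for longer unwind
sequences, tables in .ARM.extab. Those are the sections the linker script
changes below collect between __exidx_start/__exidx_end and
__extab_start/__extab_end for the parser in unwind_arm32.c, and they are
where the ~8 KiB quoted in the commit message goes.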
diff --git a/core/arch/arm/include/kernel/thread.h b/core/arch/arm/include/kernel/thread.h
index 76a21817..7e3113a5 100644
--- a/core/arch/arm/include/kernel/thread.h
+++ b/core/arch/arm/include/kernel/thread.h
@@ -483,6 +483,8 @@ void thread_unwind_user_mode(uint32_t ret, uint32_t exit_status0,
vaddr_t thread_get_saved_thread_sp(void);
#endif /*ARM64*/
+bool thread_addr_is_in_stack(vaddr_t va);
+
/*
* Adds a mutex to the list of held mutexes for current thread
* Requires IRQs to be disabled.
diff --git a/core/arch/arm/include/kernel/unwind.h b/core/arch/arm/include/kernel/unwind.h
new file mode 100644
index 00000000..846f6590
--- /dev/null
+++ b/core/arch/arm/include/kernel/unwind.h
@@ -0,0 +1,68 @@
+/*-
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2000, 2001 Ben Harris
+ * Copyright (c) 1996 Scott K. Stevens
+ *
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie the
+ * rights to redistribute these changes.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef KERNEL_UNWIND
+#define KERNEL_UNWIND
+
+#ifndef ASM
+#include <types_ext.h>
+
+#ifdef ARM32
+/* The state of the unwind process */
+struct unwind_state {
+ uint32_t registers[16];
+ uint32_t start_pc;
+ uint32_t *insn;
+ unsigned entries;
+ unsigned byte;
+ uint16_t update_mask;
+};
+#endif /*ARM32*/
+
+#ifdef ARM64
+struct unwind_state {
+ uint64_t fp;
+ uint64_t sp;
+ uint64_t pc;
+};
+#endif /*ARM64*/
+
+bool unwind_stack(struct unwind_state *state);
+#endif /*ASM*/
+
+#ifdef CFG_CORE_UNWIND
+#define UNWIND(...) __VA_ARGS__
+#else
+#define UNWIND(...)
+#endif
+
+#endif /*KERNEL_UNWIND*/
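UNWIND() is a plain variadic pass-through, so the assembly sources can carry
their annotations unconditionally. A preprocessor sketch, using directives
that appear later in this patch:

	/* With CFG_CORE_UNWIND defined, each use expands to its argument: */
	UNWIND(	.fnstart)		/* -> .fnstart */
	UNWIND(	.save {r4, lr})		/* -> .save {r4, lr} */
	UNWIND(	.cantunwind)		/* -> .cantunwind */
	UNWIND(	.fnend)			/* -> .fnend */

	/* Without it every use expands to nothing, so no unwind
	 * annotations (and no .ARM.exidx entries) are emitted. */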
diff --git a/core/arch/arm/kernel/abort.c b/core/arch/arm/kernel/abort.c
index 3fc917c4..2687d52d 100644
--- a/core/arch/arm/kernel/abort.c
+++ b/core/arch/arm/kernel/abort.c
@@ -30,6 +30,7 @@
#include <kernel/tee_ta_manager.h>
#include <kernel/panic.h>
#include <kernel/user_ta.h>
+#include <kernel/unwind.h>
#include <mm/core_mmu.h>
#include <mm/tee_pager.h>
#include <tee/tee_svc.h>
@@ -43,6 +44,61 @@ enum fault_type {
FAULT_TYPE_IGNORE,
};
+#ifdef CFG_CORE_UNWIND
+#ifdef ARM32
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.registers[0] = ai->regs->r0;
+ state.registers[1] = ai->regs->r1;
+ state.registers[2] = ai->regs->r2;
+ state.registers[3] = ai->regs->r3;
+ state.registers[4] = ai->regs->r4;
+ state.registers[5] = ai->regs->r5;
+ state.registers[6] = ai->regs->r6;
+ state.registers[7] = ai->regs->r7;
+ state.registers[8] = ai->regs->r8;
+ state.registers[9] = ai->regs->r9;
+ state.registers[10] = ai->regs->r10;
+ state.registers[11] = ai->regs->r11;
+ state.registers[13] = read_mode_sp(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[14] = read_mode_lr(ai->regs->spsr & CPSR_MODE_MASK);
+ state.registers[15] = ai->pc;
+
+ do {
+ EMSG_RAW(" pc 0x%08x", state.registers[15]);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM32*/
+
+#ifdef ARM64
+static void __print_stack_unwind(struct abort_info *ai)
+{
+ struct unwind_state state;
+
+ memset(&state, 0, sizeof(state));
+ state.pc = ai->regs->elr;
+ state.fp = ai->regs->x29;
+
+ do {
+ EMSG_RAW("pc 0x%016" PRIx64, state.pc);
+ } while (unwind_stack(&state));
+}
+#endif /*ARM64*/
+
+static void print_stack_unwind(struct abort_info *ai)
+{
+ EMSG_RAW("Call stack:");
+ __print_stack_unwind(ai);
+}
+#else /*CFG_CORE_UNWIND*/
+static void print_stack_unwind(struct abort_info *ai __unused)
+{
+}
+#endif /*CFG_CORE_UNWIND*/
+
static __maybe_unused const char *abort_type_to_str(uint32_t abort_type)
{
if (abort_type == ABORT_TYPE_DATA)
@@ -133,7 +189,7 @@ void abort_print(struct abort_info *ai __maybe_unused)
#endif /*TRACE_LEVEL >= TRACE_DEBUG*/
}
-void abort_print_error(struct abort_info *ai __maybe_unused)
+void abort_print_error(struct abort_info *ai)
{
#if (TRACE_LEVEL >= TRACE_INFO)
/* full verbose log at DEBUG level */
@@ -157,6 +213,7 @@ void abort_print_error(struct abort_info *ai __maybe_unused)
read_mpidr_el1(), (uint32_t)ai->regs->spsr);
#endif /*ARM64*/
#endif /*TRACE_LEVEL >= TRACE_DEBUG*/
+ print_stack_unwind(ai);
}
#ifdef ARM32
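Two notes on the handlers above. First, the Aarch32 variant seeds
registers[13] and registers[14] through read_mode_sp()/read_mode_lr() because
sp and lr are banked per processor mode on ARMv7: the values belonging to the
aborted mode (recorded in spsr) are not directly visible from abort mode,
which is what the helpers added to misc_a32.S below are for. Second, going by
the format strings, the extra output on a fatal Aarch32 abort is the
"Call stack:" header followed by one pc line per unwound frame, innermost
first (addresses here are made up for illustration):

	Call stack:
	 pc 0x0010a2f8
	 pc 0x001038c4
	 pc 0x00101e60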
diff --git a/core/arch/arm/kernel/generic_entry_a32.S b/core/arch/arm/kernel/generic_entry_a32.S
index fee4be8b..d2c3171c 100644
--- a/core/arch/arm/kernel/generic_entry_a32.S
+++ b/core/arch/arm/kernel/generic_entry_a32.S
@@ -33,6 +33,7 @@
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
.section .data
.balign 4
@@ -63,16 +64,21 @@ END_FUNC _start
/* Let platforms override this if needed */
.weak plat_cpu_reset_early
FUNC plat_cpu_reset_early , :
+UNWIND( .fnstart)
bx lr
+UNWIND( .fnend)
END_FUNC plat_cpu_reset_early
.weak plat_cpu_reset_late
FUNC plat_cpu_reset_late , :
+UNWIND( .fnstart)
bx lr
+UNWIND( .fnend)
END_FUNC plat_cpu_reset_late
#ifdef CFG_BOOT_SYNC_CPU
LOCAL_FUNC cpu_is_ready , :
+UNWIND( .fnstart)
lsl r0, r0, #2
ldr r1,=sem_cpu_sync
ldr r2, =SEM_CPU_READY
@@ -80,9 +86,11 @@ LOCAL_FUNC cpu_is_ready , :
dsb
sev
bx lr
+UNWIND( .fnend)
END_FUNC cpu_is_ready
LOCAL_FUNC wait_primary , :
+UNWIND( .fnstart)
ldr r0, =sem_cpu_sync
mov r2, #SEM_CPU_READY
sev
@@ -92,9 +100,11 @@ _wait_cpu0:
wfene
bne _wait_cpu0
bx lr
+UNWIND( .fnend)
END_FUNC wait_primary
LOCAL_FUNC wait_secondary , :
+UNWIND( .fnstart)
ldr r0, =sem_cpu_sync
mov r3, #CFG_TEE_CORE_NB_CORE
mov r2, #SEM_CPU_READY
@@ -111,6 +121,7 @@ _wait_cpun:
b _wait_next
_synced_cpun:
bx lr
+UNWIND( .fnend)
END_FUNC wait_secondary
#else
@@ -119,19 +130,27 @@ END_FUNC wait_secondary
* So cpu synchronization functions are empty
*/
LOCAL_FUNC cpu_is_ready , :
+UNWIND( .fnstart)
bx lr
+UNWIND( .fnend)
END_FUNC cpu_is_ready
LOCAL_FUNC wait_primary , :
+UNWIND( .fnstart)
bx lr
+UNWIND( .fnend)
END_FUNC wait_primary
LOCAL_FUNC wait_secondary , :
+UNWIND( .fnstart)
bx lr
+UNWIND( .fnend)
END_FUNC wait_secondary
#endif
LOCAL_FUNC reset , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
mov r4, r0 /* Save pageable part address */
mov r5, lr /* Save ns-entry address */
@@ -168,9 +187,12 @@ LOCAL_FUNC reset , :
beq reset_primary
b reset_secondary
#endif
+UNWIND( .fnend)
END_FUNC reset
LOCAL_FUNC reset_primary , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
#ifdef CFG_TEE_GDB_BOOT
/* save linux boot args from GDB */
ldr r0, =gdb_bootargs
@@ -319,16 +341,21 @@ copy_init:
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC reset_primary
LOCAL_FUNC unhandled_cpu , :
+UNWIND( .fnstart)
wfi
b unhandled_cpu
+UNWIND( .fnend)
END_FUNC unhandled_cpu
#if defined(CFG_WITH_ARM_TRUSTED_FW)
FUNC cpu_on_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
mov r4, r0
mov r5, r1
mov r6, lr
@@ -358,11 +385,14 @@ FUNC cpu_on_handler , :
bl generic_boot_cpu_on_handler
bx r6
+UNWIND( .fnend)
END_FUNC cpu_on_handler
#else /* defined(CFG_WITH_ARM_TRUSTED_FW) */
LOCAL_FUNC reset_secondary , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
bl wait_primary
bl get_core_pos
@@ -392,5 +422,6 @@ LOCAL_FUNC reset_secondary , :
mov r3, #0
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC reset_secondary
#endif /* defined(CFG_WITH_ARM_TRUSTED_FW) */
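A note on the annotations used in this and the following assembly files:
.fnstart/.fnend bracket a function's unwind metadata, .save records what a
push stored so the unwinder can undo it, and .cantunwind emits the
EXIDX_CANTUNWIND marker that unwind_arm32.c treats as the top of the call
stack. Boot paths, vector tables and mode-switching entry points are tagged
.cantunwind because there is no well-formed caller frame to recover past
them.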
diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S
index 88edb852..ef78efce 100644
--- a/core/arch/arm/kernel/kern.ld.S
+++ b/core/arch/arm/kernel/kern.ld.S
@@ -116,6 +116,12 @@ SECTIONS
__exidx_end = .;
}
+ .ARM.extab : {
+ __extab_start = .;
+ *(.ARM.extab*)
+ __extab_end = .;
+ }
+
.rodata : ALIGN(4) {
__rodata_start = .;
*(.gnu.linkonce.r.*)
diff --git a/core/arch/arm/kernel/misc_a32.S b/core/arch/arm/kernel/misc_a32.S
index b6603906..48fd8baa 100644
--- a/core/arch/arm/kernel/misc_a32.S
+++ b/core/arch/arm/kernel/misc_a32.S
@@ -28,17 +28,20 @@
#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
+#include <kernel/unwind.h>
/* Let platforms override this if needed */
.weak get_core_pos
FUNC get_core_pos , :
+UNWIND( .fnstart)
read_mpidr r0
/* Calculate CorePos = (ClusterId * 4) + CoreId */
and r1, r0, #MPIDR_CPU_MASK
and r0, r0, #MPIDR_CLUSTER_MASK
add r0, r1, r0, LSR #6
bx lr
+UNWIND( .fnend)
END_FUNC get_core_pos
/*
@@ -46,6 +49,7 @@ END_FUNC get_core_pos
* returns cpsr to be set
*/
LOCAL_FUNC temp_set_mode , :
+UNWIND( .fnstart)
mov r1, r0
cmp r1, #CPSR_MODE_USR /* update mode: usr -> sys */
moveq r1, #CPSR_MODE_SYS
@@ -54,26 +58,33 @@ LOCAL_FUNC temp_set_mode , :
bic r0, #CPSR_MODE_MASK /* clear mode */
orr r0, r1 /* set expected mode */
bx lr
+UNWIND( .fnend)
END_FUNC temp_set_mode
/* uint32_t read_mode_sp(int cpu_mode) */
FUNC read_mode_sp , :
+UNWIND( .fnstart)
push {r4, lr}
+UNWIND( .save {r4, lr})
mrs r4, cpsr /* save cpsr */
bl temp_set_mode
msr cpsr, r0 /* set the new mode */
mov r0, sp /* get the function result */
msr cpsr, r4 /* back to the old mode */
pop {r4, pc}
+UNWIND( .fnend)
END_FUNC read_mode_sp
/* uint32_t read_mode_lr(int cpu_mode) */
FUNC read_mode_lr , :
+UNWIND( .fnstart)
push {r4, lr}
+UNWIND( .save {r4, lr})
mrs r4, cpsr /* save cpsr */
bl temp_set_mode
msr cpsr, r0 /* set the new mode */
mov r0, lr /* get the function result */
msr cpsr, r4 /* back to the old mode */
pop {r4, pc}
+UNWIND( .fnend)
END_FUNC read_mode_lr
diff --git a/core/arch/arm/kernel/proc_a32.S b/core/arch/arm/kernel/proc_a32.S
index d0ba05db..d5ec205c 100644
--- a/core/arch/arm/kernel/proc_a32.S
+++ b/core/arch/arm/kernel/proc_a32.S
@@ -31,6 +31,7 @@
#include <kernel/tz_proc.h>
#include <kernel/tz_proc_def.h>
+#include <kernel/unwind.h>
#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
@@ -39,6 +40,7 @@
/* void cpu_spin_lock(lock address) - lock mutex */
FUNC cpu_spin_lock , :
+UNWIND( .fnstart)
mov r2, #SPINLOCK_LOCK
_spinlock_loop:
ldrex r1, [r0]
@@ -51,10 +53,12 @@ _spinlock_loop:
bne _spinlock_loop
dmb
bx lr
+UNWIND( .fnend)
END_FUNC cpu_spin_lock
/* int cpu_spin_trylock(lock address) - return 0 on success */
FUNC cpu_spin_trylock , :
+UNWIND( .fnstart)
mov r2, #SPINLOCK_LOCK
mov r1, r0
_trylock_loop:
@@ -70,16 +74,19 @@ _trylock_out:
clrex
dmb
bx lr
+UNWIND( .fnend)
END_FUNC cpu_spin_trylock
/* void cpu_spin_unlock(lock address) - unlock mutex */
FUNC cpu_spin_unlock , :
+UNWIND( .fnstart)
dmb
mov r1, #SPINLOCK_UNLOCK
str r1, [r0]
dsb
sev
bx lr
+UNWIND( .fnend)
END_FUNC cpu_spin_unlock
/*
@@ -89,6 +96,7 @@ END_FUNC cpu_spin_unlock
* A DSB and an ISB ensure the MMU is enabled before the routine returns
*/
FUNC cpu_mmu_enable , :
+UNWIND( .fnstart)
/* Invalidate TLB */
write_tlbiall
@@ -101,10 +109,12 @@ FUNC cpu_mmu_enable , :
isb
bx lr
+UNWIND( .fnend)
END_FUNC cpu_mmu_enable
/* void cpu_mmu_enable_icache(void) - enable instruction cache */
FUNC cpu_mmu_enable_icache , :
+UNWIND( .fnstart)
/* Invalidate instruction cache and branch predictor */
write_iciallu
write_bpiall
@@ -118,10 +128,12 @@ FUNC cpu_mmu_enable_icache , :
isb
bx lr
+UNWIND( .fnend)
END_FUNC cpu_mmu_enable_icache
/* void cpu_mmu_enable_dcache(void) - enable data cache */
FUNC cpu_mmu_enable_dcache , :
+UNWIND( .fnstart)
read_sctlr r0
orr r0, r0, #SCTLR_C
write_sctlr r0
@@ -130,4 +142,5 @@ FUNC cpu_mmu_enable_dcache , :
isb
bx lr
+UNWIND( .fnend)
END_FUNC cpu_mmu_enable_dcache
diff --git a/core/arch/arm/kernel/ssvce_a32.S b/core/arch/arm/kernel/ssvce_a32.S
index 4b42718f..0c5c8c24 100644
--- a/core/arch/arm/kernel/ssvce_a32.S
+++ b/core/arch/arm/kernel/ssvce_a32.S
@@ -38,6 +38,7 @@
#include <kernel/tz_proc_def.h>
#include <kernel/tz_ssvce_def.h>
+#include <kernel/unwind.h>
.section .text.ssvce
@@ -52,6 +53,7 @@
* void secure_mmu_unifiedtlbinvall(void);
*/
FUNC secure_mmu_unifiedtlbinvall , :
+UNWIND( .fnstart)
write_tlbiallis
@@ -59,6 +61,7 @@ FUNC secure_mmu_unifiedtlbinvall , :
ISB
MOV PC, LR
+UNWIND( .fnend)
END_FUNC secure_mmu_unifiedtlbinvall
/*
@@ -67,6 +70,7 @@ END_FUNC secure_mmu_unifiedtlbinvall
* Combine VA and current ASID, and invalidate matching TLB
*/
FUNC secure_mmu_unifiedtlbinvbymva , :
+UNWIND( .fnstart)
b . @ Wrong code to force fix/check the routine before using it
@@ -80,6 +84,7 @@ FUNC secure_mmu_unifiedtlbinvbymva , :
ISB
MOV PC, LR
+UNWIND( .fnend)
END_FUNC secure_mmu_unifiedtlbinvbymva
/*
@@ -88,6 +93,7 @@ END_FUNC secure_mmu_unifiedtlbinvbymva
* Invalidate TLB matching current ASID
*/
FUNC secure_mmu_unifiedtlbinv_curasid , :
+UNWIND( .fnstart)
read_contextidr r0
and r0, r0, #0xff /* Get current ASID */
/* Invalidate unified TLB by ASID Inner Sharable */
@@ -95,6 +101,7 @@ FUNC secure_mmu_unifiedtlbinv_curasid , :
dsb
isb
mov pc, lr
+UNWIND( .fnend)
END_FUNC secure_mmu_unifiedtlbinv_curasid
/*
@@ -103,18 +110,21 @@ END_FUNC secure_mmu_unifiedtlbinv_curasid
* Invalidate TLB matching current ASID
*/
FUNC secure_mmu_unifiedtlbinv_byasid , :
+UNWIND( .fnstart)
and r0, r0, #0xff /* Get ASID */
/* Invalidate unified TLB by ASID Inner Sharable */
write_tlbiasidis r0
dsb
isb
mov pc, lr
+UNWIND( .fnend)
END_FUNC secure_mmu_unifiedtlbinv_byasid
/*
* void arm_cl1_d_cleanbysetway(void)
*/
FUNC arm_cl1_d_cleanbysetway , :
+UNWIND( .fnstart)
MOV R0, #0 @ ; write the Cache Size selection register to be
MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
@@ -135,9 +145,11 @@ _cl_nextLine:
DSB @ ; synchronise
MOV PC, LR
+UNWIND( .fnend)
END_FUNC arm_cl1_d_cleanbysetway
FUNC arm_cl1_d_invbysetway , :
+UNWIND( .fnstart)
MOV R0, #0 @ ; write the Cache Size selection register to be
MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
@@ -159,9 +171,11 @@ _inv_nextLine:
DSB @ ; synchronise
MOV PC, LR
+UNWIND( .fnend)
END_FUNC arm_cl1_d_invbysetway
FUNC arm_cl1_d_cleaninvbysetway , :
+UNWIND( .fnstart)
MOV R0, #0 @ ; write the Cache Size selection register to be
MCR p15, 2, R0, c0, c0, 0 @ ; sure we address the data cache
@@ -182,12 +196,14 @@ _cli_nextLine:
DSB @ ; synchronise
MOV PC, LR
+UNWIND( .fnend)
END_FUNC arm_cl1_d_cleaninvbysetway
/*
* void arm_cl1_d_cleanbyva(void *s, void *e);
*/
FUNC arm_cl1_d_cleanbyva , :
+UNWIND( .fnstart)
CMP R0, R1 @ ; check that end >= start. Otherwise return.
BHI _cl_area_exit
@@ -207,12 +223,14 @@ _cl_area_exit:
DSB @ ; synchronise
MOV PC, LR
+UNWIND( .fnend)
END_FUNC arm_cl1_d_cleanbyva
/*
* void arm_cl1_d_invbyva(void *s, void *e);
*/
FUNC arm_cl1_d_invbyva , :
+UNWIND( .fnstart)
CMP R0, R1 @ ; check that end >= start. Otherwise return.
BHI _inv_area_dcache_exit
@@ -232,12 +250,14 @@ _inv_area_dcache_nl:
_inv_area_dcache_exit:
DSB
MOV PC, LR
+UNWIND( .fnend)
END_FUNC arm_cl1_d_invbyva
/*
* void arm_cl1_d_cleaninvbyva(void *s, void *e);
*/
FUNC arm_cl1_d_cleaninvbyva , :
+UNWIND( .fnstart)
CMP R0, R1 @ ; check that end >= start. Otherwise return.
BHI _cli_area_exit
@@ -256,6 +276,7 @@ _cli_area_nextLine:
_cli_area_exit:
DSB @ ; synchronise
MOV PC, LR
+UNWIND( .fnend)
END_FUNC arm_cl1_d_cleaninvbyva
/*
@@ -265,6 +286,7 @@ END_FUNC arm_cl1_d_cleaninvbyva
* It also invalidates the BTAC.
*/
FUNC arm_cl1_i_inv_all , :
+UNWIND( .fnstart)
/* Invalidate Entire Instruction Cache */
MOV R0, #0
@@ -279,6 +301,7 @@ FUNC arm_cl1_i_inv_all , :
ISB /* by the instructions right after the ISB */
BX LR
+UNWIND( .fnend)
END_FUNC arm_cl1_i_inv_all
/*
@@ -288,6 +311,7 @@ END_FUNC arm_cl1_i_inv_all
* It also invalidates the BTAC.
*/
FUNC arm_cl1_i_inv , :
+UNWIND( .fnstart)
CMP R0, R1 /* Check that end >= start. Otherwise return. */
BHI _inv_icache_exit
@@ -308,4 +332,5 @@ _inv_icache_nextLine:
_inv_icache_exit:
BX LR
+UNWIND( .fnend)
END_FUNC arm_cl1_i_inv
diff --git a/core/arch/arm/kernel/sub.mk b/core/arch/arm/kernel/sub.mk
index 67e5df72..769f790d 100644
--- a/core/arch/arm/kernel/sub.mk
+++ b/core/arch/arm/kernel/sub.mk
@@ -38,3 +38,8 @@ ifeq ($(CFG_GENERIC_BOOT),y)
srcs-$(CFG_ARM32_core) += generic_entry_a32.S
srcs-$(CFG_ARM64_core) += generic_entry_a64.S
endif
+
+ifeq ($(CFG_CORE_UNWIND),y)
+srcs-$(CFG_ARM32_core) += unwind_arm32.c
+srcs-$(CFG_ARM64_core) += unwind_arm64.c
+endif
diff --git a/core/arch/arm/kernel/thread.c b/core/arch/arm/kernel/thread.c
index b3f0f71e..9ea5014c 100644
--- a/core/arch/arm/kernel/thread.c
+++ b/core/arch/arm/kernel/thread.c
@@ -577,6 +577,14 @@ vaddr_t thread_get_saved_thread_sp(void)
}
#endif /*ARM64*/
+bool thread_addr_is_in_stack(vaddr_t va)
+{
+ struct thread_ctx *thr = threads + thread_get_id();
+
+ return va < thr->stack_va_end &&
+ va >= (thr->stack_va_end - STACK_THREAD_SIZE);
+}
+
void thread_state_free(void)
{
struct thread_core_local *l = thread_get_core_local();
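As written, thread_addr_is_in_stack() accepts addresses in
[stack_va_end - STACK_THREAD_SIZE, stack_va_end), i.e. it assumes a
full-descending stack whose high end is stack_va_end. The Aarch64 unwinder
below leans on this as its sole termination condition: the walk stops as
soon as a candidate frame pointer leaves the current thread's stack.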
diff --git a/core/arch/arm/kernel/thread_a32.S b/core/arch/arm/kernel/thread_a32.S
index 33a95929..0762ee4d 100644
--- a/core/arch/arm/kernel/thread_a32.S
+++ b/core/arch/arm/kernel/thread_a32.S
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016, Linaro Limited
* Copyright (c) 2014, STMicroelectronics International N.V.
* All rights reserved.
*
@@ -33,10 +34,13 @@
#include <sm/teesmc_opteed.h>
#include <kernel/abort.h>
#include <kernel/thread_defs.h>
+#include <kernel/unwind.h>
.section .text.thread_asm
LOCAL_FUNC vector_std_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
push {r0-r7}
mov r0, sp
bl thread_handle_std_smc
@@ -50,9 +54,12 @@ LOCAL_FUNC vector_std_smc_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_std_smc_entry
LOCAL_FUNC vector_fast_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
push {r0-r7}
mov r0, sp
bl thread_handle_fast_smc
@@ -60,9 +67,12 @@ LOCAL_FUNC vector_fast_smc_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_CALL_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_fast_smc_entry
LOCAL_FUNC vector_fiq_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/* Secure Monitor received a FIQ and passed control to us. */
bl thread_check_canaries
ldr lr, =thread_fiq_handler_ptr
@@ -72,9 +82,12 @@ LOCAL_FUNC vector_fiq_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_FIQ_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_fiq_entry
LOCAL_FUNC vector_cpu_on_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
ldr lr, =thread_cpu_on_handler_ptr
ldr lr, [lr]
blx lr
@@ -82,9 +95,12 @@ LOCAL_FUNC vector_cpu_on_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_ON_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_cpu_on_entry
LOCAL_FUNC vector_cpu_off_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
ldr lr, =thread_cpu_off_handler_ptr
ldr lr, [lr]
blx lr
@@ -92,9 +108,12 @@ LOCAL_FUNC vector_cpu_off_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_OFF_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_cpu_off_entry
LOCAL_FUNC vector_cpu_suspend_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
ldr lr, =thread_cpu_suspend_handler_ptr
ldr lr, [lr]
blx lr
@@ -102,9 +121,12 @@ LOCAL_FUNC vector_cpu_suspend_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_SUSPEND_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_cpu_suspend_entry
LOCAL_FUNC vector_cpu_resume_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
ldr lr, =thread_cpu_resume_handler_ptr
ldr lr, [lr]
blx lr
@@ -112,9 +134,12 @@ LOCAL_FUNC vector_cpu_resume_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_RESUME_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_cpu_resume_entry
LOCAL_FUNC vector_system_off_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
ldr lr, =thread_system_off_handler_ptr
ldr lr, [lr]
blx lr
@@ -122,9 +147,12 @@ LOCAL_FUNC vector_system_off_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_system_off_entry
LOCAL_FUNC vector_system_reset_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
ldr lr, =thread_system_reset_handler_ptr
ldr lr, [lr]
blx lr
@@ -132,6 +160,7 @@ LOCAL_FUNC vector_system_reset_entry , :
ldr r0, =TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC vector_system_reset_entry
/*
@@ -144,6 +173,8 @@ END_FUNC vector_system_reset_entry
* sync with sm_entry_vector in sm.c
*/
FUNC thread_vector_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
b vector_std_smc_entry
b vector_fast_smc_entry
b vector_cpu_on_entry
@@ -153,34 +184,46 @@ FUNC thread_vector_table , :
b vector_fiq_entry
b vector_system_off_entry
b vector_system_reset_entry
+UNWIND( .fnend)
END_FUNC thread_vector_table
FUNC thread_set_abt_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
mrs r1, cpsr
cps #CPSR_MODE_ABT
mov sp, r0
msr cpsr, r1
bx lr
+UNWIND( .fnend)
END_FUNC thread_set_abt_sp
FUNC thread_set_irq_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
mrs r1, cpsr
cps #CPSR_MODE_IRQ
mov sp, r0
msr cpsr, r1
bx lr
+UNWIND( .fnend)
END_FUNC thread_set_irq_sp
FUNC thread_set_fiq_sp , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
mrs r1, cpsr
cps #CPSR_MODE_FIQ
mov sp, r0
msr cpsr, r1
bx lr
+UNWIND( .fnend)
END_FUNC thread_set_fiq_sp
/* void thread_resume(struct thread_ctx_regs *regs) */
FUNC thread_resume , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
add r12, r0, #(13 * 4) /* Restore registers r0-r12 later */
cps #CPSR_MODE_SYS
@@ -196,9 +239,9 @@ FUNC thread_resume , :
ldm r0, {r0-r12}
-
/* Restore CPSR and jump to the instruction to resume at */
rfefd sp!
+UNWIND( .fnend)
END_FUNC thread_resume
/*
@@ -206,6 +249,8 @@ END_FUNC thread_resume
* CPSR.
*/
LOCAL_FUNC thread_save_state , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
push {r12, lr}
/*
* Uses stack for temporary storage, while storing needed
@@ -244,9 +289,12 @@ LOCAL_FUNC thread_save_state , :
mov r0, r5 /* Return original CPSR */
bx lr
+UNWIND( .fnend)
END_FUNC thread_save_state
FUNC thread_std_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/* Pass r0-r7 in a struct thread_smc_args */
push {r0-r7}
mov r0, sp
@@ -273,13 +321,17 @@ FUNC thread_std_smc_entry , :
mov r4, r7
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC thread_std_smc_entry
/* void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS]) */
FUNC thread_rpc , :
+UNWIND( .fnstart)
push {lr}
+UNWIND( .save {lr})
push {r0}
+UNWIND( .save {r0})
bl thread_save_state
mov r4, r0 /* Save original CPSR */
@@ -314,9 +366,12 @@ FUNC thread_rpc , :
pop {r12} /* Get pointer to rv[] */
stm r12, {r0-r2} /* Store r0-r2 into rv[] */
pop {pc}
+UNWIND( .fnend)
END_FUNC thread_rpc
LOCAL_FUNC thread_fiq_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/* FIQ has a +4 offset for lr compared to preferred return address */
sub lr, lr, #4
push {r0-r12, lr}
@@ -326,9 +381,12 @@ LOCAL_FUNC thread_fiq_handler , :
blx lr
pop {r0-r12, lr}
movs pc, lr
+UNWIND( .fnend)
END_FUNC thread_fiq_handler
LOCAL_FUNC thread_irq_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/*
* IRQ mode is set up to use tmp stack so FIQ has to be
* disabled before touching the stack. We can also assign
@@ -364,13 +422,16 @@ LOCAL_FUNC thread_irq_handler , :
/* r4 is already filled in above */
smc #0
b . /* SMC should not return */
+UNWIND( .fnend)
END_FUNC thread_irq_handler
FUNC thread_init_vbar , :
+UNWIND( .fnstart)
/* Set vector (VBAR) */
ldr r0, =thread_vect_table
write_vbar r0
bx lr
+UNWIND( .fnend)
END_FUNC thread_init_vbar
/*
@@ -415,6 +476,8 @@ END_FUNC thread_init_vbar
*
*/
FUNC __thread_enter_user_mode , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/*
* Save all registers to allow syscall_return() to resume execution
* as if this function would have returned. This is also used in
@@ -451,6 +514,7 @@ FUNC __thread_enter_user_mode , :
mov lr, #0
/* Call the user function with its arguments */
movs pc, r5
+UNWIND( .fnend)
END_FUNC __thread_enter_user_mode
/*
@@ -459,6 +523,8 @@ END_FUNC __thread_enter_user_mode
* See description in thread.h
*/
FUNC thread_unwind_user_mode , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
ldr ip, [sp, #(14 * 0x4)] /* &ctx->panicked */
str r1, [ip]
ldr ip, [sp, #(15 * 0x4)] /* &ctx->panic_code */
@@ -471,11 +537,14 @@ FUNC thread_unwind_user_mode , :
cps #CPSR_MODE_SVC
pop {r4-r12,pc} /* Match the push in thread_enter_user_mode()*/
+UNWIND( .fnend)
END_FUNC thread_unwind_user_mode
LOCAL_FUNC thread_abort_handler , :
thread_abort_handler:
thread_und_handler:
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/*
* Switch to abort mode to use that stack instead.
*/
@@ -526,9 +595,12 @@ thread_pabort_handler:
msr spsr_fsxc, r0
pop {r0-r11, ip}
movs pc, lr
+UNWIND( .fnend)
END_FUNC thread_abort_handler
LOCAL_FUNC thread_svc_handler , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
push {r0-r7, lr}
mrs r0, spsr
push {r0}
@@ -538,10 +610,13 @@ LOCAL_FUNC thread_svc_handler , :
msr spsr_fsxc, r0
pop {r0-r7, lr}
movs pc, lr
+UNWIND( .fnend)
END_FUNC thread_svc_handler
.align 5
LOCAL_FUNC thread_vect_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
b . /* Reset */
b thread_und_handler /* Undefined instruction */
b thread_svc_handler /* System call */
@@ -550,4 +625,5 @@ LOCAL_FUNC thread_vect_table , :
b . /* Reserved */
b thread_irq_handler /* IRQ */
b thread_fiq_handler /* FIQ */
+UNWIND( .fnend)
END_FUNC thread_vect_table
diff --git a/core/arch/arm/kernel/tz_ssvce_pl310_a32.S b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
index 911ed8f5..d16937bb 100644
--- a/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
+++ b/core/arch/arm/kernel/tz_ssvce_pl310_a32.S
@@ -28,9 +28,11 @@
#include <kernel/tz_proc_def.h>
#include <kernel/tz_ssvce_def.h>
#include <asm.S>
+#include <kernel/unwind.h>
/* lock all L2 caches ways for data and instruction */
FUNC arm_cl2_lockallways , :
+UNWIND( .fnstart)
mov r0, #PL310_NB_WAYS
movw r1, #PL310_DCACHE_LOCKDOWN_BASE
@@ -43,12 +45,14 @@ loop_data_lockdown:
bne loop_data_lockdown
mov pc, lr
+UNWIND( .fnend)
END_FUNC arm_cl2_lockallways
/*
* void arm_cl2_cleaninvbyway(void) - clean & invalidate the whole L2 cache.
*/
FUNC arm_cl2_cleaninvbyway , :
+UNWIND( .fnstart)
/* Clean and invalidate all cache ways */
movw r0, #PL310_FLUSH_BY_WAY
@@ -84,10 +88,12 @@ loop_cli_sync_done:
bne loop_cli_sync_done
mov pc, lr
+UNWIND( .fnend)
END_FUNC arm_cl2_cleaninvbyway
/* void (arm_cl2_invbyway(void) */
FUNC arm_cl2_invbyway , :
+UNWIND( .fnstart)
/* Clean by Way */
movw r0, #PL310_INV_BY_WAY
@@ -124,10 +130,12 @@ loop_inv_way_sync_done:
bne loop_inv_way_sync_done
mov pc, lr
+UNWIND( .fnend)
END_FUNC arm_cl2_invbyway
/* void arm_cl2_cleanbyway(u32 pa) */
FUNC arm_cl2_cleanbyway , :
+UNWIND( .fnstart)
/* Clean by Way */
movw r0, #PL310_CLEAN_BY_WAY
@@ -164,6 +172,7 @@ loop_cl_way_sync_done:
bne loop_cl_way_sync_done
mov pc, lr
+UNWIND( .fnend)
END_FUNC arm_cl2_cleanbyway
/*
@@ -171,6 +180,7 @@ END_FUNC arm_cl2_cleanbyway
* pl310value is one of PL310_CLEAN_BY_PA, PL310_INV_BY_PA or PL310_FLUSH_BY_PA
*/
LOCAL_FUNC _arm_cl2_xxxbypa , :
+UNWIND( .fnstart)
/* Align start address on PL310 line size */
and r0, #(~(PL310_LINE_SIZE - 1))
@@ -220,6 +230,7 @@ loop_xxx_pa_sync_done:
bne loop_xxx_pa_sync_done
mov pc, lr
+UNWIND( .fnend)
END_FUNC _arm_cl2_xxxbypa
/*
@@ -227,9 +238,11 @@ END_FUNC _arm_cl2_xxxbypa
* clean L2 cache by physical address range.
*/
FUNC arm_cl2_cleanbypa , :
+UNWIND( .fnstart)
movw r2, #PL310_CLEAN_BY_PA
movt r2, #PL310_BASE_H
b _arm_cl2_xxxbypa
+UNWIND( .fnend)
END_FUNC arm_cl2_cleanbypa
/*
@@ -237,9 +250,11 @@ END_FUNC arm_cl2_cleanbypa
* invalidate L2 cache by physical address range.
*/
FUNC arm_cl2_invbypa , :
+UNWIND( .fnstart)
movw r2, #PL310_INV_BY_PA
movt r2, #PL310_BASE_H
b _arm_cl2_xxxbypa
+UNWIND( .fnend)
END_FUNC arm_cl2_invbypa
/*
@@ -247,8 +262,10 @@ END_FUNC arm_cl2_invbypa
* clean and invalidate L2 cache by physical address range.
*/
FUNC arm_cl2_cleaninvbypa , :
+UNWIND( .fnstart)
movw r2, #PL310_FLUSH_BY_PA
movt r2, #PL310_BASE_H
b _arm_cl2_xxxbypa
+UNWIND( .fnend)
END_FUNC arm_cl2_cleaninvbypa
diff --git a/core/arch/arm/kernel/unwind_arm32.c b/core/arch/arm/kernel/unwind_arm32.c
new file mode 100644
index 00000000..921f469d
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm32.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2015 Linaro Limited
+ * Copyright 2013-2014 Andrew Turner.
+ * Copyright 2013-2014 Ian Lepore.
+ * Copyright 2013-2014 Rui Paulo.
+ * Copyright 2013 Eitan Adler.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <kernel/unwind.h>
+#include <trace.h>
+
+/* The register names */
+#define FP 11
+#define SP 13
+#define LR 14
+#define PC 15
+
+/*
+ * Definitions for the instruction interpreter.
+ *
+ * The ARM EABI specifies how to perform the frame unwinding in the
+ * Exception Handling ABI for the ARM Architecture document. To perform
+ * the unwind we need to know the initial frame pointer, stack pointer,
+ * link register and program counter. We then find the entry within the
+ * index table that points to the function the program counter is within.
+ * This gives us either a list of three instructions to process, a 31-bit
+ * relative offset to a table of instructions, or a value telling us
+ * we can't unwind any further.
+ *
+ * When we have the instructions to process we need to decode them
+ * following table 4 in section 9.3. This describes a collection of bit
+ * patterns to encode that steps to take to update the stack pointer and
+ * link register to the correct values at the start of the function.
+ */
+
+/* A special case when we are unable to unwind past this function */
+#define EXIDX_CANTUNWIND 1
+
+/*
+ * Entry types.
+ * These are the only entry types that have been seen in the kernel.
+ */
+#define ENTRY_MASK 0xff000000
+#define ENTRY_ARM_SU16 0x80000000
+#define ENTRY_ARM_LU16 0x81000000
+
+/* Instruction masks. */
+#define INSN_VSP_MASK 0xc0
+#define INSN_VSP_SIZE_MASK 0x3f
+#define INSN_STD_MASK 0xf0
+#define INSN_STD_DATA_MASK 0x0f
+#define INSN_POP_TYPE_MASK 0x08
+#define INSN_POP_COUNT_MASK 0x07
+#define INSN_VSP_LARGE_INC_MASK 0xff
+
+/* Instruction definitions */
+#define INSN_VSP_INC 0x00
+#define INSN_VSP_DEC 0x40
+#define INSN_POP_MASKED 0x80
+#define INSN_VSP_REG 0x90
+#define INSN_POP_COUNT 0xa0
+#define INSN_FINISH 0xb0
+#define INSN_POP_REGS 0xb1
+#define INSN_VSP_LARGE_INC 0xb2
+
+/* An item in the exception index table */
+struct unwind_idx {
+ uint32_t offset;
+ uint32_t insn;
+};
+
+/*
+ * These are set in the linker script. Their addresses will be
+ * either the start or end of the exception table or index.
+ */
+extern struct unwind_idx __exidx_start;
+extern struct unwind_idx __exidx_end;
+
+/* Expand a 31-bit signed value to a 32-bit signed value */
+static int32_t expand_prel31(uint32_t prel31)
+{
+
+ return ((int32_t)(prel31 & 0x7fffffffu) << 1) / 2;
+}
+
+/*
+ * Perform a binary search of the index table to find the function
+ * with the largest address that doesn't exceed addr.
+ */
+static struct unwind_idx *find_index(uint32_t addr)
+{
+ vaddr_t idx_start, idx_end;
+ unsigned int min, mid, max;
+ struct unwind_idx *start;
+ struct unwind_idx *item;
+ int32_t prel31_addr;
+ uint32_t func_addr;
+
+ start = &__exidx_start;
+ idx_start = (vaddr_t)&__exidx_start;
+ idx_end = (vaddr_t)&__exidx_end;
+
+ min = 0;
+ max = (idx_end - idx_start) / sizeof(struct unwind_idx);
+
+ while (min != max) {
+ mid = min + (max - min + 1) / 2;
+
+ item = &start[mid];
+
+ prel31_addr = expand_prel31(item->offset);
+ func_addr = (uint32_t)&item->offset + prel31_addr;
+
+ if (func_addr <= addr) {
+ min = mid;
+ } else {
+ max = mid - 1;
+ }
+ }
+
+ return &start[min];
+}
+
+/* Reads the next byte from the instruction list */
+static uint8_t unwind_exec_read_byte(struct unwind_state *state)
+{
+ uint8_t insn;
+
+ /* Read the unwind instruction */
+ insn = (*state->insn) >> (state->byte * 8);
+
+ /* Update the location of the next instruction */
+ if (state->byte == 0) {
+ state->byte = 3;
+ state->insn++;
+ state->entries--;
+ } else
+ state->byte--;
+
+ return insn;
+}
+
+/* Executes the next instruction on the list */
+static bool unwind_exec_insn(struct unwind_state *state)
+{
+ unsigned int insn;
+ uint32_t *vsp = (uint32_t *)state->registers[SP];
+ int update_vsp = 0;
+
+ /* This should never happen */
+ if (state->entries == 0)
+ return false;
+
+ /* Read the next instruction */
+ insn = unwind_exec_read_byte(state);
+
+ if ((insn & INSN_VSP_MASK) == INSN_VSP_INC) {
+ state->registers[SP] += ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_VSP_MASK) == INSN_VSP_DEC) {
+ state->registers[SP] -= ((insn & INSN_VSP_SIZE_MASK) << 2) + 4;
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_MASKED) {
+ unsigned int mask, reg;
+
+ /* Load the mask */
+ mask = unwind_exec_read_byte(state);
+ mask |= (insn & INSN_STD_DATA_MASK) << 8;
+
+ /* A mask of zero is a "refuse to unwind" instruction */
+ if (mask == 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 4; mask && reg < 16; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+
+ /* If we have updated SP, keep its value */
+ if (reg == SP)
+ update_vsp = 0;
+ }
+ }
+
+ } else if ((insn & INSN_STD_MASK) == INSN_VSP_REG &&
+ ((insn & INSN_STD_DATA_MASK) != 13) &&
+ ((insn & INSN_STD_DATA_MASK) != 15)) {
+ /* sp = register */
+ state->registers[SP] =
+ state->registers[insn & INSN_STD_DATA_MASK];
+
+ } else if ((insn & INSN_STD_MASK) == INSN_POP_COUNT) {
+ unsigned int count, reg;
+
+ /* Read how many registers to load */
+ count = insn & INSN_POP_COUNT_MASK;
+
+ /* Update sp */
+ update_vsp = 1;
+
+ /* Pop the registers */
+ for (reg = 4; reg <= 4 + count; reg++) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+
+ /* Check if we are in the pop r14 version */
+ if ((insn & INSN_POP_TYPE_MASK) != 0) {
+ state->registers[14] = *vsp++;
+ }
+
+ } else if (insn == INSN_FINISH) {
+ /* Stop processing */
+ state->entries = 0;
+
+ } else if (insn == INSN_POP_REGS) {
+ unsigned int mask, reg;
+
+ mask = unwind_exec_read_byte(state);
+ if (mask == 0 || (mask & 0xf0) != 0)
+ return false;
+
+ /* Update SP */
+ update_vsp = 1;
+
+ /* Load the registers */
+ for (reg = 0; mask && reg < 4; mask >>= 1, reg++) {
+ if (mask & 1) {
+ state->registers[reg] = *vsp++;
+ state->update_mask |= 1 << reg;
+ }
+ }
+
+ } else if ((insn & INSN_VSP_LARGE_INC_MASK) == INSN_VSP_LARGE_INC) {
+ unsigned int uleb128;
+
+ /* Read the increment value */
+ uleb128 = unwind_exec_read_byte(state);
+
+ state->registers[SP] += 0x204 + (uleb128 << 2);
+
+ } else {
+ /* We hit a new instruction that needs to be implemented */
+ DMSG("Unhandled instruction %.2x\n", insn);
+ return false;
+ }
+
+ if (update_vsp) {
+ state->registers[SP] = (uint32_t)vsp;
+ }
+
+ return true;
+}
+
+/* Performs the unwind of a function */
+static bool unwind_tab(struct unwind_state *state)
+{
+ uint32_t entry;
+
+ /* Set PC to a known value */
+ state->registers[PC] = 0;
+
+ /* Read the personality */
+ entry = *state->insn & ENTRY_MASK;
+
+ if (entry == ENTRY_ARM_SU16) {
+ state->byte = 2;
+ state->entries = 1;
+ } else if (entry == ENTRY_ARM_LU16) {
+ state->byte = 1;
+ state->entries = ((*state->insn >> 16) & 0xFF) + 1;
+ } else {
+ DMSG("Unknown entry: %x\n", entry);
+ return true;
+ }
+
+ while (state->entries > 0) {
+ if (!unwind_exec_insn(state))
+ return true;
+ }
+
+ /*
+ * If the program counter was not updated, load it from the link register.
+ */
+ if (state->registers[PC] == 0) {
+ state->registers[PC] = state->registers[LR];
+
+ /*
+ * If the program counter changed, flag it in the update mask.
+ */
+ if (state->start_pc != state->registers[PC])
+ state->update_mask |= 1 << PC;
+ }
+
+ return false;
+}
+
+bool unwind_stack(struct unwind_state *state)
+{
+ struct unwind_idx *index;
+ bool finished;
+
+ /* Reset the mask of updated registers */
+ state->update_mask = 0;
+
+ /* The pc value is correct and will be overwritten, save it */
+ state->start_pc = state->registers[PC];
+
+ /* Find the item to run */
+ index = find_index(state->start_pc);
+
+ finished = false;
+ if (index->insn != EXIDX_CANTUNWIND) {
+ if (index->insn & (1U << 31)) {
+ /* The data is within the instruction */
+ state->insn = &index->insn;
+ } else {
+ /* A prel31 offset to the unwind table */
+ state->insn = (uint32_t *)
+ ((uintptr_t)&index->insn +
+ expand_prel31(index->insn));
+ }
+ /* Run the unwind function */
+ finished = unwind_tab(state);
+ }
+
+ /* This is the top of the stack, finish */
+ if (index->insn == EXIDX_CANTUNWIND)
+ finished = true;
+
+ return !finished;
+}
+
+/*
+ * These functions are referenced but never used
+ */
+void __aeabi_unwind_cpp_pr0(void);
+void __aeabi_unwind_cpp_pr0(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr1(void);
+void __aeabi_unwind_cpp_pr1(void)
+{
+}
+
+void __aeabi_unwind_cpp_pr2(void);
+void __aeabi_unwind_cpp_pr2(void)
+{
+}
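Two pieces of the lookup above benefit from a worked example. expand_prel31()
converts the 31-bit place-relative (prel31) offsets stored in the exception
index into ordinary signed values by shifting bit 30 up into the sign bit and
arithmetically shifting back down; find_index() then rebases the result on
the address of the entry itself. A small sketch with made-up inputs:

	expand_prel31(0x00000010) == 16;	/* bit 30 clear: positive */
	expand_prel31(0x7ffffffc) == -4;	/* bit 30 set: negative */

	/* the function described by an index entry then lives at: */
	func_addr = (uint32_t)&item->offset + expand_prel31(item->offset);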
diff --git a/core/arch/arm/kernel/unwind_arm64.c b/core/arch/arm/kernel/unwind_arm64.c
new file mode 100644
index 00000000..67a45b12
--- /dev/null
+++ b/core/arch/arm/kernel/unwind_arm64.c
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2015 Linaro Limited
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <kernel/unwind.h>
+#include <kernel/thread.h>
+
+bool unwind_stack(struct unwind_state *frame)
+{
+ uint64_t fp;
+
+ fp = frame->fp;
+ if (!thread_addr_is_in_stack(fp))
+ return false;
+
+ frame->sp = fp + 0x10;
+ /* FP to previous frame (X29) */
+ frame->fp = *(uint64_t *)(fp);
+ /* LR (X30) */
+ frame->pc = *(uint64_t *)(fp + 8) - 4;
+
+ return true;
+}
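The Aarch64 walk relies on the AAPCS64 frame-record convention: a function
that maintains the frame pointer stores a two-word record {previous x29,
saved x30} on the stack and points x29 at it, so each step can hop to the
previous record. A sketch of the layout the two loads above assume:

	/* assumed AAPCS64 frame record, addressed by x29 */
	struct frame_record {
		uint64_t fp;	/* [fp]:     caller's x29, next record */
		uint64_t lr;	/* [fp + 8]: saved x30, return address */
	};

The "- 4" applied to the saved return address backs pc up by one instruction
so it points at the calling bl/blr rather than the instruction after it.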
diff --git a/core/arch/arm/kernel/vfp_a32.S b/core/arch/arm/kernel/vfp_a32.S
index db8cd123..6cc3e77f 100644
--- a/core/arch/arm/kernel/vfp_a32.S
+++ b/core/arch/arm/kernel/vfp_a32.S
@@ -1,41 +1,81 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
#include <asm.S>
+#include <kernel/unwind.h>
.section .text.vfp_asm
/* void vfp_save_extension_regs(uint64_t regs[VFP_NUM_REGS]); */
FUNC vfp_save_extension_regs , :
+UNWIND( .fnstart)
vstm r0!, {d0-d15}
vstm r0, {d16-d31}
bx lr
+UNWIND( .fnend)
END_FUNC vfp_save_extension_regs
/* void vfp_restore_extension_regs(uint64_t regs[VFP_NUM_REGS]); */
FUNC vfp_restore_extension_regs , :
+UNWIND( .fnstart)
vldm r0!, {d0-d15}
vldm r0, {d16-d31}
bx lr
+UNWIND( .fnend)
END_FUNC vfp_restore_extension_regs
/* void vfp_write_fpexc(uint32_t fpexc) */
FUNC vfp_write_fpexc , :
+UNWIND( .fnstart)
vmsr fpexc, r0
bx lr
+UNWIND( .fnend)
END_FUNC vfp_write_fpexc
/* uint32_t vfp_read_fpexc(void) */
FUNC vfp_read_fpexc , :
+UNWIND( .fnstart)
vmrs r0, fpexc
bx lr
+UNWIND( .fnend)
END_FUNC vfp_read_fpexc
/* void vfp_write_fpscr(uint32_t fpscr) */
FUNC vfp_write_fpscr , :
+UNWIND( .fnstart)
vmsr fpscr, r0
bx lr
+UNWIND( .fnend)
END_FUNC vfp_write_fpscr
/* uint32_t vfp_read_fpscr(void) */
FUNC vfp_read_fpscr , :
+UNWIND( .fnstart)
vmrs r0, fpscr
bx lr
+UNWIND( .fnend)
END_FUNC vfp_read_fpscr
diff --git a/core/arch/arm/plat-ls/ls_core_pos.S b/core/arch/arm/plat-ls/ls_core_pos.S
index ae7fc4cc..21cfb4b2 100644
--- a/core/arch/arm/plat-ls/ls_core_pos.S
+++ b/core/arch/arm/plat-ls/ls_core_pos.S
@@ -28,11 +28,14 @@
#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
+#include <kernel/unwind.h>
/* Layerscape platform specific function to calculate core position. */
FUNC get_core_pos , :
+UNWIND( .fnstart)
read_mpidr r0
/* Calculate CorePos = CoreId */
and r0, r0, #MPIDR_CPU_MASK
bx lr
+UNWIND( .fnend)
END_FUNC get_core_pos
diff --git a/core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S b/core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S
index f58a8ad6..227deb30 100644
--- a/core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S
+++ b/core/arch/arm/plat-mediatek/mt8173_core_pos_a32.S
@@ -28,8 +28,10 @@
#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
+#include <kernel/unwind.h>
FUNC get_core_pos , :
+UNWIND( .fnstart)
read_mpidr r0
and r1, r0, #MPIDR_CPU_MASK
and r0, r0, #MPIDR_CLUSTER_MASK
@@ -44,5 +46,6 @@ FUNC get_core_pos , :
*/
add r0, r1, r0, LSR #7
bx lr
+UNWIND( .fnend)
END_FUNC get_core_pos
diff --git a/core/arch/arm/plat-stm/asc.S b/core/arch/arm/plat-stm/asc.S
index 6df35429..055d0d7c 100644
--- a/core/arch/arm/plat-stm/asc.S
+++ b/core/arch/arm/plat-stm/asc.S
@@ -26,6 +26,7 @@
*/
#include <platform_config.h>
#include <asm.S>
+#include <kernel/unwind.h>
#define ST_32BIT_REG(address) (address)
@@ -73,6 +74,7 @@ asc_state:
* We rely on some other SW layer to enable ASC IP (power/clamps/clocks/...)
*/
FUNC asc_init , :
+UNWIND( .fnstart)
ldr r1, =asc_state
mov r0, #0
str r0, [r1]
@@ -80,6 +82,7 @@ FUNC asc_init , :
/* TODO: ensure ASC is mapped (check against core_init_mmu()/core_mmu.c) */
ldr r0, =0
bx lr
+UNWIND( .fnend)
END_FUNC asc_init
/*
@@ -89,6 +92,7 @@ END_FUNC asc_init
* Clobbers r0-r3
*/
FUNC __asc_xmit , :
+UNWIND( .fnstart)
ldr r1, =asc_state
ldr r1, [r1]
@@ -127,6 +131,7 @@ notlf:
_asc_exit:
LDR r0, =0
BX lr
+UNWIND( .fnend)
END_FUNC __asc_xmit
/*
@@ -135,6 +140,7 @@ END_FUNC __asc_xmit
* Clobbers r0-r3
*/
FUNC __asc_flush , :
+UNWIND( .fnstart)
ldr r1, =asc_state
ldr r1, [r1]
@@ -151,6 +157,7 @@ flush_wait:
_flush_exit:
LDR r0, =0
BX lr
+UNWIND( .fnend)
END_FUNC __asc_flush
/*
@@ -160,6 +167,7 @@ END_FUNC __asc_flush
* Clobbers r0-r3
*/
FUNC __asc_xmit_char , :
+UNWIND( .fnstart)
ldr r1, =asc_state
ldr r1, [r1]
@@ -197,4 +205,5 @@ __asc_char_notlf:
__asc_char_exit:
LDR r0, =0
BX lr
+UNWIND( .fnend)
END_FUNC __asc_xmit_char
diff --git a/core/arch/arm/plat-stm/tz_a9init.S b/core/arch/arm/plat-stm/tz_a9init.S
index a12abadb..9c00e3e6 100644
--- a/core/arch/arm/plat-stm/tz_a9init.S
+++ b/core/arch/arm/plat-stm/tz_a9init.S
@@ -34,6 +34,7 @@
#include <kernel/tz_ssvce_def.h>
#include <arm32_macros.S>
#include <asm.S>
+#include <kernel/unwind.h>
#define CPUID_A9_R2P2_H 0x412f
#define CPUID_A9_R2P2_L 0xc092
@@ -54,6 +55,7 @@
* Trap CPU in case of error.
*/
FUNC arm_secboot_identify_cpu , :
+UNWIND( .fnstart)
mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
movw r1, #CPUID_A9_R2P2_L
@@ -72,6 +74,7 @@ _ident_a9_r2p2:
_ident_a9_r3p0:
mov pc, lr /* back to tzinit */
+UNWIND( .fnend)
END_FUNC arm_secboot_identify_cpu
/*
@@ -83,6 +86,7 @@ END_FUNC arm_secboot_identify_cpu
* Trap CPU in case of error.
*/
FUNC arm_cl2_config , :
+UNWIND( .fnstart)
mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
movw r1, #CPUID_A9_R3P0_L
@@ -185,6 +189,7 @@ _config_l2cc_r3p0:
str r1, [r0]
mov pc, lr
+UNWIND( .fnend)
END_FUNC arm_cl2_config
/* End of arm_cl2_config */
@@ -201,6 +206,7 @@ END_FUNC arm_cl2_config
* TODO: to be moved to PL310 code (tz_svce_pl310.S ?)
*/
FUNC arm_cl2_enable , :
+UNWIND( .fnstart)
/* Enable PL310 ctrl -> only set lsb bit */
@@ -219,6 +225,7 @@ FUNC arm_cl2_enable , :
mcr p15, 0, r0, c1, c0, 1
mov pc, lr
+UNWIND( .fnend)
END_FUNC arm_cl2_enable
/*
@@ -230,6 +237,7 @@ END_FUNC arm_cl2_enable
* Trap CPU in case of error.
*/
FUNC plat_cpu_reset_early , :
+UNWIND( .fnstart)
/* only r3p0 is supported */
mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
@@ -296,6 +304,7 @@ _early_a9_r3p0:
str r1, [r0]
mov pc, lr /* back to tzinit */
+UNWIND( .fnend)
END_FUNC plat_cpu_reset_early
/*
@@ -307,6 +316,7 @@ END_FUNC plat_cpu_reset_early
* Trap CPU in case of error.
*/
FUNC arm_secboot_errata , :
+UNWIND( .fnstart)
mrc p15, 0, r0, c0, c0, 0 /* read A9 ID */
movw r1, #CPUID_A9_R2P2_L
@@ -325,6 +335,7 @@ _errata_a9_r2p2:
_errata_a9_r3p0:
mov pc, lr
+UNWIND( .fnend)
END_FUNC arm_secboot_errata
/*
@@ -338,6 +349,7 @@ END_FUNC arm_secboot_errata
* TODO: size optim in code
*/
FUNC plat_cpu_reset_late , :
+UNWIND( .fnstart)
mrc p15, 0, r0, c0, c0, 5
ands r0, #3
@@ -456,4 +468,5 @@ loop_1:
mcr p15, 0, r0, c1, c1, 0 /* write updated value in Secure Configuration Register */
mov pc, lr
+UNWIND( .fnend)
END_FUNC plat_cpu_reset_late
diff --git a/core/arch/arm/plat-sunxi/entry.S b/core/arch/arm/plat-sunxi/entry.S
index 79878d95..f181b5ff 100644
--- a/core/arch/arm/plat-sunxi/entry.S
+++ b/core/arch/arm/plat-sunxi/entry.S
@@ -33,6 +33,7 @@
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
.section .text.boot
.align 5
@@ -48,6 +49,8 @@ FUNC _start , :
END_FUNC _start
LOCAL_FUNC reset , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
read_sctlr r0
orr r0, r0, #SCTLR_A
write_sctlr r0
@@ -96,5 +99,6 @@ LOCAL_FUNC reset , :
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
smc #0
b . /* SMC should never return */
+UNWIND( .fnend)
END_FUNC reset
diff --git a/core/arch/arm/plat-sunxi/kern.ld.S b/core/arch/arm/plat-sunxi/kern.ld.S
index c1ad3786..cb886c9c 100644
--- a/core/arch/arm/plat-sunxi/kern.ld.S
+++ b/core/arch/arm/plat-sunxi/kern.ld.S
@@ -107,6 +107,12 @@ SECTIONS
.ARM.exidx : { *(.ARM.exidx* .gnu.linkonce.armexidx.*) }
__exidx_end = .;
+ .ARM.extab : {
+ __extab_start = .;
+ *(.ARM.extab*)
+ __extab_end = .;
+ }
+
.rodata : ALIGN(4) {
__rodata_start = .;
*(.rodata .rodata.* .gnu.linkonce.r.*)
diff --git a/core/arch/arm/plat-sunxi/smp_boot.S b/core/arch/arm/plat-sunxi/smp_boot.S
index 498d4328..b084d6a7 100644
--- a/core/arch/arm/plat-sunxi/smp_boot.S
+++ b/core/arch/arm/plat-sunxi/smp_boot.S
@@ -31,6 +31,8 @@
#include <sm/teesmc.h>
#include <sm/teesmc_opteed_macros.h>
#include <sm/teesmc_opteed.h>
+#include <kernel/unwind.h>
+
FUNC smp_init_vector , :
b . /* Reset */
@@ -44,6 +46,8 @@ FUNC smp_init_vector , :
END_FUNC smp_init_vector
FUNC sunxi_secondary_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/* secondary CPU internal initialization */
read_sctlr r0
orr r0, r0, #SCTLR_A
@@ -92,5 +96,5 @@ FUNC sunxi_secondary_entry , :
mov r0, #TEESMC_OPTEED_RETURN_ENTRY_DONE
smc #0
b . /* SMC should not return */
-
+UNWIND( .fnend)
END_FUNC sunxi_secondary_entry
diff --git a/core/arch/arm/plat-sunxi/smp_fixup.S b/core/arch/arm/plat-sunxi/smp_fixup.S
index 18701cc4..bf533b45 100644
--- a/core/arch/arm/plat-sunxi/smp_fixup.S
+++ b/core/arch/arm/plat-sunxi/smp_fixup.S
@@ -24,6 +24,9 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include <asm.S>
+#include <kernel/unwind.h>
+
#define SLAVE_SNOOPCTL_OFFSET 0
#define SNOOPCTL_SNOOP_ENABLE (1 << 0)
#define SNOOPCTL_DVM_ENABLE (1 << 1)
@@ -43,8 +46,8 @@
#define SUNXI_CCU_C0_CFG_OFFSET (0x54)
#define SUNXI_CCU_C1_CFG_OFFSET (0x58)
-.globl sunxi_secondary_fixup
-sunxi_secondary_fixup:
+FUNC sunxi_secondary_fixup , :
+UNWIND( .fnstart)
mrc p15, 0, r0, c0, c0, 5 /* MPIDR */
ubfx r0, r0, #8, #4 /* cluster */
@@ -109,3 +112,5 @@ sunxi_secondary_fixup:
2:
/* a80 platform-specific operations done. */
bx lr
+UNWIND( .fnend)
+END_FUNC sunxi_secondary_fixup
diff --git a/core/arch/arm/plat-vexpress/juno_core_pos_a32.S b/core/arch/arm/plat-vexpress/juno_core_pos_a32.S
index 699d7f51..a75a65de 100644
--- a/core/arch/arm/plat-vexpress/juno_core_pos_a32.S
+++ b/core/arch/arm/plat-vexpress/juno_core_pos_a32.S
@@ -28,9 +28,11 @@
#include <asm.S>
#include <arm.h>
#include <arm32_macros.S>
+#include <kernel/unwind.h>
/* For Juno, number the two A57s as 4 to 5 and the A53s as 0 to 3 */
FUNC get_core_pos , :
+UNWIND( .fnstart)
read_mpidr r0
/* Calculate CorePos = ((ClusterId ^ 1) * 4) + CoreId */
and r1, r0, #MPIDR_CPU_MASK
@@ -38,5 +40,6 @@ FUNC get_core_pos , :
eor r0, r0, #(1 << MPIDR_CLUSTER_SHIFT)
add r0, r1, r0, LSR #6
bx lr
+UNWIND( .fnend)
END_FUNC get_core_pos
diff --git a/core/arch/arm/sm/sm_a32.S b/core/arch/arm/sm/sm_a32.S
index d2b1dc4e..91dbc804 100644
--- a/core/arch/arm/sm/sm_a32.S
+++ b/core/arch/arm/sm/sm_a32.S
@@ -29,10 +29,13 @@
#include <arm.h>
#include <arm32_macros.S>
#include <sm/teesmc.h>
+#include <kernel/unwind.h>
.section .text.sm_asm
LOCAL_FUNC sm_save_modes_regs , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/* User mode registers have to be saved from system mode */
cps #CPSR_MODE_SYS
stm r0!, {sp, lr}
@@ -57,10 +60,13 @@ LOCAL_FUNC sm_save_modes_regs , :
ldm r1, {r2-r3} /* Load SPSR and LR from the stack */
stm r0!, {r2-r3} /* Store SPSR and LR in context */
bx lr
+UNWIND( .fnend)
END_FUNC sm_save_modes_regs
/* Restores the mode specific registers */
LOCAL_FUNC sm_restore_modes_regs , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/* User mode registers have to be restored from system mode */
cps #CPSR_MODE_SYS
ldm r0!, {sp, lr}
@@ -85,9 +91,12 @@ LOCAL_FUNC sm_restore_modes_regs , :
ldm r0!, {r2-r3} /* Load SPSR and LR from context */
stm r1, {r2-r3} /* Store SPSR and LR in stack */
bx lr
+UNWIND( .fnend)
END_FUNC sm_restore_modes_regs
LOCAL_FUNC sm_smc_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
srsdb sp!, #CPSR_MODE_MON
push {r0-r3}
/* Positions relative to stack pointer */
@@ -152,6 +161,7 @@ LOCAL_FUNC sm_smc_entry , :
.smc_exit:
pop {r0-r3}
rfefd sp!
+UNWIND( .fnend)
END_FUNC sm_smc_entry
/*
@@ -162,6 +172,8 @@ END_FUNC sm_smc_entry
* from FIQ.
*/
LOCAL_FUNC sm_fiq_entry , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
/* FIQ has a +4 offset for lr compared to preferred return address */
sub lr, lr, #4
srsdb sp!, #CPSR_MODE_MON
@@ -194,10 +206,13 @@ LOCAL_FUNC sm_fiq_entry , :
bl sm_restore_modes_regs
rfefd sp!
+UNWIND( .fnend)
END_FUNC sm_fiq_entry
.align 5
LOCAL_FUNC sm_vect_table , :
+UNWIND( .fnstart)
+UNWIND( .cantunwind)
b . /* Reset */
b . /* Undefined instruction */
b sm_smc_entry /* Secure monitor call */
@@ -206,11 +221,14 @@ LOCAL_FUNC sm_vect_table , :
b . /* Reserved */
b . /* IRQ */
b sm_fiq_entry /* FIQ */
+UNWIND( .fnend)
END_FUNC sm_vect_table
/* void sm_init(vaddr_t stack_pointer); */
FUNC sm_init , :
+UNWIND( .fnstart)
push {r0, lr}
+UNWIND( .save {r0, lr})
/* Set monitor stack */
mrs r1, cpsr
@@ -223,4 +241,5 @@ FUNC sm_init , :
write_mvbar r0
pop {r0, pc}
+UNWIND( .fnend)
END_FUNC sm_init
diff --git a/core/arch/arm/tee/arch_svc_a32.S b/core/arch/arm/tee/arch_svc_a32.S
index dccd23cb..72073cdb 100644
--- a/core/arch/arm/tee/arch_svc_a32.S
+++ b/core/arch/arm/tee/arch_svc_a32.S
@@ -30,6 +30,7 @@
#include <arm.h>
#include <tee_api_defines.h>
#include <kernel/thread.h>
+#include <kernel/unwind.h>
.section .text.arch_svc_asm
@@ -39,7 +40,9 @@
* Called from tee_svc_handler()
*/
FUNC tee_svc_do_call , :
+UNWIND( .fnstart)
push {r5-r9, lr}
+UNWIND( .save {r5-r9, lr})
mov r7, sp
mov r8, r0
mov r9, r1
@@ -77,6 +80,7 @@ FUNC tee_svc_do_call , :
.Lret:
mov sp, r7
pop {r5-r9, pc}
+UNWIND( .fnend)
END_FUNC tee_svc_do_call
/*
@@ -90,10 +94,12 @@ END_FUNC tee_svc_do_call
* thread_unwind_user_mode().
*/
FUNC syscall_sys_return , :
+UNWIND( .fnstart)
mov r1, #0 /* panic = false */
mov r2, #0 /* panic_code = 0 */
mov r3, r8
b tee_svc_sys_return_helper
+UNWIND( .fnend)
END_FUNC syscall_sys_return
/*
@@ -105,9 +111,11 @@ END_FUNC syscall_sys_return
* thread_svc_handler() in r8.
*/
FUNC syscall_panic , :
+UNWIND( .fnstart)
mov r1, #1 /* panic = true */
mov r2, r0 /* panic_code = r0 */
mov r3, r8
ldr r0, =TEE_ERROR_TARGET_DEAD
b tee_svc_sys_return_helper
+UNWIND( .fnend)
END_FUNC syscall_panic
diff --git a/mk/config.mk b/mk/config.mk
index aa278fea..546d6483 100644
--- a/mk/config.mk
+++ b/mk/config.mk
@@ -116,3 +116,9 @@ CFG_LIBUTILS_WITH_ISOC ?= y
# nothing with ` -mgeneral-regs-only`)
# With CFG_TA_FLOAT_SUPPORT enabled, TA code is free to use floating point types
CFG_TA_FLOAT_SUPPORT ?= y
+
+# Enable stack unwinding for aborts from kernel mode if CFG_TEE_CORE_DEBUG
+# is enabled
+ifeq ($(CFG_TEE_CORE_DEBUG),1)
+CFG_CORE_UNWIND ?= y
+endif
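The ?= assignment means unwinding simply follows the debug setting: builds
with CFG_TEE_CORE_DEBUG=1 get CFG_CORE_UNWIND=y unless it is explicitly
overridden, while release builds skip the ~8 KiB of Aarch32 unwind tables but
can still opt in by setting CFG_CORE_UNWIND=y on the make command line.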
diff --git a/scripts/mem_usage.awk b/scripts/mem_usage.awk
index 5dae2d01..c5a8478a 100644
--- a/scripts/mem_usage.awk
+++ b/scripts/mem_usage.awk
@@ -62,7 +62,8 @@ function add_section(_name, _addr, _offs, _size)
size = $(name_offs + 4);
flags = $(name_offs + 6);
- if (flags == "AX" || flags == "WA" || flags == "A") {
+ if (flags == "AX" || flags == "WA" || flags == "A" ||
+ flags == "AL") {
add_section(name, addr, offs, size);
}
}