path: root/services/spd/tspd
author	Soby Mathew <soby.mathew@arm.com>	2014-05-09 20:49:17 +0100
committer	Andrew Thoelke <andrew.thoelke@arm.com>	2014-05-23 08:46:21 +0100
commit	239b04fa31647100c537852b4a3fc8bd47e33aa6 (patch)
tree	3b2c73b2d39ed0a460375b42f1f3143703646fbc /services/spd/tspd
parent	a20a81e5b4a19969673f672523b946647f5d545d (diff)
Non-Secure Interrupt support during Standard SMC processing in TSP
Implement support for non-secure interrupts preempting a Standard SMC call in the TSP (S-EL1). Whenever an IRQ is trapped in the secure world, control is securely handed over to the normal world to process the interrupt; the normal world then issues the "resume" SMC call to resume the previously interrupted SMC call.

Fixes ARM-software/tf-issues#105

Change-Id: I72b760617dee27438754cdfc9fe9bcf4cc024858
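For context, the protocol this enables, seen from a normal world client, is roughly the following. The smc64() wrapper and its three-argument signature are hypothetical and only sketch the idea; TSP_STD_FID(TSP_ADD), TSP_FID_RESUME and SMC_PREEMPTED are the identifiers used by the patch and its headers.

#include <stdint.h>

/* Hypothetical normal world SMC wrapper: issues the SMC and returns x0. */
extern uint64_t smc64(uint64_t smc_fid, uint64_t x1, uint64_t x2);

/* Sketch of a standard (preemptible) TSP addition issued from the normal world. */
static uint64_t tsp_add_std(uint64_t x1, uint64_t x2)
{
	uint64_t ret = smc64(TSP_STD_FID(TSP_ADD), x1, x2);

	/*
	 * Each time a non-secure interrupt preempts the TSP, the dispatcher
	 * hands SMC_PREEMPTED back to the normal world. Resume the
	 * interrupted call until the TSP returns the final result.
	 */
	while (ret == SMC_PREEMPTED)
		ret = smc64(TSP_FID_RESUME, 0, 0);

	return ret;
}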
Diffstat (limited to 'services/spd/tspd')
-rw-r--r--  services/spd/tspd/tspd_main.c     136
-rw-r--r--  services/spd/tspd/tspd_private.h   20
2 files changed, 129 insertions, 27 deletions
diff --git a/services/spd/tspd/tspd_main.c b/services/spd/tspd/tspd_main.c
index 74e2af0..ec2d334 100644
--- a/services/spd/tspd/tspd_main.c
+++ b/services/spd/tspd/tspd_main.c
@@ -257,7 +257,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
uint64_t flags)
{
cpu_context_t *ns_cpu_context;
- gp_regs_t *ns_gp_regs;
unsigned long mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr), ns;
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
@@ -268,6 +267,31 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
switch (smc_fid) {
/*
+ * This function ID is used by TSP to indicate that it was
+ * preempted by a normal world IRQ.
+ *
+ */
+ case TSP_PREEMPTED:
+ if (ns)
+ SMC_RET1(handle, SMC_UNK);
+
+ assert(handle == cm_get_context(mpidr, SECURE));
+ cm_el1_sysregs_context_save(SECURE);
+ /* Get a reference to the non-secure context */
+ ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
+ assert(ns_cpu_context);
+
+ /*
+ * Restore non-secure state. There is no need to save the
+ * secure system register context since the TSP was supposed
+ * to preserve it during S-EL1 interrupt handling.
+ */
+ cm_el1_sysregs_context_restore(NON_SECURE);
+ cm_set_next_eret_context(NON_SECURE);
+
+ SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
+
+ /*
* This function ID is used only by the TSP to indicate that it has
* finished handling a S-EL1 FIQ interrupt. Execution should resume
* in the normal world.
@@ -357,9 +381,6 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
*/
tspd_synchronous_sp_exit(tsp_ctx, x1);
- /* Should never reach here */
- assert(0);
-
/*
* These function IDs are used only by the SP to indicate it has
* finished:
@@ -392,18 +413,20 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
*/
tspd_synchronous_sp_exit(tsp_ctx, x1);
- /* Should never reach here */
- assert(0);
-
/*
* Request from non-secure client to perform an
* arithmetic operation or response from secure
* payload to an earlier request.
*/
- case TSP_FID_ADD:
- case TSP_FID_SUB:
- case TSP_FID_MUL:
- case TSP_FID_DIV:
+ case TSP_FAST_FID(TSP_ADD):
+ case TSP_FAST_FID(TSP_SUB):
+ case TSP_FAST_FID(TSP_MUL):
+ case TSP_FAST_FID(TSP_DIV):
+
+ case TSP_STD_FID(TSP_ADD):
+ case TSP_STD_FID(TSP_SUB):
+ case TSP_STD_FID(TSP_MUL):
+ case TSP_STD_FID(TSP_DIV):
if (ns) {
/*
* This is a fresh request from the non-secure client.
@@ -412,11 +435,15 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
* state and send the request to the secure payload.
*/
assert(handle == cm_get_context(mpidr, NON_SECURE));
+
+ /* Check if we are already preempted */
+ if (get_std_smc_active_flag(tsp_ctx->state))
+ SMC_RET1(handle, SMC_UNK);
+
cm_el1_sysregs_context_save(NON_SECURE);
/* Save x1 and x2 for use by TSP_GET_ARGS call below */
- SMC_SET_GP(handle, CTX_GPREG_X1, x1);
- SMC_SET_GP(handle, CTX_GPREG_X2, x2);
+ store_tsp_args(tsp_ctx, x1, x2);
/*
* We are done stashing the non-secure context. Ask the
@@ -431,17 +458,27 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
* from this function.
*/
assert(&tsp_ctx->cpu_ctx == cm_get_context(mpidr, SECURE));
- set_aapcs_args7(&tsp_ctx->cpu_ctx, smc_fid, x1, x2, 0, 0,
- 0, 0, 0);
- cm_set_elr_el3(SECURE, (uint64_t) tsp_entry_info->fast_smc_entry);
+
+ /* Set appropriate entry for SMC.
+ * We expect the TSP to manage the PSTATE.I and PSTATE.F
+ * flags as appropriate.
+ */
+ if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
+ cm_set_elr_el3(SECURE, (uint64_t)
+ tsp_entry_info->fast_smc_entry);
+ } else {
+ set_std_smc_active_flag(tsp_ctx->state);
+ cm_set_elr_el3(SECURE, (uint64_t)
+ tsp_entry_info->std_smc_entry);
+ }
+
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
-
- return smc_fid;
+ SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
} else {
/*
* This is the result from the secure client of an
- * earlier request. The results are in x1-x2. Copy it
+ * earlier request. The results are in x1-x3. Copy it
* into the non-secure context, save the secure state
* and return to the non-secure state.
*/
@@ -451,18 +488,53 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
- ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
/* Restore non-secure state */
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
-
- SMC_RET2(ns_gp_regs, x1, x2);
+ if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD)
+ clr_std_smc_active_flag(tsp_ctx->state);
+ SMC_RET3(ns_cpu_context, x1, x2, x3);
}
break;
/*
+ * Request from the non-secure world to resume the preempted
+ * Standard SMC call.
+ */
+ case TSP_FID_RESUME:
+ /* RESUME should be invoked only by normal world */
+ if (!ns) {
+ assert(0);
+ break;
+ }
+
+ /*
+ * This is a resume request from the non-secure client.
+ * Save the non-secure state and send the request to
+ * the secure payload.
+ */
+ assert(handle == cm_get_context(mpidr, NON_SECURE));
+
+ /* Check if we are already preempted before resume */
+ if (!get_std_smc_active_flag(tsp_ctx->state))
+ SMC_RET1(handle, SMC_UNK);
+
+ cm_el1_sysregs_context_save(NON_SECURE);
+
+ /*
+ * We are done stashing the non-secure context. Ask the
+ * secure payload to do the work now.
+ */
+
+ /* We just need to return to the preempted point in
+ * TSP and the execution will resume as normal.
+ */
+ cm_el1_sysregs_context_restore(SECURE);
+ cm_set_next_eret_context(SECURE);
+
+ /*
* This is a request from the secure payload for more arguments
* for an ongoing arithmetic operation requested by the
* non-secure world. Simply return the arguments from the non-
@@ -475,10 +547,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/* Get a reference to the non-secure context */
ns_cpu_context = cm_get_context(mpidr, NON_SECURE);
assert(ns_cpu_context);
- ns_gp_regs = get_gpregs_ctx(ns_cpu_context);
- SMC_RET2(handle, read_ctx_reg(ns_gp_regs, CTX_GPREG_X1),
- read_ctx_reg(ns_gp_regs, CTX_GPREG_X2));
+ get_tsp_args(tsp_ctx, x1, x2);
+ SMC_RET2(handle, x1, x2);
case TOS_CALL_COUNT:
/*
@@ -502,9 +573,9 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, SMC_UNK);
}
-/* Define a SPD runtime service descriptor */
+/* Define a SPD runtime service descriptor for fast SMC calls */
DECLARE_RT_SVC(
- spd,
+ tspd_fast,
OEN_TOS_START,
OEN_TOS_END,
@@ -512,3 +583,14 @@ DECLARE_RT_SVC(
tspd_setup,
tspd_smc_handler
);
+
+/* Define a SPD runtime service descriptor for standard SMC calls */
+DECLARE_RT_SVC(
+ tspd_std,
+
+ OEN_TOS_START,
+ OEN_TOS_END,
+ SMC_TYPE_STD,
+ NULL,
+ tspd_smc_handler
+);
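(Aside, not part of the patch: both descriptors point at tspd_smc_handler because, per the SMC Calling Convention, the call type is encoded in bit[31] of the function ID and the runtime service framework registers handlers per call type. The handler then picks the TSP entry point from that bit, roughly as sketched below; GET_SMC_TYPE() and SMC_TYPE_* are defined in the generic runtime services header, and the shift/mask values here are assumptions shown only for illustration.)

/* Illustrative sketch of the function ID type check used in the handler. */
#define FUNCID_TYPE_SHIFT	31
#define FUNCID_TYPE_MASK	0x1
#define GET_SMC_TYPE(fid)	(((fid) >> FUNCID_TYPE_SHIFT) & FUNCID_TYPE_MASK)

/*
 * SMC_TYPE_FAST selects tsp_entry_info->fast_smc_entry; anything else is a
 * standard call and enters via tsp_entry_info->std_smc_entry, with the
 * std-SMC-active flag set so a later TSP_FID_RESUME can be validated.
 */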
diff --git a/services/spd/tspd/tspd_private.h b/services/spd/tspd/tspd_private.h
index b9cf496..7395bb9 100644
--- a/services/spd/tspd/tspd_private.h
+++ b/services/spd/tspd/tspd_private.h
@@ -125,6 +125,12 @@
#include <cassert.h>
#include <stdint.h>
+/*
+ * The number of arguments to save during an SMC call for the TSP.
+ * Currently only x1 and x2 are used by the TSP.
+ */
+#define TSP_NUM_ARGS 0x2
+
/* AArch64 callee saved general purpose register context structure. */
DEFINE_REG_STRUCT(c_rt_regs, TSPD_C_RT_CTX_ENTRIES);
@@ -147,6 +153,8 @@ CASSERT(TSPD_C_RT_CTX_SIZE == sizeof(c_rt_regs_t), \
* 'c_rt_ctx' - stack address to restore C runtime context from after
* returning from a synchronous entry into the SP.
* 'cpu_ctx' - space to maintain SP architectural state
+ * 'saved_tsp_args' - space to store arguments for TSP arithmetic operations
+ * which will be queried by the TSP using the TSP_GET_ARGS SMC.
******************************************************************************/
typedef struct tsp_context {
uint64_t saved_elr_el3;
@@ -155,8 +163,20 @@ typedef struct tsp_context {
uint64_t mpidr;
uint64_t c_rt_ctx;
cpu_context_t cpu_ctx;
+ uint64_t saved_tsp_args[TSP_NUM_ARGS];
} tsp_context_t;
+/* Helper macros to store and retrieve tsp args from tsp_context */
+#define store_tsp_args(tsp_ctx, x1, x2) do {\
+ tsp_ctx->saved_tsp_args[0] = x1;\
+ tsp_ctx->saved_tsp_args[1] = x2;\
+ } while (0)
+
+#define get_tsp_args(tsp_ctx, x1, x2) do {\
+ x1 = tsp_ctx->saved_tsp_args[0];\
+ x2 = tsp_ctx->saved_tsp_args[1];\
+ } while (0)
+
/* TSPD power management handlers */
extern const spd_pm_ops_t tspd_pm;
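
(Aside, not part of the patch: the get/set/clr_std_smc_active_flag() helpers used in tspd_main.c above are defined elsewhere in tspd_private.h and do not appear in this hunk. Below is a minimal sketch of how such helpers can track an in-flight Standard SMC as a single bit of tsp_ctx->state; the shift value is an assumption for illustration only.)

/* Sketch only: one bit of tsp_ctx->state marks a Standard SMC in progress. */
#define STD_SMC_ACTIVE_FLAG_SHIFT	2
#define STD_SMC_ACTIVE_FLAG_MASK	0x1

#define get_std_smc_active_flag(state) \
	(((state) >> STD_SMC_ACTIVE_FLAG_SHIFT) & STD_SMC_ACTIVE_FLAG_MASK)
#define set_std_smc_active_flag(state) \
	((state) |= (1 << STD_SMC_ACTIVE_FLAG_SHIFT))
#define clr_std_smc_active_flag(state) \
	((state) &= ~(STD_SMC_ACTIVE_FLAG_MASK << STD_SMC_ACTIVE_FLAG_SHIFT))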