Diffstat (limited to 'core')
-rw-r--r--  core/arch/arm/kernel/user_ta.c  | 2
-rw-r--r--  core/arch/arm/mm/tee_mmu.c      | 4
-rw-r--r--  core/arch/arm/mm/tee_pager.c    | 2
-rw-r--r--  core/kernel/tee_ta_manager.c    | 9
4 files changed, 11 insertions(+), 6 deletions(-)
diff --git a/core/arch/arm/kernel/user_ta.c b/core/arch/arm/kernel/user_ta.c
index a9bdae07..db587d97 100644
--- a/core/arch/arm/kernel/user_ta.c
+++ b/core/arch/arm/kernel/user_ta.c
@@ -525,7 +525,7 @@ static void set_ta_ctx_ops(struct tee_ta_ctx *ctx)
bool is_user_ta_ctx(struct tee_ta_ctx *ctx)
{
- return ctx->ops == _user_ta_ops;
+ return ctx && ctx->ops == _user_ta_ops;
}
static TEE_Result check_ta_store(void)
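
Note: the hunk above makes is_user_ta_ctx() itself reject a NULL context, which is
what lets the callers patched in the other files drop their own NULL guards. A
minimal standalone sketch of that NULL-safe predicate pattern; the struct and
symbol names here are simplified stand-ins, not the real OP-TEE definitions:

/* Sketch only: "ta_ctx"/"user_ops" stand in for OP-TEE's context and ops. */
#include <stdbool.h>

struct ctx_ops { int unused; };

static const struct ctx_ops user_ops;	/* stand-in for what _user_ta_ops points at */

struct ta_ctx {
	const struct ctx_ops *ops;
};

static bool is_user_ctx(const struct ta_ctx *ctx)
{
	/* && short-circuits, so ctx->ops is never read when ctx == NULL */
	return ctx && ctx->ops == &user_ops;
}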
diff --git a/core/arch/arm/mm/tee_mmu.c b/core/arch/arm/mm/tee_mmu.c
index 68f0f53d..bb4ad279 100644
--- a/core/arch/arm/mm/tee_mmu.c
+++ b/core/arch/arm/mm/tee_mmu.c
@@ -817,9 +817,9 @@ void tee_mmu_set_ctx(struct tee_ta_ctx *ctx)
*
* Save translation tables in a cache if it's a user TA.
*/
- pgt_free(&tsd->pgt_cache, tsd->ctx && is_user_ta_ctx(tsd->ctx));
+ pgt_free(&tsd->pgt_cache, is_user_ta_ctx(tsd->ctx));
- if (ctx && is_user_ta_ctx(ctx)) {
+ if (is_user_ta_ctx(ctx)) {
struct core_mmu_user_map map;
struct user_ta_ctx *utc = to_user_ta_ctx(ctx);
diff --git a/core/arch/arm/mm/tee_pager.c b/core/arch/arm/mm/tee_pager.c
index 4d171918..76a53067 100644
--- a/core/arch/arm/mm/tee_pager.c
+++ b/core/arch/arm/mm/tee_pager.c
@@ -581,7 +581,7 @@ static struct tee_pager_area *find_uta_area(vaddr_t va)
{
struct tee_ta_ctx *ctx = thread_get_tsd()->ctx;
- if (!ctx || !is_user_ta_ctx(ctx))
+ if (!is_user_ta_ctx(ctx))
return NULL;
return find_area(to_user_ta_ctx(ctx)->areas, va);
}
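
Note: the two mm/ hunks above show the caller-side effect of the NULL-safe
predicate: the explicit "tsd->ctx &&", "ctx &&" and "!ctx ||" guards become
redundant. A self-contained sketch of that simplification, again with
simplified stand-in names rather than the real OP-TEE types:

#include <stdbool.h>
#include <stddef.h>

struct ta_ctx { const void *ops; };

static const int user_ops_tag;		/* stand-in for _user_ta_ops */

static bool is_user_ctx(const struct ta_ctx *ctx)
{
	return ctx && ctx->ops == &user_ops_tag;	/* NULL-safe predicate */
}

/* Hypothetical caller: previously it needed "if (!ctx || !is_user_ctx(ctx))". */
static const char *find_user_area(const struct ta_ctx *ctx)
{
	if (!is_user_ctx(ctx))		/* also covers ctx == NULL */
		return NULL;
	return "user area";		/* placeholder for the real lookup */
}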
diff --git a/core/kernel/tee_ta_manager.c b/core/kernel/tee_ta_manager.c
index 8b9ccf56..ce6a93de 100644
--- a/core/kernel/tee_ta_manager.c
+++ b/core/kernel/tee_ta_manager.c
@@ -330,7 +330,7 @@ static bool check_params(struct tee_ta_session *sess,
* When CFG_SECURE_DATA_PATH is enabled, OP-TEE entry allows SHM and
* SDP memory references. Only TAs flagged SDP can access SDP memory.
*/
- if (sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
+ if (sess->ctx && sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)
return true;
for (n = 0; n < TEE_NUM_PARAMS; n++) {
@@ -734,7 +734,7 @@ static void update_current_ctx(struct thread_specific_data *tsd)
* If ctx->mmu == NULL we must not have user mapping active,
* if ctx->mmu != NULL we must have user mapping active.
*/
- if (((ctx && is_user_ta_ctx(ctx) ?
+ if (((is_user_ta_ctx(ctx) ?
to_user_ta_ctx(ctx)->vm_info : NULL) == NULL) ==
core_mmu_user_mapping_is_active())
panic("unexpected active mapping");
@@ -787,6 +787,11 @@ static void dump_state(struct tee_ta_ctx *ctx)
struct tee_ta_session *s = NULL;
bool active __maybe_unused;
+ if (!ctx) {
+ EMSG("No TA status: null context reference");
+ return;
+ }
+
active = ((tee_ta_get_current_session(&s) == TEE_SUCCESS) &&
s && s->ctx == ctx);
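
Two notes on the tee_ta_manager.c hunks, illustrated by a standalone sketch. In
the check_params() change, bitwise & binds tighter than &&, so the new guard
parses as "sess->ctx && (sess->ctx->flags & TA_FLAG_SECURE_DATA_PATH)" and
needs no extra parentheses. In dump_state(), the added check bails out early on
a NULL context before anything dereferences it. The names and the flag value
below are simplified stand-ins, not the OP-TEE definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_SDP	0x1	/* stand-in for TA_FLAG_SECURE_DATA_PATH */

struct ta_ctx { uint32_t flags; };
struct ta_session { struct ta_ctx *ctx; };

static bool session_allows_sdp(const struct ta_session *sess)
{
	/* parses as: sess->ctx && (sess->ctx->flags & FLAG_SDP) */
	return sess->ctx && sess->ctx->flags & FLAG_SDP;
}

static void dump_ctx_state(const struct ta_ctx *ctx)
{
	if (!ctx) {
		fprintf(stderr, "No TA status: null context reference\n");
		return;	/* early return instead of dereferencing NULL */
	}
	fprintf(stderr, "flags: 0x%x\n", (unsigned int)ctx->flags);
}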