about summary refs log tree commit diff
path: root/core/arch/arm/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'core/arch/arm/kernel')
-rw-r--r--  core/arch/arm/kernel/link_dummies.c |  4 ++--
-rw-r--r--  core/arch/arm/kernel/mutex.c        | 13 +++----------
-rw-r--r--  core/arch/arm/kernel/wait_queue.c   | 17 +++++++----------
3 files changed, 12 insertions, 22 deletions
diff --git a/core/arch/arm/kernel/link_dummies.c b/core/arch/arm/kernel/link_dummies.c
index 1aa4044d..2c49caf5 100644
--- a/core/arch/arm/kernel/link_dummies.c
+++ b/core/arch/arm/kernel/link_dummies.c
@@ -33,7 +33,7 @@ __thread_std_smc_entry(struct thread_smc_args *args __unused)
}
void __section(".text.dummy.__wq_rpc")
__wq_rpc(uint32_t func __unused, int id __unused,
- const void *sync_obj __unused, int owner __unused,
- const char *fname __unused, int lineno __unused)
+ const void *sync_obj __unused, const char *fname __unused,
+ int lineno __unused)
{
}
diff --git a/core/arch/arm/kernel/mutex.c b/core/arch/arm/kernel/mutex.c
index 0a582f80..713b3ff2 100644
--- a/core/arch/arm/kernel/mutex.c
+++ b/core/arch/arm/kernel/mutex.c
@@ -28,7 +28,6 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
uint32_t old_itr_status;
bool can_lock;
struct wait_queue_elem wqe;
- int owner = MUTEX_OWNER_ID_NONE;
/*
* If the mutex is locked we need to initialize the wqe
@@ -44,8 +43,6 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
can_lock = !m->state;
if (!can_lock) {
wq_wait_init(&m->wq, &wqe, false /* wait_read */);
- owner = m->owner_id;
- assert(owner != thread_get_id_may_fail());
} else {
m->state = -1; /* write locked */
}
@@ -57,7 +54,7 @@ static void __mutex_lock(struct mutex *m, const char *fname, int lineno)
* Someone else is holding the lock, wait in normal
* world for the lock to become available.
*/
- wq_wait_final(&m->wq, &wqe, m, owner, fname, lineno);
+ wq_wait_final(&m->wq, &wqe, m, fname, lineno);
} else
return;
}
@@ -136,7 +133,6 @@ static void __mutex_read_lock(struct mutex *m, const char *fname, int lineno)
uint32_t old_itr_status;
bool can_lock;
struct wait_queue_elem wqe;
- int owner = MUTEX_OWNER_ID_NONE;
/*
* If the mutex is locked we need to initialize the wqe
@@ -152,8 +148,6 @@ static void __mutex_read_lock(struct mutex *m, const char *fname, int lineno)
can_lock = m->state != -1;
if (!can_lock) {
wq_wait_init(&m->wq, &wqe, true /* wait_read */);
- owner = m->owner_id;
- assert(owner != thread_get_id_may_fail());
} else {
m->state++; /* read_locked */
}
@@ -165,7 +159,7 @@ static void __mutex_read_lock(struct mutex *m, const char *fname, int lineno)
* Someone else is holding the lock, wait in normal
* world for the lock to become available.
*/
- wq_wait_final(&m->wq, &wqe, m, owner, fname, lineno);
+ wq_wait_final(&m->wq, &wqe, m, fname, lineno);
} else
return;
}
@@ -358,8 +352,7 @@ static void __condvar_wait(struct condvar *cv, struct mutex *m,
if (!new_state)
wq_wake_next(&m->wq, m, fname, lineno);
- wq_wait_final(&m->wq, &wqe,
- m, MUTEX_OWNER_ID_CONDVAR_SLEEP, fname, lineno);
+ wq_wait_final(&m->wq, &wqe, m, fname, lineno);
if (old_state > 0)
mutex_read_lock(m);
diff --git a/core/arch/arm/kernel/wait_queue.c b/core/arch/arm/kernel/wait_queue.c
index 214e33fa..c9b1b060 100644
--- a/core/arch/arm/kernel/wait_queue.c
+++ b/core/arch/arm/kernel/wait_queue.c
@@ -25,18 +25,17 @@ void wq_init(struct wait_queue *wq)
* the unpaged area.
*/
void __weak __wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
- int owner __maybe_unused, const char *fname,
- int lineno __maybe_unused)
+ const char *fname, int lineno __maybe_unused)
{
uint32_t ret;
const char *cmd_str __maybe_unused =
func == OPTEE_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";
if (fname)
- DMSG("%s thread %u %p %d %s:%d", cmd_str, id,
- sync_obj, owner, fname, lineno);
+ DMSG("%s thread %u %p %s:%d", cmd_str, id,
+ sync_obj, fname, lineno);
else
- DMSG("%s thread %u %p %d", cmd_str, id, sync_obj, owner);
+ DMSG("%s thread %u %p", cmd_str, id, sync_obj);
struct thread_param params = THREAD_PARAM_VALUE(IN, func, id, 0);
@@ -77,15 +76,14 @@ void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
}
void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
- const void *sync_obj, int owner, const char *fname,
- int lineno)
+ const void *sync_obj, const char *fname, int lineno)
{
uint32_t old_itr_status;
unsigned done;
do {
__wq_rpc(OPTEE_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
- sync_obj, owner, fname, lineno);
+ sync_obj, fname, lineno);
old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
@@ -139,8 +137,7 @@ void wq_wake_next(struct wait_queue *wq, const void *sync_obj,
if (do_wakeup)
__wq_rpc(OPTEE_RPC_WAIT_QUEUE_WAKEUP, handle,
- sync_obj, MUTEX_OWNER_ID_MUTEX_UNLOCK,
- fname, lineno);
+ sync_obj, fname, lineno);
if (!do_wakeup || !wake_read)
break;