Diffstat (limited to 'drivers/android')
-rw-r--r--  drivers/android/binder.c | 151
1 file changed, 44 insertions(+), 107 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 7286cf9a726b..1fe8ab0144c6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -594,8 +594,6 @@ enum {
* (protected by @proc->inner_lock)
* @todo: list of work to do for this thread
* (protected by @proc->inner_lock)
- * @process_todo: whether work in @todo should be processed
- * (protected by @proc->inner_lock)
* @return_error: transaction errors reported by this thread
* (only accessed by this thread)
* @reply_error: transaction errors reported by target thread
@@ -622,7 +620,6 @@ struct binder_thread {
bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
wait_queue_head_t wait;
@@ -810,16 +807,6 @@ static bool binder_worklist_empty(struct binder_proc *proc,
return ret;
}
-/**
- * binder_enqueue_work_ilocked() - Add an item to the work list
- * @work: struct binder_work to add to list
- * @target_list: list to add work to
- *
- * Adds the work to the specified list. Asserts that work
- * is not already on a list.
- *
- * Requires the proc->inner_lock to be held.
- */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
struct list_head *target_list)
@@ -830,56 +817,22 @@ binder_enqueue_work_ilocked(struct binder_work *work,
}
/**
- * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
- * @thread: thread to queue work to
- * @work: struct binder_work to add to list
- *
- * Adds the work to the todo list of the thread. Doesn't set the process_todo
- * flag, which means that (if it wasn't already set) the thread will go to
- * sleep without handling this work when it calls read.
- *
- * Requires the proc->inner_lock to be held.
- */
-static void
-binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
- struct binder_work *work)
-{
- binder_enqueue_work_ilocked(work, &thread->todo);
-}
-
-/**
- * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
- * @thread: thread to queue work to
- * @work: struct binder_work to add to list
- *
- * Adds the work to the todo list of the thread, and enables processing
- * of the todo queue.
- *
- * Requires the proc->inner_lock to be held.
- */
-static void
-binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
- struct binder_work *work)
-{
- binder_enqueue_work_ilocked(work, &thread->todo);
- thread->process_todo = true;
-}
-
-/**
- * binder_enqueue_thread_work() - Add an item to the thread work list
- * @thread: thread to queue work to
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
* @work: struct binder_work to add to list
+ * @target_list: list to add work to
*
- * Adds the work to the todo list of the thread, and enables processing
- * of the todo queue.
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
*/
static void
-binder_enqueue_thread_work(struct binder_thread *thread,
- struct binder_work *work)
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
{
- binder_inner_proc_lock(thread->proc);
- binder_enqueue_thread_work_ilocked(thread, work);
- binder_inner_proc_unlock(thread->proc);
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
}
static void
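For reference, a minimal sketch of the convention the restored wrapper relies on (not part of the patch; the body below is reconstructed from the removed kernel-doc, so treat it as an approximation): helpers suffixed _ilocked() expect the caller to already hold proc->inner_lock, while the plain variant brackets the locked helper with the lock/unlock pair shown above.

	/* Sketch only: roughly what the locked helper does, per its (removed) kernel-doc. */
	static void
	binder_enqueue_work_ilocked(struct binder_work *work,
				    struct list_head *target_list)
	{
		BUG_ON(target_list == NULL);
		/* assert the work item is not already queued on some list */
		BUG_ON(work->entry.next && !list_empty(&work->entry));
		list_add_tail(&work->entry, target_list);
	}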
@@ -1012,7 +965,7 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
- return thread->process_todo ||
+ return !binder_worklist_empty_ilocked(&thread->todo) ||
thread->looper_need_return ||
(do_proc_work &&
!binder_worklist_empty_ilocked(&thread->proc->todo));
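With process_todo gone, the work-available check goes back to inspecting the lists themselves. binder_worklist_empty_ilocked() must be called with proc->inner_lock held; a sketch of that existing helper (unchanged by this patch, shown only for context):

	/* Sketch: the list-emptiness helper used by the predicate above. */
	static bool binder_worklist_empty_ilocked(struct list_head *list)
	{
		return list_empty(list);
	}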
@@ -1429,17 +1382,6 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
binder_dequeue_work_ilocked(&node->work);
- /*
- * Note: this function is the only place where we queue
- * directly to a thread->todo without using the
- * corresponding binder_enqueue_thread_work() helper
- * functions; in this case it's ok to not set the
- * process_todo flag, since we know this node work will
- * always be followed by other work that starts queue
- * processing: in case of synchronous transactions, a
- * BR_REPLY or BR_ERROR; in case of oneway
- * transactions, a BR_TRANSACTION_COMPLETE.
- */
binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
@@ -1451,9 +1393,6 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
node->debug_id);
return -EINVAL;
}
- /*
- * See comment above
- */
binder_enqueue_work_ilocked(&node->work, target_list);
}
}
@@ -2143,9 +2082,9 @@ static void binder_send_failed_reply(struct binder_transaction *t,
binder_pop_transaction_ilocked(target_thread, t);
if (target_thread->reply_error.cmd == BR_OK) {
target_thread->reply_error.cmd = error_code;
- binder_enqueue_thread_work_ilocked(
- target_thread,
- &target_thread->reply_error.work);
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
WARN(1, "Unexpected reply error: %u\n",
@@ -2784,10 +2723,11 @@ static bool binder_proc_transaction(struct binder_transaction *t,
struct binder_proc *proc,
struct binder_thread *thread)
{
+ struct list_head *target_list = NULL;
struct binder_node *node = t->buffer->target_node;
struct binder_priority node_prio;
bool oneway = !!(t->flags & TF_ONE_WAY);
- bool pending_async = false;
+ bool wakeup = true;
BUG_ON(!node);
binder_node_lock(node);
@@ -2797,7 +2737,8 @@ static bool binder_proc_transaction(struct binder_transaction *t,
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction) {
- pending_async = true;
+ target_list = &node->async_todo;
+ wakeup = false;
} else {
node->has_async_transaction = 1;
}
@@ -2811,20 +2752,22 @@ static bool binder_proc_transaction(struct binder_transaction *t,
return false;
}
- if (!thread && !pending_async)
+ if (!thread && !target_list)
thread = binder_select_thread_ilocked(proc);
if (thread) {
+ target_list = &thread->todo;
binder_transaction_priority(thread->task, t, node_prio,
node->inherit_rt);
- binder_enqueue_thread_work_ilocked(thread, &t->work);
- } else if (!pending_async) {
- binder_enqueue_work_ilocked(&t->work, &proc->todo);
+ } else if (!target_list) {
+ target_list = &proc->todo;
} else {
- binder_enqueue_work_ilocked(&t->work, &node->async_todo);
+ BUG_ON(target_list != &node->async_todo);
}
- if (!pending_async)
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
binder_inner_proc_unlock(proc);
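Taken together, the hunks above restore a single target_list selection in binder_proc_transaction(): a oneway transaction with async work already pending is parked on node->async_todo without a wakeup; otherwise the work goes to a selected thread's todo list, or to proc->todo as a fallback. A condensed sketch of the resulting flow (locking, priority inheritance and error handling omitted; not a drop-in body):

	struct list_head *target_list = NULL;
	bool wakeup = true;

	if (oneway && node->has_async_transaction) {
		target_list = &node->async_todo;	/* park behind pending async work */
		wakeup = false;				/* drained when the current one completes */
	}

	if (!thread && !target_list)
		thread = binder_select_thread_ilocked(proc);

	if (thread)
		target_list = &thread->todo;		/* hand directly to a waiting thread */
	else if (!target_list)
		target_list = &proc->todo;		/* any looper thread may pick it up */

	binder_enqueue_work_ilocked(&t->work, target_list);
	if (wakeup)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);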
@@ -3326,10 +3269,10 @@ static void binder_transaction(struct binder_proc *proc,
}
}
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
t->work.type = BINDER_WORK_TRANSACTION;
if (reply) {
- binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
if (target_thread->is_dead) {
binder_inner_proc_unlock(target_proc);
@@ -3337,7 +3280,7 @@ static void binder_transaction(struct binder_proc *proc,
}
BUG_ON(t->buffer->async_transaction != 0);
binder_pop_transaction_ilocked(target_thread, in_reply_to);
- binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
binder_inner_proc_unlock(target_proc);
wake_up_interruptible_sync(&target_thread->wait);
binder_restore_priority(current, in_reply_to->saved_priority);
@@ -3345,14 +3288,6 @@ static void binder_transaction(struct binder_proc *proc,
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
binder_inner_proc_lock(proc);
- /*
- * Defer the TRANSACTION_COMPLETE, so we don't return to
- * userspace immediately; this allows the target process to
- * immediately start processing this transaction, reducing
- * latency. We will then return the TRANSACTION_COMPLETE when
- * the target replies (or there is an error).
- */
- binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
@@ -3366,7 +3301,6 @@ static void binder_transaction(struct binder_proc *proc,
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- binder_enqueue_thread_work(thread, tcomplete);
if (!binder_proc_transaction(t, target_proc, NULL))
goto err_dead_proc_or_thread;
}
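After these hunks, the BINDER_WORK_TRANSACTION_COMPLETE work is queued to the calling thread at a single point, before the transaction itself is dispatched, instead of being deferred for synchronous calls. A rough sketch of the resulting ordering in binder_transaction() (heavily simplified; locking, dead-thread checks and priority handling omitted):

	tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
	binder_enqueue_work(proc, tcomplete, &thread->todo);	/* caller will read BR_TRANSACTION_COMPLETE */
	t->work.type = BINDER_WORK_TRANSACTION;

	if (reply) {
		/* reply goes straight to the thread waiting in the original call */
		binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
		wake_up_interruptible_sync(&target_thread->wait);
	} else if (!(t->flags & TF_ONE_WAY)) {
		/* synchronous call: push onto this thread's transaction stack, then deliver */
		t->need_reply = 1;
		t->from_parent = thread->transaction_stack;
		thread->transaction_stack = t;
		binder_proc_transaction(t, target_proc, target_thread);
	} else {
		/* oneway: no reply expected, no specific target thread */
		binder_proc_transaction(t, target_proc, NULL);
	}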
@@ -3446,11 +3380,15 @@ err_invalid_target_handle:
if (in_reply_to) {
binder_restore_priority(current, in_reply_to->saved_priority);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
- binder_enqueue_thread_work(thread, &thread->return_error.work);
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
} else {
thread->return_error.cmd = return_error;
- binder_enqueue_thread_work(thread, &thread->return_error.work);
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
}
}
@@ -3754,9 +3692,10 @@ static int binder_thread_write(struct binder_proc *proc,
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
- binder_enqueue_thread_work(
- thread,
- &thread->return_error.work);
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_debug(
BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
@@ -3836,9 +3775,9 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_thread_work_ilocked(
- thread,
- &death->work);
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -3893,8 +3832,8 @@ static int binder_thread_write(struct binder_proc *proc,
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
- binder_enqueue_thread_work_ilocked(
- thread, &death->work);
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
else {
binder_enqueue_work_ilocked(
&death->work,
@@ -4068,8 +4007,6 @@ retry:
break;
}
w = binder_dequeue_work_head_ilocked(list);
- if (binder_worklist_empty_ilocked(&thread->todo))
- thread->process_todo = false;
switch (w->type) {
case BINDER_WORK_TRANSACTION: {