author	Srivatsa Vaddagiri <vatsa@codeaurora.org>	2014-03-31 10:34:41 -0700
committer	Amit Pundir <amit.pundir@linaro.org>	2017-11-20 21:15:59 +0530
commit	a80b8c7559da81f9af542beca13978c6ce344816 (patch)
tree	4d7404e40578597e0c7c8620d7cbd97be9c5bd72 /kernel
parent	ace670d5c6e2b06fc0bf96fc6c47e33538ea4e98 (diff)
sched: Extend active balance to accept 'push_task' argument
Active balance currently picks one task to migrate from the busy cpu to
a chosen cpu (push_cpu). This patch extends active load balance to
recognize a particular task ('push_task') that needs to be migrated to
'push_cpu'. This capability will be leveraged by HMP-aware task
placement in a subsequent patch.

Change-Id: If31320111e6cc7044e617b5c3fd6d8e0c0e16952
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	 1
-rw-r--r--	kernel/sched/fair.c	42
-rw-r--r--	kernel/sched/sched.h	 1
3 files changed, 34 insertions, 10 deletions
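The new hook only takes effect once a caller stages a task on the busy
runqueue before kicking the stopper; this patch adds no such caller
(that arrives with the HMP-aware placement change it mentions). A
minimal sketch of the expected usage might look like the following.
The helper name hmp_push_task_to_cpu() is hypothetical, while
rq->push_task, rq->push_cpu, rq->active_balance,
active_load_balance_cpu_stop() and stop_one_cpu_nowait() are the real
symbols used in fair.c. Note the get_task_struct() reference, which
active_load_balance_cpu_stop() drops via put_task_struct() after the
attempted migration:

	/* Hypothetical caller sketch -- not part of this patch. */
	static void hmp_push_task_to_cpu(struct rq *busiest_rq,
					 struct task_struct *p, int dest_cpu)
	{
		unsigned long flags;
		bool kick = false;

		raw_spin_lock_irqsave(&busiest_rq->lock, flags);
		if (!busiest_rq->active_balance) {
			busiest_rq->active_balance = 1;
			busiest_rq->push_cpu = dest_cpu;
			/* Pin the task; the stopper does put_task_struct(). */
			get_task_struct(p);
			busiest_rq->push_task = p;
			kick = true;
		}
		raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);

		if (kick)
			stop_one_cpu_nowait(cpu_of(busiest_rq),
					    active_load_balance_cpu_stop,
					    busiest_rq,
					    &busiest_rq->active_balance_work);
	}

Setting push_task under busiest_rq->lock mirrors how the stopper reads
and clears it under the same lock, and kicking the stopper only after
dropping the lock follows the existing active-balance pattern in
load_balance().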
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 18d607f9a417..679791f6c2df 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7909,6 +7909,7 @@ void __init sched_init(void)
rq->active_balance = 0;
rq->next_balance = jiffies;
rq->push_cpu = 0;
+ rq->push_task = NULL;
rq->cpu = i;
rq->online = 0;
rq->idle_stamp = 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b904a023be95..716aa3495b63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9429,8 +9429,18 @@ static int active_load_balance_cpu_stop(void *data)
int busiest_cpu = cpu_of(busiest_rq);
int target_cpu = busiest_rq->push_cpu;
struct rq *target_rq = cpu_rq(target_cpu);
- struct sched_domain *sd;
+ struct sched_domain *sd = NULL;
struct task_struct *p = NULL;
+ struct task_struct *push_task;
+ int push_task_detached = 0;
+ struct lb_env env = {
+ .sd = sd,
+ .dst_cpu = target_cpu,
+ .dst_rq = target_rq,
+ .src_cpu = busiest_rq->cpu,
+ .src_rq = busiest_rq,
+ .idle = CPU_IDLE,
+ };
raw_spin_lock_irq(&busiest_rq->lock);
@@ -9450,6 +9460,16 @@ static int active_load_balance_cpu_stop(void *data)
*/
BUG_ON(busiest_rq == target_rq);
+ push_task = busiest_rq->push_task;
+ if (push_task) {
+ if (task_on_rq_queued(push_task) &&
+ task_cpu(push_task) == busiest_cpu) {
+ detach_task(push_task, &env);
+ push_task_detached = 1;
+ }
+ goto out_unlock;
+ }
+
/* Search for an sd spanning us and the target CPU. */
rcu_read_lock();
for_each_domain(target_cpu, sd) {
@@ -9459,15 +9479,7 @@ static int active_load_balance_cpu_stop(void *data)
}
if (likely(sd)) {
- struct lb_env env = {
- .sd = sd,
- .dst_cpu = target_cpu,
- .dst_rq = target_rq,
- .src_cpu = busiest_rq->cpu,
- .src_rq = busiest_rq,
- .idle = CPU_IDLE,
- };
-
+ env.sd = sd;
schedstat_inc(sd, alb_count);
update_rq_clock(busiest_rq);
@@ -9485,8 +9497,18 @@ static int active_load_balance_cpu_stop(void *data)
rcu_read_unlock();
out_unlock:
busiest_rq->active_balance = 0;
+
+ if (push_task)
+ busiest_rq->push_task = NULL;
+
raw_spin_unlock(&busiest_rq->lock);
+ if (push_task) {
+ if (push_task_detached)
+ attach_one_task(target_rq, push_task);
+ put_task_struct(push_task);
+ }
+
if (p)
attach_one_task(target_rq, p);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dd86072eaf4e..af2fd9ccaddf 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -664,6 +664,7 @@ struct rq {
/* For active balancing */
int active_balance;
int push_cpu;
+ struct task_struct *push_task;
struct cpu_stop_work active_balance_work;
/* cpu of this runqueue: */
int cpu;