author    Peter Zijlstra <peterz@infradead.org>  2015-06-11 14:46:40 +0200
committer Ben Hutchings <ben@decadent.org.uk>    2016-06-15 21:29:35 +0100
commit    3fc871f3a24db903be27d14c1c67525aefd02d92 (patch)
tree      b625093ca38eb8aab74025389acb16e4d11b5b79 /kernel
parent    070654cc0f0defaf236f986014591dcc87ff9f87 (diff)
sched,rt: Remove return value from pull_rt_task()
commit 8046d6806247088de5725eaf8a2580b29e50ac5a upstream.

In order to be able to use pull_rt_task() from a callback, we need to do
away with the return value. Since the return value indicates whether we
should reschedule, do this inside the function. Since not all callers
currently do this, this can increase the number of reschedules due to rt
balancing. Too many reschedules is not a correctness issue; too few are.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.679002000@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[Conflicts: kernel/sched/rt.c]
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
[bwh: Backported to 3.16: use resched_task() instead of resched_curr()]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
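[Editor's note: the following is a minimal userspace C sketch of the
refactor pattern this patch applies: moving the "should we reschedule?"
decision from the caller into the callee, so the function can return
void and be used from a callback that has nowhere to put a return
value. All names here (pull_old, pull_new, resched, the toy struct rq)
are hypothetical stand-ins, not kernel code.]

/* Sketch of the pattern only; see the real diff below. */
#include <stdbool.h>
#include <stdio.h>

struct rq { int cpu; bool overloaded; };

static void resched(struct rq *rq)
{
	printf("resched cpu %d\n", rq->cpu);
}

/* Before: the caller must check the return value and reschedule. */
static int pull_old(struct rq *rq)
{
	return rq->overloaded;	/* 1 = caller should reschedule */
}

/* After: the function reschedules internally and returns void, so it
 * can also be invoked where the return value would be discarded. */
static void pull_new(struct rq *rq)
{
	if (!rq->overloaded)
		return;
	resched(rq);
}

int main(void)
{
	struct rq rq = { .cpu = 0, .overloaded = true };

	if (pull_old(&rq))	/* old calling convention */
		resched(&rq);

	pull_new(&rq);		/* new: no return value to check */
	return 0;
}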
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/rt.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 919319b5db2f..ad2dcfdeec92 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -244,7 +244,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
#ifdef CONFIG_SMP
-static int pull_rt_task(struct rq *this_rq);
+static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
@@ -399,9 +399,8 @@ static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
return false;
}
-static inline int pull_rt_task(struct rq *this_rq)
+static inline void pull_rt_task(struct rq *this_rq)
{
- return 0;
}
static inline void queue_push_tasks(struct rq *rq)
@@ -1772,14 +1771,15 @@ static void push_rt_tasks(struct rq *rq)
;
}
-static int pull_rt_task(struct rq *this_rq)
+static void pull_rt_task(struct rq *this_rq)
{
- int this_cpu = this_rq->cpu, ret = 0, cpu;
+ int this_cpu = this_rq->cpu, cpu;
+ bool resched = false;
struct task_struct *p;
struct rq *src_rq;
if (likely(!rt_overloaded(this_rq)))
- return 0;
+ return;
/*
* Match the barrier from rt_set_overloaded; this guarantees that if we
@@ -1836,7 +1836,7 @@ static int pull_rt_task(struct rq *this_rq)
if (p->prio < src_rq->curr->prio)
goto skip;
- ret = 1;
+ resched = true;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
@@ -1852,7 +1852,8 @@ skip:
double_unlock_balance(this_rq, src_rq);
}
- return ret;
+ if (resched)
+ resched_task(this_rq->curr);
}
/*
@@ -1948,8 +1949,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
if (!p->on_rq || rq->rt.rt_nr_running)
return;
- if (pull_rt_task(rq))
- resched_task(rq->curr);
+ pull_rt_task(rq);
}
void __init init_sched_rt_class(void)