author	Viresh Kumar <viresh.kumar@linaro.org>	2017-11-08 19:47:36 +0530
committer	Amit Pundir <amit.pundir@linaro.org>	2018-01-22 13:15:43 +0530
commit	56c4ae908c850bbf7b7953f718ae5f3799db16fe (patch)
tree	dc0ab20c31847e8199277ba13df3496d655a5a93 /kernel
parent	63e22985919dc6e704591b59f7e82da295bc9bbf (diff)
BACKPORT: schedutil: Reset cached freq if it is not in sync with next_freq
'cached_raw_freq' is used to get the next frequency quickly, but it should
always be in sync with sg_policy->next_freq. There are cases where it is
not, and in such cases it should be reset to avoid switching to incorrect
frequencies. Consider this case for example:

- policy->cur is 1.2 GHz (Max).
- A new request comes in for 780 MHz and we store that in cached_raw_freq.
- Based on 780 MHz, we calculate the effective frequency as 800 MHz.
- We then decide not to update the frequency, as
  sugov_up_down_rate_limit() returns true.
- At this point cached_raw_freq is 780 MHz and sg_policy->next_freq is
  1.2 GHz.
- Now, if the utilization doesn't change in the next request, the next
  target frequency will still be 780 MHz. It will match cached_raw_freq,
  so we will directly return 1.2 GHz instead of 800 MHz.

BACKPORT of upstream commit 07458f6a5171 ("cpufreq: schedutil: Reset
cached_raw_freq when not in sync with next_freq"). This backport also
updates sugov_update_commit() to handle the up/down rate-limit tunables,
which aren't present in mainline.

Change-Id: I70bca2c5dfdb545a0471d1c9e4c5addb30ab5494
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
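For illustration, the stale-cache sequence above can be reproduced with a
minimal userspace sketch. This is not the actual schedutil code: struct
fake_sg_policy, next_freq_shared() and rate_limited() are made-up stand-ins
for sg_policy, the raw-to-effective frequency calculation and the
sugov_up_down_rate_limit() check described in the message.

	/*
	 * Userspace sketch of the stale cached_raw_freq scenario.
	 * All names are illustrative only, not kernel APIs.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct fake_sg_policy {
		unsigned int cached_raw_freq;	/* last raw request, MHz */
		unsigned int next_freq;		/* last committed freq, MHz */
	};

	/* Stand-in for the rate-limit check; assume it rejects the update. */
	static bool rate_limited(void) { return true; }

	static unsigned int next_freq_shared(struct fake_sg_policy *sg,
					     unsigned int raw)
	{
		if (raw == sg->cached_raw_freq && sg->next_freq)
			return sg->next_freq;	/* fast path: reuse cache */
		sg->cached_raw_freq = raw;
		return (raw / 100 + 1) * 100;	/* crude rounding: 780 -> 800 */
	}

	int main(void)
	{
		struct fake_sg_policy sg = { .cached_raw_freq = 0,
					     .next_freq = 1200 };
		unsigned int f;

		/* First request: 780 MHz raw -> 800 MHz, but rate-limited. */
		f = next_freq_shared(&sg, 780);
		if (rate_limited()) {
			/* Without the fix, cached_raw_freq stays at 780 here. */
			/* The fix would do: sg.cached_raw_freq = 0; */
		} else {
			sg.next_freq = f;
		}

		/* Second request, utilization unchanged: raw is again 780. */
		f = next_freq_shared(&sg, 780);
		printf("selected %u MHz (stale cache returns 1200, not 800)\n", f);
		return 0;
	}

With the commented-out reset applied, the second call misses the cache and
recomputes 800 MHz, which is what the patch below does for the kernel's
cached_raw_freq.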
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	11
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index d3765f0cb699..6c84b4d28914 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -130,8 +130,11 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
 {
 	struct cpufreq_policy *policy = sg_policy->policy;
 
-	if (sugov_up_down_rate_limit(sg_policy, time, next_freq))
+	if (sugov_up_down_rate_limit(sg_policy, time, next_freq)) {
+		/* Reset cached freq as next_freq isn't changed */
+		sg_policy->cached_raw_freq = 0;
 		return;
+	}
 
 	if (sg_policy->next_freq == next_freq)
 		return;
@@ -317,8 +320,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
 		 * Do not reduce the frequency if the CPU has not been idle
 		 * recently, as the reduction is likely to be premature then.
 		 */
-		if (busy && next_f < sg_policy->next_freq)
+		if (busy && next_f < sg_policy->next_freq) {
 			next_f = sg_policy->next_freq;
+
+			/* Reset cached freq as next_freq has changed */
+			sg_policy->cached_raw_freq = 0;
+		}
 	}
 	sugov_update_commit(sg_policy, time, next_f);
 }