author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2016-05-06 22:01:14 +0200
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2016-05-06 22:01:14 +0200
commit     da43af961b50f1b6367660c0dba0b5fd205c4217 (patch)
tree       2f0d60beae3b2a3c5f1d2441e24e90104dc010f9 /drivers/cpufreq/intel_pstate.c
parent     9485e4ca0b486248ce07d7dd1411a1080d24ed0d (diff)
parent     6d45b719cbd51f014bb1b5dd8ed99068d78d36af (diff)
Merge cpufreq fixes going into v4.6.
* pm-cpufreq-fixes:
  intel_pstate: Fix intel_pstate_get()
  cpufreq: intel_pstate: Fix HWP on boot CPU after system resume
  cpufreq: st: enable selective initialization based on the platform
  cpufreq: intel_pstate: Fix processing for turbo activation ratio
Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 31
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 74453fe546c1..3addb22725e1 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -586,6 +586,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
 	}
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+	if (hwp_active)
+		intel_pstate_hwp_set(policy->cpus);
+
+	return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
 	get_online_cpus();
@@ -944,6 +952,11 @@ static int core_get_max_pstate(void)
 	if (err)
 		goto skip_tar;
 
+	/* For level 1 and 2, bits[23:16] contain the ratio */
+	if (tdp_ctrl)
+		tdp_ratio >>= 16;
+
+	tdp_ratio &= 0xff; /* ratios are only 8 bits long */
 	if (tdp_ratio - 1 == tar) {
 		max_pstate = tar;
 		pr_debug("max_pstate=TAC %x\n", max_pstate);
@@ -1188,8 +1201,9 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-	return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-			 cpu->pstate.scaling, cpu->sample.mperf);
+	return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+			       int_tofp(cpu->pstate.max_pstate_physical *
+					cpu->pstate.scaling / 100)));
 }
 
 static inline int32_t get_avg_pstate(struct cpudata *cpu)
@@ -1238,8 +1252,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
 	u64 duration_ns;
 
-	intel_pstate_calc_busy(cpu);
-
 	/*
 	 * core_busy is the ratio of actual performance to max
 	 * max_pstate is the max non turbo pstate available
@@ -1322,8 +1334,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
 	if ((s64)delta_ns >= pid_params.sample_rate_ns) {
 		bool sample_taken = intel_pstate_sample(cpu, time);
 
-		if (sample_taken && !hwp_active)
-			intel_pstate_adjust_busy_pstate(cpu);
+		if (sample_taken) {
+			intel_pstate_calc_busy(cpu);
+			if (!hwp_active)
+				intel_pstate_adjust_busy_pstate(cpu);
+		}
 	}
 }
@@ -1485,8 +1500,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
  out:
 	intel_pstate_set_update_util_hook(policy->cpu);
 
-	if (hwp_active)
-		intel_pstate_hwp_set(policy->cpus);
+	intel_pstate_hwp_set_policy(policy);
 
 	return 0;
 }
@@ -1558,6 +1572,7 @@ static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
+	.resume		= intel_pstate_hwp_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
 	.exit		= intel_pstate_cpu_exit,
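
A note on the turbo-activation-ratio hunk above: for the nominal config-TDP level the ratio sits in the low byte of the level register, but for levels 1 and 2 it sits in bits [23:16], so the value has to be shifted down before the 8-bit mask and the comparison against the turbo activation ratio (tar). The following is a minimal, illustrative userspace sketch of that extraction logic only — it is not part of the patch, the helper name extract_tdp_ratio() and the mock register values are invented for illustration, and the real driver reads these values from MSRs in core_get_max_pstate():

/*
 * Illustrative sketch (not kernel code) of the ratio handling added
 * in core_get_max_pstate() above, with mock values in place of the
 * MSR reads.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int extract_tdp_ratio(uint64_t tdp_ctrl, uint64_t tdp_ratio)
{
	/* For level 1 and 2, bits[23:16] contain the ratio */
	if (tdp_ctrl)
		tdp_ratio >>= 16;

	return tdp_ratio & 0xff;	/* ratios are only 8 bits long */
}

int main(void)
{
	uint64_t tdp_ctrl = 2;			/* mock: config-TDP level 2 selected */
	uint64_t level_msr = 0x00230000;	/* mock: ratio 0x23 in bits [23:16] */
	uint64_t tar = 0x22;			/* mock: turbo activation ratio */

	unsigned int tdp_ratio = extract_tdp_ratio(tdp_ctrl, level_msr);

	/* Same comparison as the patched driver code. */
	if (tdp_ratio - 1 == tar)
		printf("max_pstate = TAC %x\n", (unsigned int)tar);
	else
		printf("keeping the default max_pstate\n");

	return 0;
}

Without the shift, a non-nominal TDP level leaves the ratio in bits [23:16], so the tdp_ratio - 1 == tar comparison cannot in practice match an 8-bit tar value and the turbo activation ratio is effectively ignored. The get_avg_frequency() change in the same merge follows from moving intel_pstate_calc_busy() into intel_pstate_update_util(): the average frequency is now derived from the already-computed core_pct_busy (busy percentage * max_pstate_physical * scaling / 100) instead of repeating the APERF/MPERF division.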