path: root/drivers/cpufreq
author	Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>	2016-11-24 16:07:10 -0800
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2016-11-28 14:23:56 +0100
commit	8442885fca09b2d26375b9fe507759879a6f661e (patch)
tree	765281d711d05823b71141512d76f3755c95a72d /drivers/cpufreq
parent	d5dd33d9de0d50db7f3ba221f9c4e4f74e61a69d (diff)
cpufreq: intel_pstate: Set EPP/EPB to 0 in performance mode
When the user has selected the performance policy, set the EPP (Energy Performance Preference) or EPB (Energy Performance Bias) to maximum performance mode. Also, when the user switches back to powersave, restore EPP/EPB to the last EPP/EPB value in effect before entering performance mode. If the user has not changed EPP/EPB manually, that will be the power-on default value.

Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
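For context, the EPP field this patch manipulates lives in bits 31:24 of IA32_HWP_REQUEST (MSR 0x774). Below is an illustrative userspace sketch, not part of the patch itself, of the same save / force-to-zero / restore pattern; it assumes the msr module is loaded, root privileges, HWP EPP support, and hard-codes CPU 0 and the /dev/cpu/0/msr path for brevity.

/*
 * Illustrative userspace sketch (not part of this patch): shows the same
 * save / force-to-0 / restore handling of the EPP field (bits 31:24 of
 * IA32_HWP_REQUEST, MSR 0x774). Assumes the "msr" module is loaded, root
 * privileges, and HWP EPP support; CPU 0 is hard-coded for brevity.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_HWP_REQUEST 0x774

static int rd_msr(int fd, uint32_t reg, uint64_t *val)
{
	return pread(fd, val, sizeof(*val), reg) == sizeof(*val) ? 0 : -1;
}

static int wr_msr(int fd, uint32_t reg, uint64_t val)
{
	return pwrite(fd, &val, sizeof(val), reg) == sizeof(val) ? 0 : -1;
}

int main(void)
{
	uint64_t req;
	uint8_t saved_epp;
	int fd = open("/dev/cpu/0/msr", O_RDWR);

	if (fd < 0 || rd_msr(fd, MSR_HWP_REQUEST, &req)) {
		perror("msr");
		return 1;
	}

	/* "Performance" policy: save the current EPP, then force it to 0. */
	saved_epp = (req >> 24) & 0xff;
	req &= ~(0xffULL << 24);
	if (wr_msr(fd, MSR_HWP_REQUEST, req))
		perror("write");
	printf("EPP forced to 0, saved value was 0x%02x\n", saved_epp);

	/* Switching back to "powersave": restore the saved EPP value. */
	req |= (uint64_t)saved_epp << 24;
	if (wr_msr(fd, MSR_HWP_REQUEST, req))
		perror("write");

	close(fd);
	return 0;
}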
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/intel_pstate.c  106
1 file changed, 105 insertions(+), 1 deletion(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7159dbde0160..0b90a63de46d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -249,6 +249,9 @@ struct perf_limits {
* when per cpu controls are enforced
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
+ * @epp_saved: Last saved HWP energy performance preference
+ * (EPP) or energy performance bias (EPB)
+ * @epp_policy: Last saved policy used to set EPP/EPB
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -276,6 +279,8 @@ struct cpudata {
bool valid_pss_table;
#endif
unsigned int iowait_boost;
+ s16 epp_saved;
+ s16 epp_policy;
};
static struct cpudata **all_cpu_data;
@@ -574,6 +579,48 @@ static inline void update_turbo_state(void)
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
+static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
+{
+ u64 epb;
+ int ret;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return (s16)ret;
+
+ return (s16)(epb & 0x0f);
+}
+
+static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
+{
+ s16 epp;
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP))
+ epp = (hwp_req_data >> 24) & 0xff;
+ else
+ /* When there is no EPP present, HWP uses EPB settings */
+ epp = intel_pstate_get_epb(cpu_data);
+
+ return epp;
+}
+
+static void intel_pstate_set_epb(int cpu, s16 pref)
+{
+ u64 epb;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return;
+
+ if (rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb))
+ return;
+
+ epb = (epb & ~0x0f) | pref;
+ wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+}
+
static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
@@ -582,6 +629,8 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
for_each_cpu(cpu, cpumask) {
int max_perf_pct, min_perf_pct;
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+ s16 epp;
if (per_cpu_limits)
perf_limits = all_cpu_data[cpu]->perf_limits;
@@ -610,6 +659,48 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
+
+ if (cpu_data->epp_policy == cpu_data->policy)
+ goto skip_epp;
+
+ cpu_data->epp_policy = cpu_data->policy;
+
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ epp = intel_pstate_get_epp(cpu_data, value);
+ /* If the EPP read failed, don't try to write */
+ if (epp < 0) {
+ cpu_data->epp_saved = epp;
+ goto skip_epp;
+ }
+
+ cpu_data->epp_saved = epp;
+
+ epp = 0;
+ } else {
+ /* Skip setting EPP when the saved value is invalid */
+ if (cpu_data->epp_saved < 0)
+ goto skip_epp;
+
+ /*
+ * No need to restore EPP when it is not zero. This
+ * means:
+ * - the policy has not changed
+ * - the user has changed it manually
+ * - there was an error reading EPB
+ */
+ epp = intel_pstate_get_epp(cpu_data, value);
+ if (epp)
+ goto skip_epp;
+
+ epp = cpu_data->epp_saved;
+ }
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ } else {
+ intel_pstate_set_epb(cpu, epp);
+ }
+skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
}
@@ -622,6 +713,17 @@ static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
return 0;
}
+static int intel_pstate_resume(struct cpufreq_policy *policy)
+{
+ if (!hwp_active)
+ return 0;
+
+ all_cpu_data[policy->cpu]->epp_policy = 0;
+ all_cpu_data[policy->cpu]->epp_saved = -EINVAL;
+
+ return intel_pstate_hwp_set_policy(policy);
+}
+
static void intel_pstate_hwp_set_online_cpus(void)
{
get_online_cpus();
@@ -872,6 +974,8 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
+ cpudata->epp_policy = 0;
+ cpudata->epp_saved = -EINVAL;
}
static int atom_get_min_pstate(void)
@@ -1767,7 +1871,7 @@ static struct cpufreq_driver intel_pstate = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
- .resume = intel_pstate_hwp_set_policy,
+ .resume = intel_pstate_resume,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,