From e4db2813d2e558b6b6bee464308678a57732b390 Mon Sep 17 00:00:00 2001
From: "Rafael J. Wysocki"
Date: Mon, 15 Feb 2016 02:13:42 +0100
Subject: cpufreq: governor: Avoid atomic operations in hot paths

Rework the handling of work items by dbs_update_util_handler() and
dbs_work_handler() so the former (which is executed in scheduler paths)
only uses atomic operations when absolutely necessary.  That is, when
the policy is shared and dbs_update_util_handler() has already decided
that this is the time to queue up a work item.

In particular, this avoids the atomic ops entirely on platforms where
policy objects are never shared.

Signed-off-by: Rafael J. Wysocki
Acked-by: Viresh Kumar
---
 drivers/cpufreq/cpufreq_governor.c | 51 ++++++++++++++++++++++++++------------
 1 file changed, 35 insertions(+), 16 deletions(-)

(limited to 'drivers/cpufreq/cpufreq_governor.c')

diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index c78af11a51f0..e5a08a13ca84 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -304,6 +304,7 @@ static void gov_cancel_work(struct cpufreq_policy *policy)
 	irq_work_sync(&policy_dbs->irq_work);
 	cancel_work_sync(&policy_dbs->work);
 	atomic_set(&policy_dbs->work_count, 0);
+	policy_dbs->work_in_progress = false;
 }
 
 static void dbs_work_handler(struct work_struct *work)
@@ -326,13 +327,15 @@ static void dbs_work_handler(struct work_struct *work)
 	policy_dbs->sample_delay_ns = jiffies_to_nsecs(delay);
 	mutex_unlock(&policy_dbs->timer_mutex);
 
+	/* Allow the utilization update handler to queue up more work. */
+	atomic_set(&policy_dbs->work_count, 0);
 	/*
-	 * If the atomic operation below is reordered with respect to the
-	 * sample delay modification, the utilization update handler may end
-	 * up using a stale sample delay value.
+	 * If the update below is reordered with respect to the sample delay
+	 * modification, the utilization update handler may end up using a stale
+	 * sample delay value.
 	 */
-	smp_mb__before_atomic();
-	atomic_dec(&policy_dbs->work_count);
+	smp_wmb();
+	policy_dbs->work_in_progress = false;
 }
 
 static void dbs_irq_work(struct irq_work *irq_work)
@@ -348,6 +351,7 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 {
 	struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
 	struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
+	u64 delta_ns;
 
 	/*
 	 * The work may not be allowed to be queued up right now.
@@ -355,17 +359,30 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 	 *  - Work has already been queued up or is in progress.
 	 *  - It is too early (too little time from the previous sample).
 	 */
-	if (atomic_inc_return(&policy_dbs->work_count) == 1) {
-		u64 delta_ns;
-
-		delta_ns = time - policy_dbs->last_sample_time;
-		if ((s64)delta_ns >= policy_dbs->sample_delay_ns) {
-			policy_dbs->last_sample_time = time;
-			irq_work_queue(&policy_dbs->irq_work);
-			return;
-		}
-	}
-	atomic_dec(&policy_dbs->work_count);
+	if (policy_dbs->work_in_progress)
+		return;
+
+	/*
+	 * If the reads below are reordered before the check above, the value
+	 * of sample_delay_ns used in the computation may be stale.
+	 */
+	smp_rmb();
+	delta_ns = time - policy_dbs->last_sample_time;
+	if ((s64)delta_ns < policy_dbs->sample_delay_ns)
+		return;
+
+	/*
+	 * If the policy is not shared, the irq_work may be queued up right away
+	 * at this point.  Otherwise, we need to ensure that only one of the
+	 * CPUs sharing the policy will do that.
+	 */
+	if (policy_dbs->is_shared &&
+	    !atomic_add_unless(&policy_dbs->work_count, 1, 1))
+		return;
+
+	policy_dbs->last_sample_time = time;
+	policy_dbs->work_in_progress = true;
+	irq_work_queue(&policy_dbs->irq_work);
 }
 
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
@@ -542,6 +559,8 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
 	if (!policy->cur)
 		return -EINVAL;
 
+	policy_dbs->is_shared = policy_is_shared(policy);
+
 	sampling_rate = dbs_data->sampling_rate;
 	ignore_nice = dbs_data->ignore_nice_load;
 
-- 
cgit v1.2.3
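The heart of the change is the rewritten fast path in dbs_update_util_handler(): a plain work_in_progress flag (ordered by smp_wmb()/smp_rmb()) filters out almost every invocation, and the atomic claim of work_count is attempted only when the policy is shared.  Below is a minimal, stand-alone user-space sketch of that pattern written with C11 atomics instead of the kernel's atomic_t and smp_*() helpers; every identifier in it (toy_policy, toy_update_hook, toy_work_done) is invented for illustration and does not exist in the kernel tree.  Like the kernel code, it uses plain accesses plus fences for the flag and the sample fields, which is a kernel-style idiom rather than strictly data-race-free ISO C.

/* Sketch only: illustrates the pattern of the patch, not the kernel code itself. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_policy {
	bool shared;			/* policy used by more than one CPU */
	bool work_in_progress;		/* plain flag checked in the hot path */
	atomic_int work_count;		/* atomic RMW attempted only when 'shared' */
	uint64_t last_sample_time;
	uint64_t sample_delay_ns;
};

/* Hot path: called on every utilization update, so avoid atomics if possible. */
bool toy_update_hook(struct toy_policy *p, uint64_t now)
{
	/* Non-atomic early exits cover the overwhelmingly common case. */
	if (p->work_in_progress)
		return false;

	/* Pairs with the release fence in toy_work_done(). */
	atomic_thread_fence(memory_order_acquire);
	if (now - p->last_sample_time < p->sample_delay_ns)
		return false;

	/*
	 * Only a shared policy needs an atomic "claim" so that exactly one
	 * CPU queues the work; a per-CPU policy skips the atomic entirely.
	 */
	if (p->shared) {
		int unclaimed = 0;

		if (!atomic_compare_exchange_strong(&p->work_count, &unclaimed, 1))
			return false;
	}

	p->last_sample_time = now;
	p->work_in_progress = true;
	return true;			/* the caller would queue the irq_work here */
}

/* Work-handler tail: re-arm the hot path once the work item has finished. */
void toy_work_done(struct toy_policy *p, uint64_t new_delay_ns)
{
	p->sample_delay_ns = new_delay_ns;
	atomic_store(&p->work_count, 0);
	/* Publish the new delay before clearing the flag, as the patch does. */
	atomic_thread_fence(memory_order_release);
	p->work_in_progress = false;
}

In this sketch, atomic_compare_exchange_strong() plays the role of the kernel's atomic_add_unless(&work_count, 1, 1): both let exactly one CPU win the race to queue the work while the other CPUs sharing the policy bail out without doing anything further.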