Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig.arm            |    9
-rw-r--r--  drivers/cpufreq/Makefile               |    1
-rw-r--r--  drivers/cpufreq/cpufreq-dt.c           |   52
-rw-r--r--  drivers/cpufreq/cpufreq.c              |    8
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c  |  173
-rw-r--r--  drivers/cpufreq/rockchip-cpufreq.c     |  426
-rw-r--r--  drivers/cpufreq/rockchip_big_little.c  | 1343
7 files changed, 2005 insertions, 7 deletions
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index eed1e073d96d..540336f96ec1 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -95,6 +95,15 @@ config ARM_OMAP2PLUS_CPUFREQ
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
+config ARM_ROCKCHIP_CPUFREQ
+ tristate "Rockchip CPUfreq driver"
+ depends on ARCH_ROCKCHIP && CPUFREQ_DT
+ select PM_OPP
+ help
+ This enables the rockchip CPUfreq driver.
+
+ If in doubt, say N.
+
config ARM_S3C_CPUFREQ
bool
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 54070bf413bf..0ee100487c2b 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_ROCKCHIP_CPUFREQ) += rockchip-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index a72ae98b4838..f07774f96a14 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -28,6 +28,11 @@
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>
+#ifdef CONFIG_ARCH_ROCKCHIP
+#include <soc/rockchip/rockchip_opp_select.h>
+#endif
+
+#define MAX_CLUSTERS 2
struct private_data {
struct device *cpu_dev;
@@ -149,10 +154,15 @@ static int cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
+#ifdef CONFIG_ARCH_ROCKCHIP
+ struct cpumask cpus;
+#endif
unsigned int transition_latency;
+ unsigned long cur_freq;
bool opp_v1 = false;
const char *name;
- int ret;
+ int ret, scale;
+ static int check_init;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -204,7 +214,24 @@ static int cpufreq_init(struct cpufreq_policy *policy)
*
* OPPs might be populated at runtime, don't check for error here
*/
+#ifdef CONFIG_ARCH_ROCKCHIP
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret) {
+ dev_err(cpu_dev, "couldn't find opp table for cpu:%d, %d\n",
+ policy->cpu, ret);
+ } else {
+ cpumask_copy(&cpus, policy->cpus);
+ cpumask_clear_cpu(policy->cpu, &cpus);
+ if (!cpumask_empty(&cpus)) {
+ if (dev_pm_opp_of_cpumask_add_table(&cpus))
+ dev_pm_opp_of_remove_table(cpu_dev);
+ }
+ }
+ scale = rockchip_cpufreq_get_scale(policy->cpu);
+ rockchip_adjust_power_scale(cpu_dev, scale);
+#else
dev_pm_opp_of_cpumask_add_table(policy->cpus);
+#endif
/*
* But we need OPP table to function so if it is not there let's
@@ -286,6 +313,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->up_transition_delay_us = transition_latency / NSEC_PER_USEC;
policy->down_transition_delay_us = 50000; /* 50ms */
+ if (check_init < MAX_CLUSTERS) {
+ ret = dev_pm_opp_check_initial_rate(cpu_dev, &cur_freq);
+ if (!ret)
+ policy->cur = cur_freq / 1000;
+ check_init++;
+ }
+
return 0;
out_free_cpufreq_table:
@@ -304,11 +338,22 @@ out_put_clk:
static int cpufreq_exit(struct cpufreq_policy *policy)
{
+ struct cpumask cpus;
struct private_data *priv = policy->driver_data;
+#ifdef CONFIG_ARCH_ROCKCHIP
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+ if (cpufreq_generic_suspend(policy))
+ pr_err("%s: Failed to suspend driver: %p\n", __func__, policy);
+ cpumask_clear_cpu(policy->cpu, policy->cpus);
+#endif
+ priv->cpu_dev = get_cpu_device(policy->cpu);
cpufreq_cooling_unregister(priv->cdev);
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
- dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
+ cpumask_copy(&cpus, policy->related_cpus);
+ cpumask_clear_cpu(policy->cpu, &cpus);
+ dev_pm_opp_of_cpumask_remove_table(&cpus);
+ dev_pm_opp_of_remove_table(priv->cpu_dev);
if (priv->reg_name)
dev_pm_opp_put_regulator(priv->cpu_dev);
@@ -351,7 +396,8 @@ static void cpufreq_ready(struct cpufreq_policy *policy)
}
static struct cpufreq_driver dt_cpufreq_driver = {
- .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
+ .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+ CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
.verify = cpufreq_generic_frequency_table_verify,
.target_index = set_target,
.get = cpufreq_generic_get,
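
Note on the Rockchip branch in cpufreq_init() above: it registers the OPP table for the policy CPU first and then for the remaining CPUs in the cluster, tolerating a failure on the shared mask because OPPs may still be populated at runtime. A minimal standalone sketch of that pattern (not part of the patch; it assumes cpu_dev and policy are already resolved and uses the same OPP helpers as the hunk):

static int rockchip_register_cluster_opps(struct device *cpu_dev,
					  struct cpufreq_policy *policy)
{
	struct cpumask others;
	int ret;

	/* Per-CPU table for the policy CPU itself */
	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	/* Then the rest of the cluster; unwind on failure, don't error out */
	cpumask_copy(&others, policy->cpus);
	cpumask_clear_cpu(policy->cpu, &others);
	if (!cpumask_empty(&others) &&
	    dev_pm_opp_of_cpumask_add_table(&others))
		dev_pm_opp_of_remove_table(cpu_dev);

	return 0;
}
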
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5dfea63204ca..2a6593dde250 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1335,9 +1335,6 @@ static int cpufreq_online(unsigned int cpu)
}
}
- blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_START, policy);
-
if (new_policy) {
ret = cpufreq_add_dev_interface(policy);
if (ret)
@@ -1351,6 +1348,9 @@ static int cpufreq_online(unsigned int cpu)
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}
+ blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+ CPUFREQ_START, policy);
+
ret = cpufreq_init_policy(policy);
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1953,6 +1953,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
if (cpufreq_disabled())
return -ENODEV;
+ target_freq = rockchip_cpufreq_adjust_target(policy->cpu, target_freq);
+
/* Make sure that target_freq is within supported range */
if (target_freq > policy->max)
target_freq = policy->max;
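
The hunk above makes __cpufreq_driver_target() call rockchip_cpufreq_adjust_target() unconditionally, so configurations without the Rockchip driver presumably rely on a no-op fallback from whatever header declares it. A plausible sketch of such a stub (an assumption; the header is not part of this diff):

#if IS_ENABLED(CONFIG_ARM_ROCKCHIP_CPUFREQ)
unsigned int rockchip_cpufreq_adjust_target(int cpu, unsigned int freq);
#else
/* Assumed no-op fallback: pass the requested frequency through unchanged */
static inline unsigned int rockchip_cpufreq_adjust_target(int cpu,
							   unsigned int freq)
{
	return freq;
}
#endif
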
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index e0a586895136..3886b39c5861 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -19,6 +19,9 @@
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
+#ifdef CONFIG_ARCH_ROCKCHIP
+#include <linux/input.h>
+#endif
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
@@ -106,6 +109,14 @@ struct cpufreq_interactive_tunables {
int boostpulse_duration_val;
/* End time of boost pulse in ktime converted to usecs */
u64 boostpulse_endtime;
+#ifdef CONFIG_ARCH_ROCKCHIP
+ /* Frequency to which a touch boost takes the cpus to */
+ unsigned long touchboost_freq;
+ /* Duration of a touchboost pulse in usecs */
+ int touchboostpulse_duration_val;
+ /* End time of touchboost pulse in ktime converted to usecs */
+ u64 touchboostpulse_endtime;
+#endif
bool boosted;
/*
* Max additional time to wait in idle, beyond timer_rate, at speeds
@@ -383,7 +394,12 @@ static void cpufreq_interactive_timer(unsigned long data)
pcpu->policy->cur < tunables->hispeed_freq)
new_freq = tunables->hispeed_freq;
}
-
+#ifdef CONFIG_ARCH_ROCKCHIP
+ if ((now < tunables->touchboostpulse_endtime) &&
+ (new_freq < tunables->touchboost_freq)) {
+ new_freq = tunables->touchboost_freq;
+ }
+#endif
if (pcpu->policy->cur >= tunables->hispeed_freq &&
new_freq > pcpu->policy->cur &&
now - pcpu->pol_hispeed_val_time <
@@ -1133,6 +1149,150 @@ static struct notifier_block cpufreq_interactive_idle_nb = {
.notifier_call = cpufreq_interactive_idle_notifier,
};
+#ifdef CONFIG_ARCH_ROCKCHIP
+static void cpufreq_interactive_input_event(struct input_handle *handle,
+ unsigned int type,
+ unsigned int code,
+ int value)
+{
+ u64 now, endtime;
+ int i;
+ int anyboost = 0;
+ unsigned long flags[2];
+ struct cpufreq_interactive_cpuinfo *pcpu;
+ struct cpufreq_interactive_tunables *tunables;
+
+ if ((type != EV_ABS) && (type != EV_KEY))
+ return;
+
+ trace_cpufreq_interactive_boost("touch");
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
+
+ now = ktime_to_us(ktime_get());
+ for_each_online_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+ if (!pcpu->policy)
+ continue;
+
+ if (have_governor_per_policy())
+ tunables = pcpu->policy->governor_data;
+ else
+ tunables = common_tunables;
+ if (!tunables)
+ continue;
+
+ endtime = now + tunables->touchboostpulse_duration_val;
+ if (endtime < (tunables->touchboostpulse_endtime +
+ 10 * USEC_PER_MSEC))
+ continue;
+ tunables->touchboostpulse_endtime = endtime;
+
+ spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
+ if (pcpu->target_freq < tunables->touchboost_freq) {
+ pcpu->target_freq = tunables->touchboost_freq;
+ cpumask_set_cpu(i, &speedchange_cpumask);
+ pcpu->loc_hispeed_val_time =
+ ktime_to_us(ktime_get());
+ anyboost = 1;
+ }
+
+ pcpu->floor_freq = tunables->touchboost_freq;
+ pcpu->loc_floor_val_time = ktime_to_us(ktime_get());
+
+ spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
+ }
+
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
+
+ if (anyboost)
+ wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_input_connect(struct input_handler *handler,
+ struct input_dev *dev,
+ const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "cpufreq";
+
+ error = input_register_handle(handle);
+ if (error)
+ goto err2;
+
+ error = input_open_device(handle);
+ if (error)
+ goto err1;
+
+ return 0;
+err1:
+ input_unregister_handle(handle);
+err2:
+ kfree(handle);
+ return error;
+}
+
+static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id cpufreq_interactive_ids[] = {
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .evbit = { BIT_MASK(EV_ABS) },
+ .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+ BIT_MASK(ABS_MT_POSITION_X) |
+ BIT_MASK(ABS_MT_POSITION_Y) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ .absbit = { [BIT_WORD(ABS_X)] =
+ BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+ },
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ { },
+};
+
+static struct input_handler cpufreq_interactive_input_handler = {
+ .event = cpufreq_interactive_input_event,
+ .connect = cpufreq_interactive_input_connect,
+ .disconnect = cpufreq_interactive_input_disconnect,
+ .name = "cpufreq_interactive",
+ .id_table = cpufreq_interactive_ids,
+};
+
+static void rockchip_cpufreq_policy_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_interactive_tunables *tunables = policy->governor_data;
+
+ tunables->min_sample_time = 40 * USEC_PER_MSEC;
+ tunables->boostpulse_duration_val = 40 * USEC_PER_MSEC;
+ if (policy->cpu == 0) {
+ tunables->hispeed_freq = 1008000;
+ tunables->touchboostpulse_duration_val = 500 * USEC_PER_MSEC;
+ tunables->touchboost_freq = 1200000;
+ } else {
+ tunables->hispeed_freq = 816000;
+ }
+}
+#endif
+
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event)
{
@@ -1187,6 +1347,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
common_tunables = tunables;
}
+#ifdef CONFIG_ARCH_ROCKCHIP
+ rockchip_cpufreq_policy_init(policy);
+#endif
+
rc = sysfs_create_group(get_governor_parent_kobj(policy),
get_sysfs_attr());
if (rc) {
@@ -1202,6 +1366,10 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
idle_notifier_register(&cpufreq_interactive_idle_nb);
cpufreq_register_notifier(&cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
+#ifdef CONFIG_ARCH_ROCKCHIP
+ rc = input_register_handler(&cpufreq_interactive_input_handler);
+#endif
+
}
break;
@@ -1209,6 +1377,9 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
case CPUFREQ_GOV_POLICY_EXIT:
if (!--tunables->usage_count) {
if (policy->governor->initialized == 1) {
+#ifdef CONFIG_ARCH_ROCKCHIP
+ input_unregister_handler(&cpufreq_interactive_input_handler);
+#endif
cpufreq_unregister_notifier(&cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
idle_notifier_unregister(&cpufreq_interactive_idle_nb);
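
A note on the touchboost re-arm check in cpufreq_interactive_input_event() above, with illustrative numbers (only the 500 ms pulse default comes from rockchip_cpufreq_policy_init(); the rest is for illustration):

/*
 * With touchboostpulse_duration_val = 500 ms, an event at time `now`
 * computes endtime = now + 500000 us and only updates
 * touchboostpulse_endtime when
 *
 *   endtime >= previous_endtime + 10 * USEC_PER_MSEC
 *
 * Since previous_endtime was previous_now + 500000 us, the boost window is
 * extended only for events arriving more than ~10 ms after the previous
 * one, so a burst of touch events re-arms the boost at most about once per
 * 10 ms rather than on every event.
 */
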
diff --git a/drivers/cpufreq/rockchip-cpufreq.c b/drivers/cpufreq/rockchip-cpufreq.c
new file mode 100644
index 000000000000..adf30dcb17f0
--- /dev/null
+++ b/drivers/cpufreq/rockchip-cpufreq.c
@@ -0,0 +1,426 @@
+/*
+ * Rockchip CPUFreq Driver
+ *
+ * Copyright (C) 2017 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/reboot.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/soc/rockchip/pvtm.h>
+#include <linux/thermal.h>
+#include <soc/rockchip/rockchip_opp_select.h>
+
+#include "../clk/rockchip/clk.h"
+
+#define LEAKAGE_INVALID 0xff
+#define REBOOT_FREQ 816000 /* kHz */
+
+struct cluster_info {
+ struct list_head list_head;
+ cpumask_t cpus;
+ unsigned int reboot_freq;
+ unsigned int threshold_freq;
+ int volt_sel;
+ int scale;
+ int process;
+ bool offline;
+ bool rebooting;
+ bool freq_limit;
+};
+static LIST_HEAD(cluster_info_list);
+
+static int rk3288_get_soc_info(struct device *dev, struct device_node *np,
+ int *bin, int *process)
+{
+ int ret = 0, value = -EINVAL;
+
+ if (!bin)
+ goto next;
+ if (of_property_match_string(np, "nvmem-cell-names", "special") >= 0) {
+ ret = rockchip_get_efuse_value(np, "special", &value);
+ if (ret) {
+ dev_err(dev, "Failed to get soc special value\n");
+ goto out;
+ }
+ if (value == 0xc)
+ *bin = 0;
+ else
+ *bin = 1;
+ }
+ if (of_property_match_string(np, "nvmem-cell-names",
+ "performance") >= 0) {
+ ret = rockchip_get_efuse_value(np, "performance", &value);
+ if (ret) {
+ dev_err(dev, "Failed to get soc performance value\n");
+ goto out;
+ }
+ if (value == 0x2)
+ *bin = 2;
+ }
+ if (*bin >= 0)
+ dev_info(dev, "bin=%d\n", *bin);
+
+next:
+ if (!process)
+ goto out;
+ if (of_property_match_string(np, "nvmem-cell-names",
+ "process") >= 0) {
+ ret = rockchip_get_efuse_value(np, "process", &value);
+ if (ret) {
+ dev_err(dev, "Failed to get soc process version\n");
+ goto out;
+ }
+ if (value == 0 || value == 1)
+ *process = 0;
+ }
+ if (*process >= 0)
+ dev_info(dev, "process=%d\n", *process);
+
+out:
+ return ret;
+}
+
+static const struct of_device_id rockchip_cpufreq_of_match[] = {
+ {
+ .compatible = "rockchip,rk3288",
+ .data = (void *)&rk3288_get_soc_info,
+ },
+ {
+ .compatible = "rockchip,rk3288w",
+ .data = (void *)&rk3288_get_soc_info,
+ },
+ {},
+};
+
+static struct cluster_info *rockchip_cluster_info_lookup(int cpu)
+{
+ struct cluster_info *cluster;
+
+ list_for_each_entry(cluster, &cluster_info_list, list_head) {
+ if (cpumask_test_cpu(cpu, &cluster->cpus))
+ return cluster;
+ }
+
+ return NULL;
+}
+
+int rockchip_cpufreq_get_scale(int cpu)
+{
+ struct cluster_info *cluster;
+
+ cluster = rockchip_cluster_info_lookup(cpu);
+ if (!cluster)
+ return 0;
+ else
+ return cluster->scale;
+}
+EXPORT_SYMBOL_GPL(rockchip_cpufreq_get_scale);
+
+static int rockchip_cpufreq_cluster_init(int cpu, struct cluster_info *cluster)
+{
+ struct device_node *np;
+ struct device *dev;
+ int ret = 0, bin = -EINVAL;
+
+ cluster->process = -EINVAL;
+ cluster->volt_sel = -EINVAL;
+ cluster->scale = 0;
+
+ dev = get_cpu_device(cpu);
+ if (!dev)
+ return -ENODEV;
+
+ np = of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+ if (!np) {
+ dev_warn(dev, "OPP-v2 not supported\n");
+ return -ENOENT;
+ }
+
+ ret = dev_pm_opp_of_get_sharing_cpus(dev, &cluster->cpus);
+ if (ret) {
+ dev_err(dev, "Failed to get sharing cpus\n");
+ goto np_err;
+ }
+
+ if (of_property_read_u32(np, "rockchip,reboot-freq",
+ &cluster->reboot_freq))
+ cluster->reboot_freq = REBOOT_FREQ;
+ of_property_read_u32(np, "rockchip,threshold-freq",
+ &cluster->threshold_freq);
+ cluster->freq_limit = of_property_read_bool(np, "rockchip,freq-limit");
+
+ rockchip_get_soc_info(dev, rockchip_cpufreq_of_match,
+ &bin, &cluster->process);
+ rockchip_get_scale_volt_sel(dev, "cpu_leakage", "cpu",
+ bin, cluster->process,
+ &cluster->scale, &cluster->volt_sel);
+np_err:
+ of_node_put(np);
+ return ret;
+}
+
+static int rockchip_cpufreq_set_opp_info(int cpu, struct cluster_info *cluster)
+{
+ struct device *dev = get_cpu_device(cpu);
+
+ if (!dev)
+ return -ENODEV;
+ return rockchip_set_opp_info(dev, cluster->process,
+ cluster->volt_sel);
+}
+
+static int rockchip_hotcpu_notifier(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+ struct cluster_info *cluster;
+ cpumask_t cpus;
+ int number, ret;
+
+ cluster = rockchip_cluster_info_lookup(cpu);
+ if (!cluster)
+ return NOTIFY_OK;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_ONLINE:
+ if (cluster->offline) {
+ ret = rockchip_cpufreq_set_opp_info(cpu, cluster);
+ if (ret)
+ pr_err("Failed to set cpu%d opp_info\n", cpu);
+ cluster->offline = false;
+ }
+ break;
+
+ case CPU_POST_DEAD:
+ cpumask_and(&cpus, &cluster->cpus, cpu_online_mask);
+ number = cpumask_weight(&cpus);
+ if (!number)
+ cluster->offline = true;
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rockchip_hotcpu_nb = {
+ .notifier_call = rockchip_hotcpu_notifier,
+};
+
+static int rockchip_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *ptr)
+{
+ int cpu;
+ struct cluster_info *cluster;
+
+ list_for_each_entry(cluster, &cluster_info_list, list_head) {
+ cpu = cpumask_first_and(&cluster->cpus, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ continue;
+ cluster->rebooting = true;
+ cpufreq_update_policy(cpu);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rockchip_reboot_nb = {
+ .notifier_call = rockchip_reboot_notifier,
+};
+
+static int rockchip_cpufreq_policy_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+ struct cluster_info *cluster;
+ int cpu = policy->cpu;
+
+ if (event != CPUFREQ_ADJUST)
+ return NOTIFY_OK;
+
+ cluster = rockchip_cluster_info_lookup(cpu);
+ if (!cluster)
+ return NOTIFY_DONE;
+
+ if (cluster->rebooting) {
+ if (cluster->reboot_freq < policy->max)
+ policy->max = cluster->reboot_freq;
+ policy->min = policy->max;
+ pr_info("cpu%d limit freq=%d min=%d max=%d\n",
+ policy->cpu, cluster->reboot_freq,
+ policy->min, policy->max);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block rockchip_cpufreq_policy_nb = {
+ .notifier_call = rockchip_cpufreq_policy_notifier,
+};
+
+static struct cpufreq_policy *rockchip_get_policy(struct cluster_info *cluster)
+{
+ int first_cpu;
+
+ first_cpu = cpumask_first_and(&cluster->cpus, cpu_online_mask);
+ if (first_cpu >= nr_cpu_ids)
+ return NULL;
+
+ return cpufreq_cpu_get(first_cpu);
+}
+
+/**
+ * rockchip_cpufreq_adjust_target() - Adjust cpu target frequency
+ * @cpu: CPU number
+ * @freq: Expected target frequency
+ *
+ * This adjusts the CPU target frequency to reduce power consumption.
+ * Only one cluster may enable the frequency limit; that cluster's
+ * maximum frequency is clamped to its threshold frequency whenever the
+ * other cluster's frequency is greater than or equal to its own
+ * threshold frequency.
+ */
+unsigned int rockchip_cpufreq_adjust_target(int cpu, unsigned int freq)
+{
+ struct cpufreq_policy *policy;
+ struct cluster_info *cluster, *temp;
+
+ cluster = rockchip_cluster_info_lookup(cpu);
+ if (!cluster || !cluster->threshold_freq)
+ goto adjust_out;
+
+ if (cluster->freq_limit) {
+ if (freq <= cluster->threshold_freq)
+ goto adjust_out;
+
+ list_for_each_entry(temp, &cluster_info_list, list_head) {
+ if (temp->freq_limit || temp == cluster ||
+ temp->offline)
+ continue;
+
+ policy = rockchip_get_policy(temp);
+ if (!policy)
+ continue;
+
+ if (temp->threshold_freq &&
+ temp->threshold_freq <= policy->cur) {
+ cpufreq_cpu_put(policy);
+ return cluster->threshold_freq;
+ }
+ cpufreq_cpu_put(policy);
+ }
+ } else {
+ if (freq < cluster->threshold_freq)
+ goto adjust_out;
+
+ list_for_each_entry(temp, &cluster_info_list, list_head) {
+ if (!temp->freq_limit || temp == cluster ||
+ temp->offline)
+ continue;
+
+ policy = rockchip_get_policy(temp);
+ if (!policy)
+ continue;
+
+ if (temp->threshold_freq &&
+ temp->threshold_freq < policy->cur)
+ cpufreq_driver_target(policy,
+ temp->threshold_freq,
+ CPUFREQ_RELATION_H);
+ cpufreq_cpu_put(policy);
+ }
+ }
+
+adjust_out:
+
+ return freq;
+}
+EXPORT_SYMBOL_GPL(rockchip_cpufreq_adjust_target);
+
+static int __init rockchip_cpufreq_driver_init(void)
+{
+ struct platform_device *pdev;
+ struct cluster_info *cluster, *pos;
+ int cpu, first_cpu, ret, i = 0;
+
+ for_each_possible_cpu(cpu) {
+ cluster = rockchip_cluster_info_lookup(cpu);
+ if (cluster)
+ continue;
+
+ cluster = kzalloc(sizeof(*cluster), GFP_KERNEL);
+ if (!cluster)
+ return -ENOMEM;
+
+ ret = rockchip_cpufreq_cluster_init(cpu, cluster);
+ if (ret) {
+ if (ret != -ENOENT) {
+ pr_err("Failed to parse dt for cpu%d\n", cpu);
+ return ret;
+ }
+
+ /*
+ * As the OPP binding document says, only one OPP table
+ * should be used per device, and multi-cluster Rockchip
+ * platforms are expected to use operating-points-v2.
+ * So if operating-points-v2 is not supported, there can
+ * only be one cluster and the list should be empty.
+ */
+ list_for_each_entry(pos, &cluster_info_list, list_head)
+ i++;
+ if (i)
+ return ret;
+ /*
+ * If operating-points-v2 is not supported, there is no
+ * need to register notifiers.
+ */
+ goto next;
+ }
+
+ first_cpu = cpumask_first_and(&cluster->cpus, cpu_online_mask);
+ ret = rockchip_cpufreq_set_opp_info(first_cpu, cluster);
+ if (ret) {
+ pr_err("Failed to set cpu%d opp_info\n", first_cpu);
+ return ret;
+ }
+
+ list_add(&cluster->list_head, &cluster_info_list);
+ }
+
+ register_hotcpu_notifier(&rockchip_hotcpu_nb);
+ register_reboot_notifier(&rockchip_reboot_nb);
+ cpufreq_register_notifier(&rockchip_cpufreq_policy_nb,
+ CPUFREQ_POLICY_NOTIFIER);
+
+next:
+ pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+module_init(rockchip_cpufreq_driver_init);
+
+MODULE_AUTHOR("Finley Xiao <finley.xiao@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip cpufreq driver");
+MODULE_LICENSE("GPL v2");
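
To make the kernel-doc for rockchip_cpufreq_adjust_target() concrete, here is a worked example; the frequencies are hypothetical and only illustrate the two branches:

/*
 * Hypothetical configuration (illustrative values, not from the patch):
 *   big cluster:    rockchip,freq-limit set, threshold_freq = 1416000 kHz
 *   little cluster: no freq-limit,           threshold_freq = 1200000 kHz
 *
 * Request on the limited (big) cluster:
 *   big asks for 1800000 kHz while the little cluster currently runs at
 *   1200000 kHz (>= its threshold), so the request is clamped to the big
 *   cluster's threshold and 1416000 kHz is returned.
 *
 * Request on the unlimited (little) cluster:
 *   little asks for 1296000 kHz (>= its threshold) while the big cluster
 *   currently runs above 1416000 kHz, so the big cluster is pushed down to
 *   its threshold via cpufreq_driver_target() and the little request is
 *   returned unchanged.
 */
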
diff --git a/drivers/cpufreq/rockchip_big_little.c b/drivers/cpufreq/rockchip_big_little.c
new file mode 100644
index 000000000000..f14f3b1e2fce
--- /dev/null
+++ b/drivers/cpufreq/rockchip_big_little.c
@@ -0,0 +1,1343 @@
+/*
+ * Copyright (C) 2015 Fuzhou Rockchip Electronics Co., Ltd
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/kernel_stat.h>
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/suspend.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#ifdef CONFIG_ROCKCHIP_CPUQUIET
+#include <linux/cpuquiet.h>
+#include <linux/pm_qos.h>
+#endif
+#include <linux/rockchip/cpu.h>
+#include <linux/rockchip/dvfs.h>
+#include <asm/smp_plat.h>
+#include <asm/unistd.h>
+#include <linux/uaccess.h>
+#include <asm/system_misc.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/rockchip/common.h>
+#include <dt-bindings/clock/rk_system_status.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include "../../../drivers/clk/rockchip/clk-pd.h"
+
+#define VERSION "1.0"
+#define MAX_CLUSTERS 2
+#define B_CLUSTER 0
+#define L_CLUSTER 1
+
+#ifdef DEBUG
+#define FREQ_DBG(fmt, args...) pr_debug(fmt, ## args)
+#define FREQ_LOG(fmt, args...) pr_debug(fmt, ## args)
+#else
+#define FREQ_DBG(fmt, args...) do {} while (0)
+#define FREQ_LOG(fmt, args...) do {} while (0)
+#endif
+#define FREQ_ERR(fmt, args...) pr_err(fmt, ## args)
+
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
+/*********************************************************/
+/* additional semantics for "relation" in cpufreq with pm */
+#define DISABLE_FURTHER_CPUFREQ 0x10
+#define ENABLE_FURTHER_CPUFREQ 0x20
+#define MASK_FURTHER_CPUFREQ 0x30
+#define CPU_LOW_FREQ 600000 /* kHz */
+#define CCI_LOW_RATE 288000000 /* Hz */
+#define CCI_HIGH_RATE 576000000 /* Hz */
+/* With 0x00(NOCHANGE), it depends on the previous "further" status */
+#define CPUFREQ_PRIVATE 0x100
+static unsigned int no_cpufreq_access[MAX_CLUSTERS] = { 0 };
+static unsigned int suspend_freq[MAX_CLUSTERS] = { 816 * 1000, 816 * 1000 };
+static unsigned int suspend_volt = 1100000;
+static unsigned int low_battery_freq[MAX_CLUSTERS] = { 600 * 1000,
+ 600 * 1000 };
+static unsigned int low_battery_capacity = 5;
+static bool is_booting = true;
+static DEFINE_MUTEX(cpufreq_mutex);
+static struct dvfs_node *clk_cpu_dvfs_node[MAX_CLUSTERS];
+static struct dvfs_node *clk_gpu_dvfs_node;
+static struct dvfs_node *clk_ddr_dvfs_node;
+static cpumask_var_t cluster_policy_mask[MAX_CLUSTERS];
+static struct clk *aclk_cci;
+static unsigned long cci_rate;
+static unsigned int cpu_bl_freq[MAX_CLUSTERS];
+
+#ifdef CONFIG_ROCKCHIP_CPUQUIET
+static void rockchip_bl_balanced_cpufreq_transition(unsigned int cluster,
+ unsigned int cpu_freq);
+static struct cpuquiet_governor rockchip_bl_balanced_governor;
+#endif
+
+/*******************************************************/
+static inline int cpu_to_cluster(int cpu)
+{
+ int id = topology_physical_package_id(cpu);
+ if (id < 0)
+ id = 0;
+ return id;
+}
+
+static unsigned int rockchip_bl_cpufreq_get_rate(unsigned int cpu)
+{
+ u32 cur_cluster = cpu_to_cluster(cpu);
+
+ if (clk_cpu_dvfs_node[cur_cluster])
+ return clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
+
+ return 0;
+}
+
+static bool cpufreq_is_ondemand(struct cpufreq_policy *policy)
+{
+ char c = 0;
+
+ if (policy && policy->governor)
+ c = policy->governor->name[0];
+ return (c == 'o' || c == 'i' || c == 'c' || c == 'h');
+}
+
+static unsigned int get_freq_from_table(unsigned int max_freq,
+ unsigned int cluster)
+{
+ unsigned int i;
+ unsigned int target_freq = 0;
+
+ for (i = 0; freq_table[cluster][i].frequency != CPUFREQ_TABLE_END;
+ i++) {
+ unsigned int freq = freq_table[cluster][i].frequency;
+
+ if (freq <= max_freq && target_freq < freq)
+ target_freq = freq;
+ }
+ if (!target_freq)
+ target_freq = max_freq;
+ return target_freq;
+}
+
+static int rockchip_bl_cpufreq_notifier_policy(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ static unsigned int min_rate = 0, max_rate = -1;
+ struct cpufreq_policy *policy = data;
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (val != CPUFREQ_ADJUST)
+ return 0;
+
+ if (cpufreq_is_ondemand(policy)) {
+ FREQ_DBG("queue work\n");
+ dvfs_clk_enable_limit(clk_cpu_dvfs_node[cur_cluster],
+ min_rate, max_rate);
+ } else {
+ FREQ_DBG("cancel work\n");
+ dvfs_clk_get_limit(clk_cpu_dvfs_node[cur_cluster],
+ &min_rate, &max_rate);
+ }
+
+ return 0;
+}
+
+static struct notifier_block notifier_policy_block = {
+ .notifier_call = rockchip_bl_cpufreq_notifier_policy
+};
+
+static int rockchip_bl_cpufreq_notifier_trans(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ unsigned int cluster = cpu_to_cluster(freq->cpu);
+ int ret;
+
+ cpu_bl_freq[cluster] = freq->new;
+
+ switch (val) {
+ case CPUFREQ_PRECHANGE:
+ if (cpu_bl_freq[B_CLUSTER] > CPU_LOW_FREQ ||
+ cpu_bl_freq[L_CLUSTER] > CPU_LOW_FREQ) {
+ if (cci_rate != CCI_HIGH_RATE) {
+ ret = clk_set_rate(aclk_cci, CCI_HIGH_RATE);
+ if (ret)
+ break;
+ pr_debug("ccirate %ld-->%d Hz\n",
+ cci_rate, CCI_HIGH_RATE);
+ cci_rate = CCI_HIGH_RATE;
+ }
+ }
+ break;
+ case CPUFREQ_POSTCHANGE:
+ if (cpu_bl_freq[B_CLUSTER] <= CPU_LOW_FREQ &&
+ cpu_bl_freq[L_CLUSTER] <= CPU_LOW_FREQ) {
+ if (cci_rate != CCI_LOW_RATE) {
+ ret = clk_set_rate(aclk_cci, CCI_LOW_RATE);
+ if (ret)
+ break;
+ pr_debug("ccirate %ld-->%d Hz\n",
+ cci_rate, CCI_LOW_RATE);
+ cci_rate = CCI_LOW_RATE;
+ }
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block notifier_trans_block = {
+ .notifier_call = rockchip_bl_cpufreq_notifier_trans,
+};
+
+static int rockchip_bl_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (!freq_table[cur_cluster])
+ return -EINVAL;
+ return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+static int clk_node_get_cluster_id(struct clk *clk)
+{
+ int i;
+
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ if (clk_cpu_dvfs_node[i]->clk == clk)
+ return i;
+ }
+ return 0;
+}
+
+static int rockchip_bl_cpufreq_scale_rate_for_dvfs(struct clk *clk,
+ unsigned long rate)
+{
+ int ret;
+ struct cpufreq_freqs freqs;
+ struct cpufreq_policy *policy;
+ u32 cur_cluster, cpu;
+
+ cur_cluster = clk_node_get_cluster_id(clk);
+ cpu = cpumask_first_and(cluster_policy_mask[cur_cluster],
+ cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ return -EINVAL;
+
+ freqs.new = rate / 1000;
+ freqs.old = clk_get_rate(clk) / 1000;
+
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+
+ FREQ_DBG("cpufreq_scale_rate_for_dvfs(%lu)\n", rate);
+
+ ret = clk_set_rate(clk, rate);
+
+ freqs.new = clk_get_rate(clk) / 1000;
+
+#ifdef CONFIG_ROCKCHIP_CPUQUIET
+ rockchip_bl_balanced_cpufreq_transition(cur_cluster, freqs.new);
+#endif
+
+ /* notifiers */
+ cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+
+ cpufreq_cpu_put(policy);
+ return ret;
+}
+
+static int cluster_cpus_freq_dvfs_init(u32 cluster_id, char *dvfs_name)
+{
+ int v = INT_MAX;
+ int i;
+
+ clk_cpu_dvfs_node[cluster_id] = clk_get_dvfs_node(dvfs_name);
+
+ if (!clk_cpu_dvfs_node[cluster_id]) {
+ FREQ_ERR("%s:cluster_id=%d,get dvfs err\n",
+ __func__, cluster_id);
+ return -EINVAL;
+ }
+ dvfs_clk_register_set_rate_callback(
+ clk_cpu_dvfs_node[cluster_id],
+ rockchip_bl_cpufreq_scale_rate_for_dvfs);
+ freq_table[cluster_id] =
+ dvfs_get_freq_volt_table(clk_cpu_dvfs_node[cluster_id]);
+ if (!freq_table[cluster_id]) {
+ FREQ_ERR("No freq table for cluster %d\n", cluster_id);
+ return -EINVAL;
+ }
+
+ for (i = 0; freq_table[cluster_id][i].frequency != CPUFREQ_TABLE_END;
+ i++) {
+ if (freq_table[cluster_id][i].index >= suspend_volt &&
+ v > freq_table[cluster_id][i].index) {
+ suspend_freq[cluster_id] =
+ freq_table[cluster_id][i].frequency;
+ v = freq_table[cluster_id][i].index;
+ }
+ }
+ low_battery_freq[cluster_id] =
+ get_freq_from_table(low_battery_freq[cluster_id], cluster_id);
+ clk_enable_dvfs(clk_cpu_dvfs_node[cluster_id]);
+ return 0;
+}
+
+static int rockchip_bl_cpufreq_init_cpu0(struct cpufreq_policy *policy)
+{
+ clk_gpu_dvfs_node = clk_get_dvfs_node("clk_gpu");
+ if (clk_gpu_dvfs_node)
+ clk_enable_dvfs(clk_gpu_dvfs_node);
+
+ clk_ddr_dvfs_node = clk_get_dvfs_node("clk_ddr");
+ if (clk_ddr_dvfs_node)
+ clk_enable_dvfs(clk_ddr_dvfs_node);
+
+ cluster_cpus_freq_dvfs_init(B_CLUSTER, "clk_core_b");
+ cluster_cpus_freq_dvfs_init(L_CLUSTER, "clk_core_l");
+
+ cpufreq_register_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+
+ aclk_cci = clk_get(NULL, "aclk_cci");
+ if (!IS_ERR(aclk_cci)) {
+ cci_rate = clk_get_rate(aclk_cci);
+ if (clk_cpu_dvfs_node[L_CLUSTER])
+ cpu_bl_freq[L_CLUSTER] =
+ clk_get_rate(clk_cpu_dvfs_node[L_CLUSTER]->clk) / 1000;
+ if (clk_cpu_dvfs_node[B_CLUSTER])
+ cpu_bl_freq[B_CLUSTER] =
+ clk_get_rate(clk_cpu_dvfs_node[B_CLUSTER]->clk) / 1000;
+ cpufreq_register_notifier(&notifier_trans_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ pr_info("version " VERSION ", suspend freq %d %d MHz\n",
+ suspend_freq[0] / 1000, suspend_freq[1] / 1000);
+ return 0;
+}
+
+static int rockchip_bl_cpufreq_init(struct cpufreq_policy *policy)
+{
+ static int cpu0_err;
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (policy->cpu == 0)
+ cpu0_err = rockchip_bl_cpufreq_init_cpu0(policy);
+ if (cpu0_err)
+ return cpu0_err;
+
+ /* set freq min max */
+ cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ /* sysfs node */
+ cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+
+ if (cur_cluster < MAX_CLUSTERS) {
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ cpumask_copy(cluster_policy_mask[cur_cluster],
+ topology_core_cpumask(policy->cpu));
+ }
+
+ policy->cur = clk_get_rate(clk_cpu_dvfs_node[cur_cluster]->clk) / 1000;
+
+ /* make the ondemand governor's default sampling_rate 40000 */
+ policy->cpuinfo.transition_latency = 40 * NSEC_PER_USEC;
+
+ return 0;
+}
+
+static int rockchip_bl_cpufreq_exit(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (policy->cpu == 0) {
+ cpufreq_unregister_notifier(&notifier_policy_block,
+ CPUFREQ_POLICY_NOTIFIER);
+ }
+ cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ clk_put_dvfs_node(clk_cpu_dvfs_node[cur_cluster]);
+
+ return 0;
+}
+
+static struct freq_attr *rockchip_bl_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+#ifdef CONFIG_CHARGER_DISPLAY
+extern int rk_get_system_battery_capacity(void);
+#else
+static int rk_get_system_battery_capacity(void)
+{
+ return 100;
+}
+#endif
+
+static unsigned int
+rockchip_bl_cpufreq_scale_limit(unsigned int target_freq,
+ struct cpufreq_policy *policy, bool is_private)
+{
+ bool is_ondemand = cpufreq_is_ondemand(policy);
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (!is_ondemand)
+ return target_freq;
+
+ if (is_booting) {
+ s64 boottime_ms = ktime_to_ms(ktime_get_boottime());
+
+ if (boottime_ms > 60 * MSEC_PER_SEC) {
+ is_booting = false;
+ } else if (target_freq > low_battery_freq[cur_cluster] &&
+ rk_get_system_battery_capacity() <=
+ low_battery_capacity) {
+ target_freq = low_battery_freq[cur_cluster];
+ }
+ }
+
+ return target_freq;
+}
+
+static int rockchip_bl_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int i, new_freq = target_freq, new_rate, cur_rate;
+ int ret = 0;
+ bool is_private;
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ if (!freq_table[cur_cluster]) {
+ FREQ_ERR("no freq table!\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&cpufreq_mutex);
+
+ is_private = relation & CPUFREQ_PRIVATE;
+ relation &= ~CPUFREQ_PRIVATE;
+
+ if ((relation & ENABLE_FURTHER_CPUFREQ) &&
+ no_cpufreq_access[cur_cluster])
+ no_cpufreq_access[cur_cluster]--;
+ if (no_cpufreq_access[cur_cluster]) {
+ FREQ_LOG("denied access to %s as it is disabled temporarily\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (relation & DISABLE_FURTHER_CPUFREQ)
+ no_cpufreq_access[cur_cluster]++;
+ relation &= ~MASK_FURTHER_CPUFREQ;
+
+ ret = cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
+ target_freq, relation, &i);
+ if (ret) {
+ FREQ_ERR("no freq match for %d(ret=%d)\n", target_freq, ret);
+ goto out;
+ }
+ new_freq = freq_table[cur_cluster][i].frequency;
+ if (!no_cpufreq_access[cur_cluster])
+ new_freq =
+ rockchip_bl_cpufreq_scale_limit(new_freq, policy,
+ is_private);
+
+ new_rate = new_freq * 1000;
+ cur_rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[cur_cluster]);
+ FREQ_LOG("req = %7u new = %7u (was = %7u)\n", target_freq,
+ new_freq, cur_rate / 1000);
+ if (new_rate == cur_rate)
+ goto out;
+ ret = dvfs_clk_set_rate(clk_cpu_dvfs_node[cur_cluster], new_rate);
+
+out:
+ FREQ_DBG("set freq (%7u) end, ret %d\n", new_freq, ret);
+ mutex_unlock(&cpufreq_mutex);
+ return ret;
+}
+
+static int rockchip_bl_cpufreq_pm_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int ret = NOTIFY_DONE;
+ int i;
+ struct cpufreq_policy *policy;
+ u32 cpu;
+
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ cpu = cpumask_first_and(cluster_policy_mask[i],
+ cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ continue;
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+
+ if (!cpufreq_is_ondemand(policy))
+ goto out;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ policy->cur++;
+ ret = cpufreq_driver_target(policy, suspend_freq[i],
+ DISABLE_FURTHER_CPUFREQ |
+ CPUFREQ_RELATION_H);
+ if (ret < 0) {
+ ret = NOTIFY_BAD;
+ goto out;
+ }
+ ret = NOTIFY_OK;
+ break;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ /*
+  * If target_freq == policy->cur, cpufreq_driver_target()
+  * returns early and our target callback is never called,
+  * which would leave the ENABLE_FURTHER_CPUFREQ flag
+  * unhandled; bump policy->cur to avoid that.
+  */
+ policy->cur++;
+ cpufreq_driver_target(policy, suspend_freq[i],
+ ENABLE_FURTHER_CPUFREQ |
+ CPUFREQ_RELATION_H);
+ ret = NOTIFY_OK;
+ break;
+ }
+out:
+ cpufreq_cpu_put(policy);
+ }
+
+ return ret;
+}
+
+static struct notifier_block rockchip_bl_cpufreq_pm_notifier = {
+ .notifier_call = rockchip_bl_cpufreq_pm_notifier_event,
+};
+
+static int rockchip_bl_cpufreq_reboot_limit_freq(void)
+{
+ struct regulator *regulator;
+ int volt = 0;
+ u32 rate;
+ int i;
+
+ dvfs_disable_temp_limit();
+
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ dvfs_clk_enable_limit(clk_cpu_dvfs_node[i],
+ 1000 * suspend_freq[i],
+ 1000 * suspend_freq[i]);
+ rate = dvfs_clk_get_rate(clk_cpu_dvfs_node[i]);
+ }
+
+ regulator = dvfs_get_regulator("vdd_arm");
+ if (regulator)
+ volt = regulator_get_voltage(regulator);
+ else
+ pr_info("get arm regulator failed\n");
+ pr_info("reboot set cluster0 rate=%lu, cluster1 rate=%lu, volt=%d\n",
+ dvfs_clk_get_rate(clk_cpu_dvfs_node[0]),
+ dvfs_clk_get_rate(clk_cpu_dvfs_node[1]), volt);
+
+ return 0;
+}
+
+static int rockchip_bl_cpufreq_reboot_notifier_event(struct notifier_block
+ *this, unsigned long event,
+ void *ptr)
+{
+ rockchip_set_system_status(SYS_STATUS_REBOOT);
+ rockchip_bl_cpufreq_reboot_limit_freq();
+
+ return NOTIFY_OK;
+};
+
+static struct notifier_block rockchip_bl_cpufreq_reboot_notifier = {
+ .notifier_call = rockchip_bl_cpufreq_reboot_notifier_event,
+};
+
+static struct cpufreq_driver rockchip_bl_cpufreq_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .verify = rockchip_bl_cpufreq_verify,
+ .target = rockchip_bl_cpufreq_target,
+ .get = rockchip_bl_cpufreq_get_rate,
+ .init = rockchip_bl_cpufreq_init,
+ .exit = rockchip_bl_cpufreq_exit,
+ .name = "rockchip-bl",
+ .have_governor_per_policy = true,
+ .attr = rockchip_bl_cpufreq_attr,
+};
+
+static const struct of_device_id rockchip_bl_cpufreq_match[] = {
+ {
+ .compatible = "rockchip,rk3368-cpufreq",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rockchip_bl_cpufreq_match);
+
+static int __init rockchip_bl_cpufreq_probe(struct platform_device *pdev)
+{
+ int ret, i;
+
+ for (i = 0; i < MAX_CLUSTERS; i++) {
+ if (!alloc_cpumask_var(&cluster_policy_mask[i], GFP_KERNEL))
+ return -ENOMEM;
+ }
+
+ register_reboot_notifier(&rockchip_bl_cpufreq_reboot_notifier);
+ register_pm_notifier(&rockchip_bl_cpufreq_pm_notifier);
+
+ ret = cpufreq_register_driver(&rockchip_bl_cpufreq_driver);
+
+#ifdef CONFIG_ROCKCHIP_CPUQUIET
+ ret = cpuquiet_register_governor(&rockchip_bl_balanced_governor);
+#endif
+
+ return ret;
+}
+
+static int rockchip_bl_cpufreq_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ free_cpumask_var(cluster_policy_mask[i]);
+ cpufreq_unregister_driver(&rockchip_bl_cpufreq_driver);
+ return 0;
+}
+
+static struct platform_driver rockchip_bl_cpufreq_platdrv = {
+ .driver = {
+ .name = "rockchip-bl-cpufreq",
+ .owner = THIS_MODULE,
+ .of_match_table = rockchip_bl_cpufreq_match,
+ },
+ .remove = rockchip_bl_cpufreq_remove,
+};
+
+module_platform_driver_probe(rockchip_bl_cpufreq_platdrv,
+ rockchip_bl_cpufreq_probe);
+
+MODULE_AUTHOR("Xiao Feng <xf@rock-chips.com>");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_ROCKCHIP_CPUQUIET
+extern struct cpumask hmp_slow_cpu_mask;
+
+enum cpu_speed_balance {
+ CPU_SPEED_BALANCED,
+ CPU_SPEED_BIASED,
+ CPU_SPEED_SKEWED,
+ CPU_SPEED_BOOST,
+};
+
+enum balanced_state {
+ IDLE,
+ DOWN,
+ UP,
+};
+
+struct idle_info {
+ u64 idle_last_us;
+ u64 idle_current_us;
+};
+
+static u64 idleinfo_timestamp_us;
+static u64 idleinfo_last_timestamp_us;
+static DEFINE_PER_CPU(struct idle_info, idleinfo);
+static DEFINE_PER_CPU(unsigned int, cpu_load);
+
+static struct timer_list load_timer;
+static bool load_timer_active;
+
+/* configurable parameters */
+static unsigned int balance_level = 60;
+static unsigned int idle_bottom_freq[MAX_CLUSTERS];
+static unsigned int idle_top_freq[MAX_CLUSTERS];
+static unsigned int cpu_freq[MAX_CLUSTERS];
+static unsigned long up_delay_jiffies;
+static unsigned long down_delay_jiffies;
+static unsigned long last_change_time_jiffies;
+static unsigned int load_sample_rate_jiffies = 20 / (MSEC_PER_SEC / HZ);
+static unsigned int little_high_load = 80;
+static unsigned int little_low_load = 20;
+static unsigned int big_low_load = 20;
+static struct workqueue_struct *rockchip_bl_balanced_wq;
+static struct delayed_work rockchip_bl_balanced_work;
+static enum balanced_state rockchip_bl_balanced_state;
+static struct kobject *rockchip_bl_balanced_kobj;
+static DEFINE_MUTEX(rockchip_bl_balanced_lock);
+static bool rockchip_bl_balanced_enable;
+
+#define GOVERNOR_NAME "bl_balanced"
+
+static u64 get_idle_us(int cpu)
+{
+ return get_cpu_idle_time(cpu, NULL, 1 /* io_busy */);
+}
+
+static void calculate_load_timer(unsigned long data)
+{
+ int i;
+ u64 elapsed_time;
+
+ if (!load_timer_active)
+ return;
+
+ idleinfo_last_timestamp_us = idleinfo_timestamp_us;
+ idleinfo_timestamp_us = ktime_to_us(ktime_get());
+ elapsed_time = idleinfo_timestamp_us - idleinfo_last_timestamp_us;
+
+ for_each_present_cpu(i) {
+ struct idle_info *iinfo = &per_cpu(idleinfo, i);
+ unsigned int *load = &per_cpu(cpu_load, i);
+ u64 idle_time;
+
+ iinfo->idle_last_us = iinfo->idle_current_us;
+ iinfo->idle_current_us = get_idle_us(i);
+
+ idle_time = iinfo->idle_current_us - iinfo->idle_last_us;
+ idle_time *= 100;
+ do_div(idle_time, elapsed_time);
+ if (idle_time > 100)
+ idle_time = 100;
+ *load = 100 - idle_time;
+ }
+ mod_timer(&load_timer, jiffies + load_sample_rate_jiffies);
+}
+
+static void start_load_timer(void)
+{
+ int i;
+
+ if (load_timer_active)
+ return;
+
+ idleinfo_timestamp_us = ktime_to_us(ktime_get());
+ for_each_present_cpu(i) {
+ struct idle_info *iinfo = &per_cpu(idleinfo, i);
+
+ iinfo->idle_current_us = get_idle_us(i);
+ }
+ mod_timer(&load_timer, jiffies + load_sample_rate_jiffies);
+
+ load_timer_active = true;
+}
+
+static void stop_load_timer(void)
+{
+ if (!load_timer_active)
+ return;
+
+ load_timer_active = false;
+ del_timer(&load_timer);
+}
+
+static unsigned int get_slowest_cpu(void)
+{
+ unsigned int cpu = nr_cpu_ids;
+ unsigned long minload = ULONG_MAX;
+ int i;
+
+ for_each_online_cpu(i) {
+ unsigned int load = per_cpu(cpu_load, i);
+
+ if ((i > 0) && (minload >= load)) {
+ cpu = i;
+ minload = load;
+ }
+ }
+
+ return cpu;
+}
+
+static unsigned int get_offline_big_cpu(void)
+{
+ struct cpumask big, offline_big;
+
+ cpumask_andnot(&big, cpu_present_mask, &hmp_slow_cpu_mask);
+ cpumask_andnot(&offline_big, &big, cpu_online_mask);
+ return cpumask_first(&offline_big);
+}
+
+static unsigned int cpu_highest_speed(void)
+{
+ unsigned int maxload = 0;
+ int i;
+
+ for_each_online_cpu(i) {
+ unsigned int load = per_cpu(cpu_load, i);
+
+ maxload = max(maxload, load);
+ }
+
+ return maxload;
+}
+
+static unsigned int count_slow_cpus(unsigned int limit)
+{
+ unsigned int cnt = 0;
+ int i;
+
+ for_each_online_cpu(i) {
+ unsigned int load = per_cpu(cpu_load, i);
+
+ if (load <= limit)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+#define NR_FSHIFT 2
+
+static unsigned int rt_profile[NR_CPUS] = {
+/* 1, 2, 3, 4, 5, 6, 7, 8 - on-line cpus target */
+ 5, 9, 10, 11, 12, 13, 14, UINT_MAX
+};
+
+static unsigned int nr_run_hysteresis = 2; /* 0.5 thread */
+static unsigned int nr_run_last;
+
+struct runnables_avg_sample {
+ u64 previous_integral;
+ unsigned int avg;
+ bool integral_sampled;
+ u64 prev_timestamp; /* ns */
+};
+
+static DEFINE_PER_CPU(struct runnables_avg_sample, avg_nr_sample);
+
+static unsigned int get_avg_nr_runnables(void)
+{
+ unsigned int i, sum = 0;
+ struct runnables_avg_sample *sample;
+ u64 integral, old_integral, delta_integral, delta_time, cur_time;
+
+ cur_time = ktime_to_ns(ktime_get());
+
+ for_each_online_cpu(i) {
+ sample = &per_cpu(avg_nr_sample, i);
+ integral = nr_running_integral(i);
+ old_integral = sample->previous_integral;
+ sample->previous_integral = integral;
+ delta_time = cur_time - sample->prev_timestamp;
+ sample->prev_timestamp = cur_time;
+
+ if (!sample->integral_sampled) {
+ sample->integral_sampled = true;
+ /* First sample to initialize prev_integral, skip
+ * avg calculation
+ */
+ continue;
+ }
+
+ if (integral < old_integral) {
+ /* Overflow */
+ delta_integral = (ULLONG_MAX - old_integral) + integral;
+ } else {
+ delta_integral = integral - old_integral;
+ }
+
+ /* Calculate average for the previous sample window */
+ do_div(delta_integral, delta_time);
+ sample->avg = delta_integral;
+ sum += sample->avg;
+ }
+
+ return sum;
+}
+
+static bool rockchip_bl_balanced_speed_boost(void)
+{
+ unsigned int cpu;
+ struct cpumask online_little;
+ unsigned int big_cpu;
+ bool has_low_load_little_cpu = false;
+
+ if (cpu_freq[L_CLUSTER] < idle_top_freq[L_CLUSTER])
+ return false;
+
+ cpumask_and(&online_little, cpu_online_mask, &hmp_slow_cpu_mask);
+
+ for_each_cpu(cpu, &online_little) {
+ if (per_cpu(cpu_load, cpu) < little_low_load) {
+ has_low_load_little_cpu = true;
+ break;
+ }
+ }
+
+ for_each_cpu(cpu, &online_little) {
+ unsigned int load;
+ unsigned int avg;
+ struct cpumask online_big;
+ bool has_low_load_big_cpu;
+
+ load = per_cpu(cpu_load, cpu);
+ /* skip low load cpu */
+ if (load < little_high_load)
+ continue;
+
+ avg = per_cpu(avg_nr_sample, cpu).avg;
+ /*
+ * Skip this CPU if some little CPU has low load and this one is
+ * busy only because it is running many tasks; those tasks can be
+ * migrated to the low-load CPU instead.
+ */
+ if (has_low_load_little_cpu &&
+ (avg >> (FSHIFT - NR_FSHIFT)) >= 4)
+ continue;
+
+ /*
+ * Found a CPU that is busy running a single thread;
+ * stop looking if no big CPU is offline.
+ */
+ if (get_offline_big_cpu() >= nr_cpu_ids)
+ break;
+
+ cpumask_andnot(&online_big,
+ cpu_online_mask, &hmp_slow_cpu_mask);
+
+ has_low_load_big_cpu = false;
+ for_each_cpu(big_cpu, &online_big) {
+ unsigned int big_load;
+
+ big_load = per_cpu(cpu_load, big_cpu);
+ if (big_load < big_low_load) {
+ has_low_load_big_cpu = true;
+ break;
+ }
+ }
+ /* if a big CPU already has low load, never bring up another one */
+ if (has_low_load_big_cpu)
+ break;
+
+ return true;
+ }
+
+ return false;
+}
+
+static enum cpu_speed_balance rockchip_bl_balanced_speed_balance(void)
+{
+ unsigned long highest_speed = cpu_highest_speed();
+ unsigned long balanced_speed = highest_speed * balance_level / 100;
+ unsigned long skewed_speed = balanced_speed / 2;
+ unsigned int nr_cpus = num_online_cpus();
+ unsigned int max_cpus = pm_qos_request(PM_QOS_MAX_ONLINE_CPUS);
+ unsigned int min_cpus = pm_qos_request(PM_QOS_MIN_ONLINE_CPUS);
+ unsigned int avg_nr_run = get_avg_nr_runnables();
+ unsigned int nr_run;
+
+ if (max_cpus > nr_cpu_ids || max_cpus == 0)
+ max_cpus = nr_cpu_ids;
+
+ if (rockchip_bl_balanced_speed_boost())
+ return CPU_SPEED_BOOST;
+
+ /*
+  * balanced: freq targets for all CPUs are above 60% of highest speed
+  * biased:   freq target for at least one CPU is below the 60% threshold
+  * skewed:   freq targets for at least 2 CPUs are below the 30% threshold
+  */
+ for (nr_run = 1; nr_run < ARRAY_SIZE(rt_profile); nr_run++) {
+ unsigned int nr_threshold = rt_profile[nr_run - 1];
+
+ if (nr_run_last <= nr_run)
+ nr_threshold += nr_run_hysteresis;
+ if (avg_nr_run <= (nr_threshold << (FSHIFT - NR_FSHIFT)))
+ break;
+ }
+ nr_run_last = nr_run;
+
+ if ((count_slow_cpus(skewed_speed) >= 2 ||
+ nr_run < nr_cpus ||
+ (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
+ cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) ||
+ nr_cpus > max_cpus) &&
+ nr_cpus > min_cpus)
+ return CPU_SPEED_SKEWED;
+
+ if ((count_slow_cpus(balanced_speed) >= 1 ||
+ nr_run <= nr_cpus ||
+ (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
+ cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) ||
+ nr_cpus == max_cpus) &&
+ nr_cpus >= min_cpus)
+ return CPU_SPEED_BIASED;
+
+ return CPU_SPEED_BALANCED;
+}
+
+static void rockchip_bl_balanced_work_func(struct work_struct *work)
+{
+ bool up = false;
+ unsigned int cpu = nr_cpu_ids;
+ unsigned long now = jiffies;
+ struct workqueue_struct *wq = rockchip_bl_balanced_wq;
+ struct delayed_work *dwork = to_delayed_work(work);
+ enum cpu_speed_balance balance;
+
+ mutex_lock(&rockchip_bl_balanced_lock);
+
+ if (!rockchip_bl_balanced_enable)
+ goto out;
+
+ switch (rockchip_bl_balanced_state) {
+ case IDLE:
+ break;
+ case DOWN:
+ cpu = get_slowest_cpu();
+ if (cpu < nr_cpu_ids) {
+ up = false;
+ queue_delayed_work(wq, dwork, up_delay_jiffies);
+ } else {
+ stop_load_timer();
+ }
+ break;
+ case UP:
+ balance = rockchip_bl_balanced_speed_balance();
+ switch (balance) {
+ case CPU_SPEED_BOOST:
+ cpu = get_offline_big_cpu();
+ if (cpu < nr_cpu_ids)
+ up = true;
+ break;
+ /* cpu speed is up and balanced - one more on-line */
+ case CPU_SPEED_BALANCED:
+ cpu = cpumask_next_zero(0, cpu_online_mask);
+ if (cpu < nr_cpu_ids)
+ up = true;
+ break;
+ /* cpu speed is up, but skewed - remove one core */
+ case CPU_SPEED_SKEWED:
+ cpu = get_slowest_cpu();
+ if (cpu < nr_cpu_ids)
+ up = false;
+ break;
+ /* cpu speed is up, but under-utilized - do nothing */
+ case CPU_SPEED_BIASED:
+ default:
+ break;
+ }
+ queue_delayed_work(wq, dwork, up_delay_jiffies);
+ break;
+ default:
+ pr_err("%s: invalid cpuquiet governor state %d\n",
+ __func__, rockchip_bl_balanced_state);
+ }
+
+ if (!up && ((now - last_change_time_jiffies) < down_delay_jiffies))
+ cpu = nr_cpu_ids;
+
+ if (cpu < nr_cpu_ids) {
+ last_change_time_jiffies = now;
+ if (up)
+ cpuquiet_wake_cpu(cpu, false);
+ else
+ cpuquiet_quiesence_cpu(cpu, false);
+ }
+
+out:
+ mutex_unlock(&rockchip_bl_balanced_lock);
+}
+
+static void rockchip_bl_balanced_cpufreq_transition(unsigned int cluster,
+ unsigned int new_cpu_freq)
+{
+ struct workqueue_struct *wq;
+ struct delayed_work *dwork;
+
+ mutex_lock(&rockchip_bl_balanced_lock);
+
+ if (!rockchip_bl_balanced_enable)
+ goto out;
+
+ wq = rockchip_bl_balanced_wq;
+ dwork = &rockchip_bl_balanced_work;
+ cpu_freq[cluster] = new_cpu_freq;
+
+ switch (rockchip_bl_balanced_state) {
+ case IDLE:
+ if (cpu_freq[B_CLUSTER] >= idle_top_freq[B_CLUSTER] ||
+ cpu_freq[L_CLUSTER] >= idle_top_freq[L_CLUSTER]) {
+ rockchip_bl_balanced_state = UP;
+ queue_delayed_work(wq, dwork, up_delay_jiffies);
+ start_load_timer();
+ } else if (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
+ cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) {
+ rockchip_bl_balanced_state = DOWN;
+ queue_delayed_work(wq, dwork, down_delay_jiffies);
+ start_load_timer();
+ }
+ break;
+ case DOWN:
+ if (cpu_freq[B_CLUSTER] >= idle_top_freq[B_CLUSTER] ||
+ cpu_freq[L_CLUSTER] >= idle_top_freq[L_CLUSTER]) {
+ rockchip_bl_balanced_state = UP;
+ queue_delayed_work(wq, dwork, up_delay_jiffies);
+ start_load_timer();
+ }
+ break;
+ case UP:
+ if (cpu_freq[B_CLUSTER] <= idle_bottom_freq[B_CLUSTER] &&
+ cpu_freq[L_CLUSTER] <= idle_bottom_freq[L_CLUSTER]) {
+ rockchip_bl_balanced_state = DOWN;
+ queue_delayed_work(wq, dwork, up_delay_jiffies);
+ start_load_timer();
+ }
+ break;
+ default:
+ pr_err("%s: invalid cpuquiet governor state %d\n",
+ __func__, rockchip_bl_balanced_state);
+ }
+
+out:
+ mutex_unlock(&rockchip_bl_balanced_lock);
+}
+
+static void delay_callback(struct cpuquiet_attribute *attr)
+{
+ unsigned long val;
+
+ if (attr) {
+ val = (*((unsigned long *)(attr->param)));
+ (*((unsigned long *)(attr->param))) = msecs_to_jiffies(val);
+ }
+}
+
+#define CPQ_BASIC_ATTRIBUTE_B(_name, _mode, _type) \
+ static struct cpuquiet_attribute _name ## _b_attr = { \
+ .attr = {.name = __stringify(_name ## _b), .mode = _mode },\
+ .show = show_ ## _type ## _attribute, \
+ .store = store_ ## _type ## _attribute, \
+ .param = &_name[B_CLUSTER], \
+}
+#define CPQ_BASIC_ATTRIBUTE_L(_name, _mode, _type) \
+ static struct cpuquiet_attribute _name ## _l_attr = { \
+ .attr = {.name = __stringify(_name ## _l), .mode = _mode },\
+ .show = show_ ## _type ## _attribute, \
+ .store = store_ ## _type ## _attribute, \
+ .param = &_name[L_CLUSTER], \
+}
+CPQ_BASIC_ATTRIBUTE(balance_level, 0644, uint);
+CPQ_BASIC_ATTRIBUTE_B(idle_bottom_freq, 0644, uint);
+CPQ_BASIC_ATTRIBUTE_L(idle_bottom_freq, 0644, uint);
+CPQ_BASIC_ATTRIBUTE_B(idle_top_freq, 0644, uint);
+CPQ_BASIC_ATTRIBUTE_L(idle_top_freq, 0644, uint);
+CPQ_BASIC_ATTRIBUTE(load_sample_rate_jiffies, 0644, uint);
+CPQ_BASIC_ATTRIBUTE(nr_run_hysteresis, 0644, uint);
+CPQ_BASIC_ATTRIBUTE(little_high_load, 0644, uint);
+CPQ_BASIC_ATTRIBUTE(little_low_load, 0644, uint);
+CPQ_BASIC_ATTRIBUTE(big_low_load, 0644, uint);
+CPQ_ATTRIBUTE(up_delay_jiffies, 0644, ulong, delay_callback);
+CPQ_ATTRIBUTE(down_delay_jiffies, 0644, ulong, delay_callback);
+
+#define MAX_BYTES 100
+
+static ssize_t show_rt_profile(struct cpuquiet_attribute *attr, char *buf)
+{
+ char buffer[MAX_BYTES];
+ unsigned int i;
+ int size = 0;
+
+ buffer[0] = 0;
+ for (i = 0; i < ARRAY_SIZE(rt_profile); i++) {
+ size += snprintf(buffer + size, sizeof(buffer) - size,
+ "%u ", rt_profile[i]);
+ }
+ return snprintf(buf, sizeof(buffer), "%s\n", buffer);
+}
+
+static ssize_t store_rt_profile(struct cpuquiet_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret, i = 0;
+ char *val, *str, input[MAX_BYTES];
+ unsigned int profile[ARRAY_SIZE(rt_profile)];
+
+ if (!count || count >= MAX_BYTES)
+ return -EINVAL;
+ strncpy(input, buf, count);
+ input[count] = '\0';
+ str = input;
+ memcpy(profile, rt_profile, sizeof(rt_profile));
+ while ((val = strsep(&str, " ")) != NULL) {
+ if (*val == '\0')
+ continue;
+ if (i == ARRAY_SIZE(rt_profile) - 1)
+ break;
+ ret = kstrtouint(val, 10, &profile[i]);
+ if (ret)
+ return -EINVAL;
+ i++;
+ }
+
+ memcpy(rt_profile, profile, sizeof(profile));
+
+ return count;
+}
+CPQ_ATTRIBUTE_CUSTOM(rt_profile, 0644,
+ show_rt_profile, store_rt_profile);
+
+static struct attribute *rockchip_bl_balanced_attributes[] = {
+ &balance_level_attr.attr,
+ &idle_bottom_freq_b_attr.attr,
+ &idle_bottom_freq_l_attr.attr,
+ &idle_top_freq_b_attr.attr,
+ &idle_top_freq_l_attr.attr,
+ &up_delay_jiffies_attr.attr,
+ &down_delay_jiffies_attr.attr,
+ &load_sample_rate_jiffies_attr.attr,
+ &nr_run_hysteresis_attr.attr,
+ &rt_profile_attr.attr,
+ &little_high_load_attr.attr,
+ &little_low_load_attr.attr,
+ &big_low_load_attr.attr,
+ NULL,
+};
+
+static const struct sysfs_ops rockchip_bl_balanced_sysfs_ops = {
+ .show = cpuquiet_auto_sysfs_show,
+ .store = cpuquiet_auto_sysfs_store,
+};
+
+static struct kobj_type rockchip_bl_balanced_ktype = {
+ .sysfs_ops = &rockchip_bl_balanced_sysfs_ops,
+ .default_attrs = rockchip_bl_balanced_attributes,
+};
+
+static int rockchip_bl_balanced_sysfs(void)
+{
+ int err;
+ struct kobject *kobj;
+
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+
+ if (!kobj)
+ return -ENOMEM;
+
+ err = cpuquiet_kobject_init(kobj, &rockchip_bl_balanced_ktype,
+ GOVERNOR_NAME);
+
+ if (err)
+ kfree(kobj);
+
+ rockchip_bl_balanced_kobj = kobj;
+
+ return err;
+}
+
+static void rockchip_bl_balanced_stop(void)
+{
+ mutex_lock(&rockchip_bl_balanced_lock);
+
+ rockchip_bl_balanced_enable = false;
+ /* now we can force the governor to be idle */
+ rockchip_bl_balanced_state = IDLE;
+
+ mutex_unlock(&rockchip_bl_balanced_lock);
+
+ cancel_delayed_work_sync(&rockchip_bl_balanced_work);
+
+ destroy_workqueue(rockchip_bl_balanced_wq);
+ rockchip_bl_balanced_wq = NULL;
+ del_timer_sync(&load_timer);
+
+ kobject_put(rockchip_bl_balanced_kobj);
+ kfree(rockchip_bl_balanced_kobj);
+ rockchip_bl_balanced_kobj = NULL;
+}
+
+static int rockchip_bl_balanced_start(void)
+{
+ int err, count, cluster;
+ struct cpufreq_frequency_table *table;
+ unsigned int initial_freq;
+
+ err = rockchip_bl_balanced_sysfs();
+ if (err)
+ return err;
+
+ up_delay_jiffies = msecs_to_jiffies(100);
+ down_delay_jiffies = msecs_to_jiffies(2000);
+
+ for (cluster = 0; cluster < MAX_CLUSTERS; cluster++) {
+ table = freq_table[cluster];
+ if (!table)
+ return -EINVAL;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END;
+ count++)
+ ;
+
+ if (count < 4)
+ return -EINVAL;
+
+ idle_top_freq[cluster] = table[(count / 2) - 1].frequency;
+ idle_bottom_freq[cluster] = table[(count / 2) - 2].frequency;
+ }
+
+ rockchip_bl_balanced_wq
+ = alloc_workqueue(GOVERNOR_NAME, WQ_UNBOUND | WQ_FREEZABLE, 1);
+ if (!rockchip_bl_balanced_wq)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&rockchip_bl_balanced_work,
+ rockchip_bl_balanced_work_func);
+
+ init_timer(&load_timer);
+ load_timer.function = calculate_load_timer;
+
+ mutex_lock(&rockchip_bl_balanced_lock);
+ rockchip_bl_balanced_enable = true;
+ if (clk_cpu_dvfs_node[L_CLUSTER])
+ cpu_freq[L_CLUSTER] =
+ clk_get_rate(clk_cpu_dvfs_node[L_CLUSTER]->clk) / 1000;
+ if (clk_cpu_dvfs_node[B_CLUSTER])
+ cpu_freq[B_CLUSTER] =
+ clk_get_rate(clk_cpu_dvfs_node[B_CLUSTER]->clk) / 1000;
+ mutex_unlock(&rockchip_bl_balanced_lock);
+
+ /* Kick start the state machine */
+ initial_freq = cpufreq_get(0);
+ if (initial_freq)
+ rockchip_bl_balanced_cpufreq_transition(L_CLUSTER,
+ initial_freq);
+
+ return 0;
+}
+
+static struct cpuquiet_governor rockchip_bl_balanced_governor = {
+ .name = GOVERNOR_NAME,
+ .start = rockchip_bl_balanced_start,
+ .stop = rockchip_bl_balanced_stop,
+ .owner = THIS_MODULE,
+};
+#endif
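
A closing note on the fixed-point math used by the bl_balanced governor's run-queue thresholds: assuming the kernel's usual FSHIFT of 11 (an assumption, since FSHIFT is defined outside this file), the rt_profile[] values are in quarter-task units:

/*
 * With FSHIFT == 11 and NR_FSHIFT == 2, the comparison in
 * rockchip_bl_balanced_speed_balance() uses nr_threshold << 9, and one
 * runnable task in the averaged integral corresponds to 1 << 11 == 2048:
 *
 *   rt_profile[0] = 5  -> 5 << 9 = 2560 -> 2560 / 2048 = 1.25 tasks
 *   rt_profile[1] = 9  -> 9 << 9 = 4608 -> 4608 / 2048 = 2.25 tasks
 *
 * nr_run_hysteresis = 2 therefore adds 0.5 task of hysteresis, which matches
 * the "0.5 thread" comment at its definition.
 */
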