Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	218
1 file changed, 183 insertions(+), 35 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d36d8e0abfb8..c506485eaa75 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -52,6 +52,7 @@ struct cpuhp_cpu_state {
bool rollback;
bool single;
bool bringup;
+ struct hlist_node *node;
enum cpuhp_state cb_state;
int result;
struct completion done;
@@ -70,11 +71,21 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
* @cant_stop: Bringup/teardown can't be stopped at this step
*/
struct cpuhp_step {
- const char *name;
- int (*startup)(unsigned int cpu);
- int (*teardown)(unsigned int cpu);
- bool skip_onerr;
- bool cant_stop;
+ const char *name;
+ union {
+ int (*startup)(unsigned int cpu);
+ int (*startup_multi)(unsigned int cpu,
+ struct hlist_node *node);
+ };
+ union {
+ int (*teardown)(unsigned int cpu);
+ int (*teardown_multi)(unsigned int cpu,
+ struct hlist_node *node);
+ };
+ struct hlist_head list;
+ bool skip_onerr;
+ bool cant_stop;
+ bool multi_instance;
};
static DEFINE_MUTEX(cpuhp_state_mutex);
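
The two unions above let one cpuhp_step carry either the classic per-cpu callbacks or the new per-instance variants that also receive an hlist_node. A multi-instance client embeds such a node in its per-device data and recovers the containing structure with hlist_entry(). A minimal sketch of what those callbacks could look like; the my_dev type and the function names are illustrative assumptions, not part of this patch:

#include <linux/list.h>

struct my_dev {
	struct hlist_node node;		/* linked into cpuhp_step::list */
	bool cpu_ready;			/* illustrative per-instance state */
};

static int my_dev_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct my_dev *dev = hlist_entry(node, struct my_dev, node);

	dev->cpu_ready = true;		/* per-instance bringup work goes here */
	return 0;
}

static int my_dev_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	struct my_dev *dev = hlist_entry(node, struct my_dev, node);

	dev->cpu_ready = false;		/* per-instance teardown work goes here */
	return 0;
}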
@@ -104,20 +115,59 @@ static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
* @step: The step in the state machine
* @bringup: True if the bringup callback should be invoked
*
- * Called from cpu hotplug and from the state register machinery
+ * Called from cpu hotplug and from the state register machinery.
*/
static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
- bool bringup)
+ bool bringup, struct hlist_node *node)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
struct cpuhp_step *step = cpuhp_get_step(state);
- int (*cb)(unsigned int cpu) = bringup ? step->startup : step->teardown;
- int ret = 0;
-
- if (cb) {
+ int (*cbm)(unsigned int cpu, struct hlist_node *node);
+ int (*cb)(unsigned int cpu);
+ int ret, cnt;
+
+ if (!step->multi_instance) {
+ cb = bringup ? step->startup : step->teardown;
+ if (!cb)
+ return 0;
trace_cpuhp_enter(cpu, st->target, state, cb);
ret = cb(cpu);
trace_cpuhp_exit(cpu, st->state, state, ret);
+ return ret;
+ }
+ cbm = bringup ? step->startup_multi : step->teardown_multi;
+ if (!cbm)
+ return 0;
+
+ /* Single invocation for instance add/remove */
+ if (node) {
+ trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
+ ret = cbm(cpu, node);
+ trace_cpuhp_exit(cpu, st->state, state, ret);
+ return ret;
+ }
+
+ /* State transition. Invoke on all instances */
+ cnt = 0;
+ hlist_for_each(node, &step->list) {
+ trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
+ ret = cbm(cpu, node);
+ trace_cpuhp_exit(cpu, st->state, state, ret);
+ if (ret)
+ goto err;
+ cnt++;
+ }
+ return 0;
+err:
+ /* Rollback the instances if one failed */
+ cbm = !bringup ? step->startup_multi : step->teardown_multi;
+ if (!cbm)
+ return ret;
+
+ hlist_for_each(node, &step->list) {
+ if (!cnt--)
+ break;
+ cbm(cpu, node);
}
return ret;
}
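
On a full state transition for a multi-instance step, cpuhp_invoke_callback() above walks every registered instance and, if one of them fails, walks the list again invoking the opposite-direction callback, but only on the cnt instances that already succeeded. The same counting-rollback pattern, shown as a generic self-contained C sketch (the names are made up for illustration; this is not kernel code):

/* Apply cb to items 0..n-1; on failure, undo only the ones that succeeded. */
static int apply_all(int n, int (*cb)(int), int (*undo)(int))
{
	int i, done = 0, ret = 0;

	for (i = 0; i < n; i++) {
		ret = cb(i);
		if (ret)
			break;
		done++;		/* counts successful invocations, like cnt above */
	}
	if (!ret)
		return 0;

	for (i = 0; i < done; i++)	/* mirrors the "if (!cnt--) break" walk */
		undo(i);
	return ret;
}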
@@ -398,7 +448,7 @@ static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
struct cpuhp_step *step = cpuhp_get_step(st->state);
if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, true);
+ cpuhp_invoke_callback(cpu, st->state, true, NULL);
}
}
@@ -409,7 +459,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
int ret = 0;
for (; st->state > target; st->state--) {
- ret = cpuhp_invoke_callback(cpu, st->state, false);
+ ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
if (ret) {
st->target = prev_state;
undo_cpu_down(cpu, st);
@@ -425,7 +475,7 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
struct cpuhp_step *step = cpuhp_get_step(st->state);
if (!step->skip_onerr)
- cpuhp_invoke_callback(cpu, st->state, false);
+ cpuhp_invoke_callback(cpu, st->state, false, NULL);
}
}
@@ -437,7 +487,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
while (st->state < target) {
st->state++;
- ret = cpuhp_invoke_callback(cpu, st->state, true);
+ ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
if (ret) {
st->target = prev_state;
undo_cpu_up(cpu, st);
@@ -502,11 +552,11 @@ static void cpuhp_thread_fun(unsigned int cpu)
if (st->cb_state < CPUHP_AP_ONLINE) {
local_irq_disable();
ret = cpuhp_invoke_callback(cpu, st->cb_state,
- st->bringup);
+ st->bringup, st->node);
local_irq_enable();
} else {
ret = cpuhp_invoke_callback(cpu, st->cb_state,
- st->bringup);
+ st->bringup, st->node);
}
} else if (st->rollback) {
BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -534,7 +584,8 @@ static void cpuhp_thread_fun(unsigned int cpu)
/* Invoke a single callback on a remote cpu */
static int
-cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup)
+cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
+ struct hlist_node *node)
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
@@ -546,11 +597,12 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup)
* we invoke the thread function directly.
*/
if (!st->thread)
- return cpuhp_invoke_callback(cpu, state, bringup);
+ return cpuhp_invoke_callback(cpu, state, bringup, node);
st->cb_state = state;
st->single = true;
st->bringup = bringup;
+ st->node = node;
/*
* Make sure the above stores are visible before should_run becomes
@@ -726,7 +778,7 @@ static int take_cpu_down(void *_param)
st->state--;
/* Invoke the former CPU_DYING callbacks */
for (; st->state > target; st->state--)
- cpuhp_invoke_callback(cpu, st->state, false);
+ cpuhp_invoke_callback(cpu, st->state, false, NULL);
/* Give up timekeeping duties */
tick_handover_do_timer();
@@ -921,7 +973,7 @@ void notify_cpu_starting(unsigned int cpu)
while (st->state < target) {
st->state++;
- cpuhp_invoke_callback(cpu, st->state, true);
+ cpuhp_invoke_callback(cpu, st->state, true, NULL);
}
}
@@ -1386,7 +1438,8 @@ static int cpuhp_cb_check(enum cpuhp_state state)
static void cpuhp_store_callbacks(enum cpuhp_state state,
const char *name,
int (*startup)(unsigned int cpu),
- int (*teardown)(unsigned int cpu))
+ int (*teardown)(unsigned int cpu),
+ bool multi_instance)
{
/* (Un)Install the callbacks for further cpu hotplug operations */
struct cpuhp_step *sp;
@@ -1396,6 +1449,8 @@ static void cpuhp_store_callbacks(enum cpuhp_state state,
sp->startup = startup;
sp->teardown = teardown;
sp->name = name;
+ sp->multi_instance = multi_instance;
+ INIT_HLIST_HEAD(&sp->list);
mutex_unlock(&cpuhp_state_mutex);
}
@@ -1408,7 +1463,8 @@ static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
* Call the startup/teardown function for a step either on the AP or
* on the current CPU.
*/
-static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup)
+static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
+ struct hlist_node *node)
{
struct cpuhp_step *sp = cpuhp_get_step(state);
int ret;
@@ -1421,11 +1477,11 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup)
*/
#ifdef CONFIG_SMP
if (cpuhp_is_ap_state(state))
- ret = cpuhp_invoke_ap_callback(cpu, state, bringup);
+ ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
else
- ret = cpuhp_invoke_callback(cpu, state, bringup);
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#else
- ret = cpuhp_invoke_callback(cpu, state, bringup);
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node);
#endif
BUG_ON(ret && !bringup);
return ret;
@@ -1436,7 +1492,8 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup)
*
* Note: The teardown callbacks for rollback are not allowed to fail!
*/
-static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state)
+static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
+ struct hlist_node *node)
{
int cpu;
@@ -1450,7 +1507,7 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state)
/* Did we invoke the startup call on that cpu ? */
if (cpustate >= state)
- cpuhp_issue_call(cpu, state, false);
+ cpuhp_issue_call(cpu, state, false, node);
}
}
@@ -1477,6 +1534,52 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
return -ENOSPC;
}
+int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ bool invoke)
+{
+ struct cpuhp_step *sp;
+ int cpu;
+ int ret;
+
+ sp = cpuhp_get_step(state);
+ if (sp->multi_instance == false)
+ return -EINVAL;
+
+ get_online_cpus();
+
+ if (!invoke || !sp->startup_multi)
+ goto add_node;
+
+ /*
+ * Try to call the startup callback for each present cpu
+ * depending on the hotplug state of the cpu.
+ */
+ for_each_present_cpu(cpu) {
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int cpustate = st->state;
+
+ if (cpustate < state)
+ continue;
+
+ ret = cpuhp_issue_call(cpu, state, true, node);
+ if (ret) {
+ if (sp->teardown_multi)
+ cpuhp_rollback_install(cpu, state, node);
+ goto err;
+ }
+ }
+add_node:
+ ret = 0;
+ mutex_lock(&cpuhp_state_mutex);
+ hlist_add_head(node, &sp->list);
+ mutex_unlock(&cpuhp_state_mutex);
+
+err:
+ put_online_cpus();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+
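
From a driver's perspective, __cpuhp_state_add_instance() above is what a per-device probe path would call once a multi-instance state has been registered (the registration itself is sketched after __cpuhp_setup_state() further down). A hedged sketch, assuming the cpuhp_state_add_instance() wrapper from the accompanying header change and the hypothetical my_dev driver from the earlier sketch:

static int my_dev_probe(struct my_dev *dev)
{
	/* my_dev_hp_state is the multi-instance state set up at driver init
	 * time (see the registration sketch below). This runs
	 * my_dev_cpu_online(cpu, &dev->node) on every present CPU that has
	 * already reached the state, and rolls back if one of them fails. */
	return cpuhp_state_add_instance(my_dev_hp_state, &dev->node);
}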
/**
* __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
* @state: The state to setup
@@ -1490,7 +1593,8 @@ static int cpuhp_reserve_state(enum cpuhp_state state)
int __cpuhp_setup_state(enum cpuhp_state state,
const char *name, bool invoke,
int (*startup)(unsigned int cpu),
- int (*teardown)(unsigned int cpu))
+ int (*teardown)(unsigned int cpu),
+ bool multi_instance)
{
int cpu, ret = 0;
int dyn_state = 0;
@@ -1509,7 +1613,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
state = ret;
}
- cpuhp_store_callbacks(state, name, startup, teardown);
+ cpuhp_store_callbacks(state, name, startup, teardown, multi_instance);
if (!invoke || !startup)
goto out;
@@ -1525,11 +1629,11 @@ int __cpuhp_setup_state(enum cpuhp_state state,
if (cpustate < state)
continue;
- ret = cpuhp_issue_call(cpu, state, true);
+ ret = cpuhp_issue_call(cpu, state, true, NULL);
if (ret) {
if (teardown)
- cpuhp_rollback_install(cpu, state);
- cpuhp_store_callbacks(state, NULL, NULL, NULL);
+ cpuhp_rollback_install(cpu, state, NULL);
+ cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
goto out;
}
}
@@ -1541,6 +1645,42 @@ out:
}
EXPORT_SYMBOL(__cpuhp_setup_state);
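
With the extra multi_instance argument, __cpuhp_setup_state() can register a state whose callbacks follow the per-instance prototype; because startup and startup_multi share a union, both flavours are stored through the same slot. The convenience wrappers added alongside this patch in the cpuhotplug header (for example cpuhp_setup_state_multi()) do the required cast for the caller and are not part of this file. A rough init-time sketch, continuing the hypothetical my_dev driver and assuming such a wrapper:

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state my_dev_hp_state;	/* used by the probe sketch above */

static int __init my_driver_init(void)
{
	int ret;

	/* Reserve a dynamic AP state; no instances exist yet, so no
	 * callbacks are invoked at this point. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "mydrv:online",
				      my_dev_cpu_online, my_dev_cpu_offline);
	if (ret < 0)
		return ret;
	my_dev_hp_state = ret;
	return 0;
}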
+int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ struct hlist_node *node, bool invoke)
+{
+ struct cpuhp_step *sp = cpuhp_get_step(state);
+ int cpu;
+
+ BUG_ON(cpuhp_cb_check(state));
+
+ if (!sp->multi_instance)
+ return -EINVAL;
+
+ get_online_cpus();
+ if (!invoke || !cpuhp_get_teardown_cb(state))
+ goto remove;
+ /*
+ * Call the teardown callback for each present cpu depending
+ * on the hotplug state of the cpu. This function is not
+ * allowed to fail currently!
+ */
+ for_each_present_cpu(cpu) {
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+ int cpustate = st->state;
+
+ if (cpustate >= state)
+ cpuhp_issue_call(cpu, state, false, node);
+ }
+
+remove:
+ mutex_lock(&cpuhp_state_mutex);
+ hlist_del(node);
+ mutex_unlock(&cpuhp_state_mutex);
+ put_online_cpus();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
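
__cpuhp_state_remove_instance() is the mirror image of the add path: with invoke set it runs the teardown_multi callback on every present CPU at or above the state, then unlinks the node. Every instance has to be removed this way before the state itself is torn down, otherwise the WARN in __cpuhp_remove_state() below triggers. A hedged sketch of the remove path, again assuming the header-side wrappers (cpuhp_state_remove_instance(), cpuhp_remove_multi_state()) and the hypothetical my_dev driver:

static void my_dev_remove(struct my_dev *dev)
{
	/* Runs my_dev_cpu_offline() wherever this instance was brought up
	 * and removes dev->node from the state's instance list. */
	cpuhp_state_remove_instance(my_dev_hp_state, &dev->node);
}

static void __exit my_driver_exit(void)
{
	/* All instances are gone by now; drop the state itself. */
	cpuhp_remove_multi_state(my_dev_hp_state);
}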
/**
* __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
* @state: The state to remove
@@ -1552,12 +1692,20 @@ EXPORT_SYMBOL(__cpuhp_setup_state);
*/
void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
{
+ struct cpuhp_step *sp = cpuhp_get_step(state);
int cpu;
BUG_ON(cpuhp_cb_check(state));
get_online_cpus();
+ if (sp->multi_instance) {
+ WARN(!hlist_empty(&sp->list),
+ "Error: Removing state %d which has instances left.\n",
+ state);
+ goto remove;
+ }
+
if (!invoke || !cpuhp_get_teardown_cb(state))
goto remove;
@@ -1571,10 +1719,10 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
int cpustate = st->state;
if (cpustate >= state)
- cpuhp_issue_call(cpu, state, false);
+ cpuhp_issue_call(cpu, state, false, NULL);
}
remove:
- cpuhp_store_callbacks(state, NULL, NULL, NULL);
+ cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
put_online_cpus();
}
EXPORT_SYMBOL(__cpuhp_remove_state);