smp/hotplug: Allow external multi-instance rollback
Currently the rollback of multi-instance states is handled inside
cpuhp_invoke_callback(). The problem is that when we want to allow an
explicit state change for rollback, we need to return from the function
without doing the rollback.

Change cpuhp_invoke_callback() to optionally return the multi-instance
state, such that rollback can be done from a subsequent call.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: bigeasy@linutronix.de
Cc: efault@gmx.de
Cc: rostedt@goodmis.org
Cc: max.byungchul.park@gmail.com
Link: https://lkml.kernel.org/r/20170920170546.720361181@infradead.org
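The shape of the change is easier to see outside the kernel sources. What follows is a minimal, self-contained C sketch of the idea only, not the kernel code: the walk over the instance list records in *lastp where it stopped instead of rolling back internally, so a separate, later call can decide how and when to roll back. All names here (invoke_all, bringup_one, teardown_one, rollback, struct node) are illustrative stand-ins rather than kernel APIs.

#include <stddef.h>

struct node { struct node *next; };

/* Hypothetical per-instance callbacks, for illustration only. */
static int bringup_one(struct node *n)   { (void)n; return 0; }
static void teardown_one(struct node *n) { (void)n; }

/*
 * Walk the instance list. With lastp == NULL a failure is simply reported
 * to the caller. With a non-NULL lastp a failure records the failing
 * instance in *lastp and returns, so rollback can be done from a
 * subsequent call; if *lastp is already set, the walk stops there.
 */
static int invoke_all(struct node *head, struct node **lastp)
{
	struct node *n;

	for (n = head; n; n = n->next) {
		if (lastp && n == *lastp)
			break;			/* stop where the previous pass got to */
		if (bringup_one(n)) {
			if (!lastp)
				return -1;	/* no lastp: just report the failure */
			*lastp = n;		/* remember how far we got */
			return -1;		/* leave rollback to the caller */
		}
	}
	if (lastp)
		*lastp = NULL;
	return 0;
}

/* A later, separate call can then undo exactly the instances that succeeded. */
static void rollback(struct node *head, struct node *failed)
{
	struct node *n;

	for (n = head; n && n != failed; n = n->next)
		teardown_one(n);
}

In the kernel change itself, passing NULL for the new lastp argument preserves the old behaviour (cpuhp_invoke_callback() rolls back internally via its err: path), which is why every pre-existing call site in the diff below simply gains a trailing NULL.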
parent fac1c20402
commit 96abb96854

1 changed file with 32 additions and 15 deletions: kernel/cpu.c
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -123,13 +123,16 @@ static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
 /**
  * cpuhp_invoke_callback _ Invoke the callbacks for a given state
  * @cpu:	The cpu for which the callback should be invoked
- * @step:	The step in the state machine
+ * @state:	The state to do callbacks for
  * @bringup:	True if the bringup callback should be invoked
+ * @node:	For multi-instance, do a single entry callback for install/remove
+ * @lastp:	For multi-instance rollback, remember how far we got
  *
  * Called from cpu hotplug and from the state register machinery.
  */
 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
-				 bool bringup, struct hlist_node *node)
+				 bool bringup, struct hlist_node *node,
+				 struct hlist_node **lastp)
 {
 	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
 	struct cpuhp_step *step = cpuhp_get_step(state);
@@ -138,6 +141,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 	int ret, cnt;
 
 	if (!step->multi_instance) {
+		WARN_ON_ONCE(lastp && *lastp);
 		cb = bringup ? step->startup.single : step->teardown.single;
 		if (!cb)
 			return 0;
@@ -152,6 +156,7 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 
 	/* Single invocation for instance add/remove */
 	if (node) {
+		WARN_ON_ONCE(lastp && *lastp);
 		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 		ret = cbm(cpu, node);
 		trace_cpuhp_exit(cpu, st->state, state, ret);
@@ -161,13 +166,23 @@ static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
 	/* State transition. Invoke on all instances */
 	cnt = 0;
 	hlist_for_each(node, &step->list) {
+		if (lastp && node == *lastp)
+			break;
+
 		trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
 		ret = cbm(cpu, node);
 		trace_cpuhp_exit(cpu, st->state, state, ret);
-		if (ret)
-			goto err;
+		if (ret) {
+			if (!lastp)
+				goto err;
+
+			*lastp = node;
+			return ret;
+		}
 		cnt++;
 	}
+	if (lastp)
+		*lastp = NULL;
 	return 0;
 err:
 	/* Rollback the instances if one failed */
@@ -323,7 +338,7 @@ static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
 		struct cpuhp_step *step = cpuhp_get_step(st->state);
 
 		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, true, NULL);
+			cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 	}
 }
 
@@ -334,7 +349,7 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 	int ret = 0;
 
 	for (; st->state > target; st->state--) {
-		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL);
+		ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 		if (ret) {
 			st->target = prev_state;
 			undo_cpu_down(cpu, st);
@@ -350,7 +365,7 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
 		struct cpuhp_step *step = cpuhp_get_step(st->state);
 
 		if (!step->skip_onerr)
-			cpuhp_invoke_callback(cpu, st->state, false, NULL);
+			cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 	}
 }
 
@@ -362,7 +377,7 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
 
 	while (st->state < target) {
 		st->state++;
-		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL);
+		ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 		if (ret) {
 			st->target = prev_state;
 			undo_cpu_up(cpu, st);
@@ -428,11 +443,13 @@ static void cpuhp_thread_fun(unsigned int cpu)
 		if (st->cb_state < CPUHP_AP_ONLINE) {
 			local_irq_disable();
 			ret = cpuhp_invoke_callback(cpu, st->cb_state,
-						    st->bringup, st->node);
+						    st->bringup, st->node,
+						    NULL);
 			local_irq_enable();
 		} else {
 			ret = cpuhp_invoke_callback(cpu, st->cb_state,
-						    st->bringup, st->node);
+						    st->bringup, st->node,
+						    NULL);
 		}
 	} else if (st->rollback) {
 		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -472,7 +489,7 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
 	 * we invoke the thread function directly.
 	 */
 	if (!st->thread)
-		return cpuhp_invoke_callback(cpu, state, bringup, node);
+		return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 
 	st->cb_state = state;
 	st->single = true;
@@ -595,7 +612,7 @@ static int take_cpu_down(void *_param)
 	st->state--;
 	/* Invoke the former CPU_DYING callbacks */
 	for (; st->state > target; st->state--)
-		cpuhp_invoke_callback(cpu, st->state, false, NULL);
+		cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
 
 	/* Give up timekeeping duties */
 	tick_handover_do_timer();
@@ -776,7 +793,7 @@ void notify_cpu_starting(unsigned int cpu)
 	rcu_cpu_starting(cpu);	/* Enables RCU usage on this CPU. */
 	while (st->state < target) {
 		st->state++;
-		cpuhp_invoke_callback(cpu, st->state, true, NULL);
+		cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
 	}
 }
 
@@ -1307,9 +1324,9 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
 	if (cpuhp_is_ap_state(state))
 		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
 	else
-		ret = cpuhp_invoke_callback(cpu, state, bringup, node);
+		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 #else
-	ret = cpuhp_invoke_callback(cpu, state, bringup, node);
+	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
 #endif
 	BUG_ON(ret && !bringup);
 	return ret;