Commit 2e1a348

cpu/hotplug: Split out the state walk into functions

We need that for running callbacks on the AP and the BP.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: Rik van Riel <riel@redhat.com>
Cc: Rafael Wysocki <rafael.j.wysocki@intel.com>
Cc: "Srivatsa S. Bhat" <srivatsa@mit.edu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/20160226182341.374946234@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 parent 931ef16 commit 2e1a348
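
To make the refactoring easier to follow outside the diff, below is a minimal, self-contained userspace sketch of the same pattern: the state walk is a plain loop over a table of steps, the table is passed in as a parameter, and a failed step rolls the state back through the steps already completed. All names here (the demo_* identifiers, the three-entry table) are invented for illustration and are not the kernel API; only the "up" direction is shown.

/*
 * Sketch only: mirrors the shape of cpuhp_up_callbacks()/undo_cpu_up(),
 * not the kernel implementation.
 */
#include <stdio.h>

struct demo_step {
        const char *name;
        int (*startup)(void);
        int (*teardown)(void);
};

struct demo_state {
        int state;      /* last successfully completed step */
        int target;     /* step we are trying to reach */
};

/* Roll back the steps completed so far after a failed bring-up. */
static void demo_undo_up(struct demo_state *st, struct demo_step *steps)
{
        for (st->state--; st->state > st->target; st->state--) {
                if (steps[st->state].teardown)
                        steps[st->state].teardown();
        }
}

/* Walk the table upwards to @target; on failure, reset the target and undo. */
static int demo_up_callbacks(struct demo_state *st, struct demo_step *steps,
                             int target)
{
        int prev_state = st->state;
        int ret = 0;

        while (st->state < target) {
                st->state++;
                ret = steps[st->state].startup ? steps[st->state].startup() : 0;
                if (ret) {
                        st->target = prev_state;
                        demo_undo_up(st, steps);
                        break;
                }
        }
        return ret;
}

static int say_up(void)   { puts("startup");  return 0; }
static int say_down(void) { puts("teardown"); return 0; }

int main(void)
{
        struct demo_step steps[] = {
                { "offline", NULL,   NULL     },        /* index 0: base state */
                { "prepare", say_up, say_down },
                { "online",  say_up, say_down },
        };
        struct demo_state st = { .state = 0, .target = 2 };

        return demo_up_callbacks(&st, steps, st.target);
}

Because the step table is a parameter rather than a hard-coded global, the same walker can later be pointed at a different table without duplicating the loop, which is what the changelog's "AP and BP" remark is about.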

kernel/cpu.c

Lines changed: 68 additions & 43 deletions
@@ -329,10 +329,74 @@ static int bringup_cpu(unsigned int cpu)
         return 0;
 }
 
+/*
+ * Hotplug state machine related functions
+ */
+static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st,
+                          struct cpuhp_step *steps)
+{
+        for (st->state++; st->state < st->target; st->state++) {
+                struct cpuhp_step *step = steps + st->state;
+
+                if (!step->skip_onerr)
+                        cpuhp_invoke_callback(cpu, st->state, step->startup);
+        }
+}
+
+static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+                                struct cpuhp_step *steps, enum cpuhp_state target)
+{
+        enum cpuhp_state prev_state = st->state;
+        int ret = 0;
+
+        for (; st->state > target; st->state--) {
+                struct cpuhp_step *step = steps + st->state;
+
+                ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
+                if (ret) {
+                        st->target = prev_state;
+                        undo_cpu_down(cpu, st, steps);
+                        break;
+                }
+        }
+        return ret;
+}
+
+static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st,
+                        struct cpuhp_step *steps)
+{
+        for (st->state--; st->state > st->target; st->state--) {
+                struct cpuhp_step *step = steps + st->state;
+
+                if (!step->skip_onerr)
+                        cpuhp_invoke_callback(cpu, st->state, step->teardown);
+        }
+}
+
+static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
+                              struct cpuhp_step *steps, enum cpuhp_state target)
+{
+        enum cpuhp_state prev_state = st->state;
+        int ret = 0;
+
+        while (st->state < target) {
+                struct cpuhp_step *step;
+
+                st->state++;
+                step = steps + st->state;
+                ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
+                if (ret) {
+                        st->target = prev_state;
+                        undo_cpu_up(cpu, st, steps);
+                        break;
+                }
+        }
+        return ret;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 EXPORT_SYMBOL(register_cpu_notifier);
 EXPORT_SYMBOL(__register_cpu_notifier);
-
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
         cpu_maps_update_begin();
@@ -537,15 +601,6 @@ static int notify_dead(unsigned int cpu)
 #endif
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-        for (st->state++; st->state < st->target; st->state++) {
-                struct cpuhp_step *step = cpuhp_bp_states + st->state;
-
-                if (!step->skip_onerr)
-                        cpuhp_invoke_callback(cpu, st->state, step->startup);
-        }
-}
 
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
@@ -567,16 +622,8 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 
         prev_state = st->state;
         st->target = target;
-        for (; st->state > st->target; st->state--) {
-                struct cpuhp_step *step = cpuhp_bp_states + st->state;
+        ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
 
-                ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
-                if (ret) {
-                        st->target = prev_state;
-                        undo_cpu_down(cpu, st);
-                        break;
-                }
-        }
         hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 
         cpu_hotplug_done();
@@ -645,22 +692,12 @@ static int cpuhp_set_cpu_active(unsigned int cpu)
         return 0;
 }
 
-static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
-{
-        for (st->state--; st->state > st->target; st->state--) {
-                struct cpuhp_step *step = cpuhp_bp_states + st->state;
-
-                if (!step->skip_onerr)
-                        cpuhp_invoke_callback(cpu, st->state, step->teardown);
-        }
-}
-
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 {
         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
         struct task_struct *idle;
-        int prev_state, ret = 0;
+        int ret = 0;
 
         cpu_hotplug_begin();
 
@@ -687,20 +724,8 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 
         cpuhp_tasks_frozen = tasks_frozen;
 
-        prev_state = st->state;
         st->target = target;
-        while (st->state < st->target) {
-                struct cpuhp_step *step;
-
-                st->state++;
-                step = cpuhp_bp_states + st->state;
-                ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
-                if (ret) {
-                        st->target = prev_state;
-                        undo_cpu_up(cpu, st);
-                        break;
-                }
-        }
+        ret = cpuhp_up_callbacks(cpu, st, cpuhp_bp_states, target);
 out:
         cpu_hotplug_done();
         return ret;
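
Net effect: _cpu_down() and _cpu_up() each shrink to a single call into the shared walkers (cpuhp_down_callbacks() and cpuhp_up_callbacks()), with the cpuhp_bp_states table passed explicitly instead of being hard-coded inside the loop. Because the table is now an argument, the same walkers can presumably be driven with a different step table for callbacks that have to run on the hotplugged CPU itself, which is the motivation stated in the changelog ("running callbacks on the AP and the BP"); wiring up such an AP-side table is not part of this patch.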
