Skip to content

Commit 3e339b5

Browse files
committed
softirq: Use hotplug thread infrastructure
[ paulmck: Call rcu_note_context_switch() with interrupts enabled. ] Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Reviewed-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Namhyung Kim <namhyung@kernel.org> Link: http://lkml.kernel.org/r/20120716103948.456416747@linutronix.de Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1 parent 3180d89 commit 3e339b5

File tree

1 file changed

+27
-84
lines changed

1 file changed

+27
-84
lines changed

kernel/softirq.c

Lines changed: 27 additions & 84 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@
2323
#include <linux/rcupdate.h>
2424
#include <linux/ftrace.h>
2525
#include <linux/smp.h>
26+
#include <linux/smpboot.h>
2627
#include <linux/tick.h>
2728

2829
#define CREATE_TRACE_POINTS
@@ -742,49 +743,22 @@ void __init softirq_init(void)
742743
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
743744
}
744745

745-
/*
 * ksoftirqd_should_run - smpboot callback: does this CPU's thread have work?
 * @cpu: CPU the per-cpu ksoftirqd thread is bound to (unused; the check is
 *       inherently per-cpu via local_softirq_pending())
 *
 * Replaces the old open-coded run_ksoftirqd() kthread loop: the smpboot
 * infrastructure now owns the sleep/wake/park loop and merely asks us
 * whether there is pending work.
 *
 * Returns nonzero when softirqs are pending on this CPU, which causes the
 * smpboot loop to invoke run_ksoftirqd().
 */
static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}
777750

778-
/*
 * run_ksoftirqd - smpboot callback: process pending softirqs for this CPU.
 * @cpu: CPU the per-cpu ksoftirqd thread is bound to
 *
 * Called by the smpboot thread loop after ksoftirqd_should_run() reported
 * pending work.  Interrupts are disabled around the re-check and
 * __do_softirq() so newly raised softirqs cannot race with the dispatch.
 * On the work path we tell RCU about the quiescent state and offer a
 * reschedule point before returning to the smpboot loop.
 */
static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		__do_softirq();
		rcu_note_context_switch(cpu);
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}
789763

790764
#ifdef CONFIG_HOTPLUG_CPU
@@ -850,65 +824,34 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
850824
unsigned long action,
851825
void *hcpu)
852826
{
853-
int hotcpu = (unsigned long)hcpu;
854-
struct task_struct *p;
855-
856827
switch (action) {
857-
case CPU_UP_PREPARE:
858-
case CPU_UP_PREPARE_FROZEN:
859-
p = kthread_create_on_node(run_ksoftirqd,
860-
hcpu,
861-
cpu_to_node(hotcpu),
862-
"ksoftirqd/%d", hotcpu);
863-
if (IS_ERR(p)) {
864-
printk("ksoftirqd for %i failed\n", hotcpu);
865-
return notifier_from_errno(PTR_ERR(p));
866-
}
867-
kthread_bind(p, hotcpu);
868-
per_cpu(ksoftirqd, hotcpu) = p;
869-
break;
870-
case CPU_ONLINE:
871-
case CPU_ONLINE_FROZEN:
872-
wake_up_process(per_cpu(ksoftirqd, hotcpu));
873-
break;
874828
#ifdef CONFIG_HOTPLUG_CPU
875-
case CPU_UP_CANCELED:
876-
case CPU_UP_CANCELED_FROZEN:
877-
if (!per_cpu(ksoftirqd, hotcpu))
878-
break;
879-
/* Unbind so it can run. Fall thru. */
880-
kthread_bind(per_cpu(ksoftirqd, hotcpu),
881-
cpumask_any(cpu_online_mask));
882829
case CPU_DEAD:
883-
case CPU_DEAD_FROZEN: {
884-
static const struct sched_param param = {
885-
.sched_priority = MAX_RT_PRIO-1
886-
};
887-
888-
p = per_cpu(ksoftirqd, hotcpu);
889-
per_cpu(ksoftirqd, hotcpu) = NULL;
890-
sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
891-
kthread_stop(p);
892-
takeover_tasklets(hotcpu);
830+
case CPU_DEAD_FROZEN:
831+
takeover_tasklets((unsigned long)hcpu);
893832
break;
894-
}
895833
#endif /* CONFIG_HOTPLUG_CPU */
896-
}
834+
}
897835
return NOTIFY_OK;
898836
}
899837

900838
/* Hotplug notifier: only tasklet takeover remains, see cpu_callback(). */
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
903841

842+
static struct smp_hotplug_thread softirq_threads = {
843+
.store = &ksoftirqd,
844+
.thread_should_run = ksoftirqd_should_run,
845+
.thread_fn = run_ksoftirqd,
846+
.thread_comm = "ksoftirqd/%u",
847+
};
848+
904849
static __init int spawn_ksoftirqd(void)
905850
{
906-
void *cpu = (void *)(long)smp_processor_id();
907-
int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
908-
909-
BUG_ON(err != NOTIFY_OK);
910-
cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
911851
register_cpu_notifier(&cpu_nfb);
852+
853+
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
854+
912855
return 0;
913856
}
914857
early_initcall(spawn_ksoftirqd);

0 commit comments

Comments
 (0)