@@ -23,6 +23,7 @@
 #include <linux/rcupdate.h>
 #include <linux/ftrace.h>
 #include <linux/smp.h>
+#include <linux/smpboot.h>
 #include <linux/tick.h>
 
 #define CREATE_TRACE_POINTS
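
The newly included <linux/smpboot.h> declares struct smp_hotplug_thread and smpboot_register_percpu_thread(), the generic per-CPU hotplug thread infrastructure that the rest of this patch converts ksoftirqd to.
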
@@ -742,49 +743,22 @@ void __init softirq_init(void)
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int run_ksoftirqd(void * __bind_cpu)
+static int ksoftirqd_should_run(unsigned int cpu)
 {
-	set_current_state(TASK_INTERRUPTIBLE);
-
-	while (!kthread_should_stop()) {
-		preempt_disable();
-		if (!local_softirq_pending()) {
-			schedule_preempt_disabled();
-		}
-
-		__set_current_state(TASK_RUNNING);
-
-		while (local_softirq_pending()) {
-			/* Preempt disable stops cpu going offline.
-			   If already offline, we'll be on wrong CPU:
-			   don't process */
-			if (cpu_is_offline((long)__bind_cpu))
-				goto wait_to_die;
-			local_irq_disable();
-			if (local_softirq_pending())
-				__do_softirq();
-			local_irq_enable();
-			sched_preempt_enable_no_resched();
-			cond_resched();
-			preempt_disable();
-			rcu_note_context_switch((long)__bind_cpu);
-		}
-		preempt_enable();
-		set_current_state(TASK_INTERRUPTIBLE);
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	return local_softirq_pending();
+}
 
-wait_to_die:
-	preempt_enable();
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
+static void run_ksoftirqd(unsigned int cpu)
+{
+	local_irq_disable();
+	if (local_softirq_pending()) {
+		__do_softirq();
+		rcu_note_context_switch(cpu);
+		local_irq_enable();
+		cond_resched();
+		return;
 	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
+	local_irq_enable();
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
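
With this split, the loop boilerplate that run_ksoftirqd() used to open-code (sleeping when idle, kthread_should_stop() handling, preemption juggling) moves into the generic smpboot code, and ksoftirqd is reduced to a "should I run?" predicate plus a work function. A rough sketch, assuming the kernel/smpboot.c internals of this era, of how the generic per-CPU thread loop drives the two callbacks (parking, stopping, and the optional setup/cleanup hooks elided):

	/* Simplified sketch of smpboot_thread_fn() in kernel/smpboot.c;
	 * not the verbatim implementation. The real loop also handles
	 * kthread parking/stopping and returns through that path. */
	static int smpboot_thread_fn(void *data)
	{
		struct smpboot_thread_data *td = data;	/* fields as in kernel/smpboot.c */
		struct smp_hotplug_thread *ht = td->ht;

		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);
			preempt_disable();
			if (!ht->thread_should_run(td->cpu)) {
				/* No work pending: sleep until woken. */
				preempt_enable();
				schedule();
			} else {
				/* Work pending: run the subsystem handler. */
				__set_current_state(TASK_RUNNING);
				preempt_enable();
				ht->thread_fn(td->cpu);
			}
		}
	}

Note that thread_should_run() is invoked with preemption disabled, which is why a bare local_softirq_pending() check suffices in ksoftirqd_should_run().
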
@@ -850,65 +824,34 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
-	int hotcpu = (unsigned long)hcpu;
-	struct task_struct *p;
-
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		p = kthread_create_on_node(run_ksoftirqd,
-					   hcpu,
-					   cpu_to_node(hotcpu),
-					   "ksoftirqd/%d", hotcpu);
-		if (IS_ERR(p)) {
-			printk("ksoftirqd for %i failed\n", hotcpu);
-			return notifier_from_errno(PTR_ERR(p));
-		}
-		kthread_bind(p, hotcpu);
-		per_cpu(ksoftirqd, hotcpu) = p;
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		wake_up_process(per_cpu(ksoftirqd, hotcpu));
-		break;
 #ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		if (!per_cpu(ksoftirqd, hotcpu))
-			break;
-		/* Unbind so it can run. Fall thru. */
-		kthread_bind(per_cpu(ksoftirqd, hotcpu),
-			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN: {
-		static const struct sched_param param = {
-			.sched_priority = MAX_RT_PRIO-1
-		};
-
-		p = per_cpu(ksoftirqd, hotcpu);
-		per_cpu(ksoftirqd, hotcpu) = NULL;
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-		kthread_stop(p);
-		takeover_tasklets(hotcpu);
+	case CPU_DEAD_FROZEN:
+		takeover_tasklets((unsigned long)hcpu);
 		break;
-	}
 #endif /* CONFIG_HOTPLUG_CPU */
-	}
+	}
 	return NOTIFY_OK;
 }
 
 static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
+static struct smp_hotplug_thread softirq_threads = {
+	.store			= &ksoftirqd,
+	.thread_should_run	= ksoftirqd_should_run,
+	.thread_fn		= run_ksoftirqd,
+	.thread_comm		= "ksoftirqd/%u",
+};
+
 static __init int spawn_ksoftirqd(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
-	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-
-	BUG_ON(err != NOTIFY_OK);
-	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
+
+	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
+
 	return 0;
 }
 early_initcall(spawn_ksoftirqd);
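
The CPU notifier survives only to run takeover_tasklets() when a CPU dies; thread creation, per-CPU binding, wakeup and teardown are now handled by the smpboot core, which parks the thread when its CPU goes offline and unparks it when the CPU comes back. Any other per-CPU kthread can be converted the same way; a hypothetical sketch (all "my_*" names invented for illustration, not part of this patch):

	#include <linux/init.h>
	#include <linux/percpu.h>
	#include <linux/smpboot.h>

	/* Invented helpers standing in for the subsystem's real work. */
	static bool my_work_pending(unsigned int cpu);
	static void process_my_work(unsigned int cpu);

	/* Per-CPU task pointer the smpboot core fills in for us. */
	static DEFINE_PER_CPU(struct task_struct *, my_kthread);

	static int my_should_run(unsigned int cpu)
	{
		return my_work_pending(cpu);	/* invented predicate */
	}

	static void my_thread_fn(unsigned int cpu)
	{
		process_my_work(cpu);		/* invented worker */
	}

	static struct smp_hotplug_thread my_threads = {
		.store			= &my_kthread,
		.thread_should_run	= my_should_run,
		.thread_fn		= my_thread_fn,
		.thread_comm		= "my_kthread/%u",
	};

	static int __init my_threads_init(void)
	{
		/* Creates, binds and wakes one thread per online CPU and
		 * registers for hotplug so later CPUs get one too. */
		BUG_ON(smpboot_register_percpu_thread(&my_threads));
		return 0;
	}
	early_initcall(my_threads_init);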