@@ -24,112 +24,33 @@
 
 static void (*sh_idle)(void);
 
-static int hlt_counter;
-
-static int __init nohlt_setup(char *__unused)
-{
-	hlt_counter = 1;
-	return 1;
-}
-__setup("nohlt", nohlt_setup);
-
-static int __init hlt_setup(char *__unused)
-{
-	hlt_counter = 0;
-	return 1;
-}
-__setup("hlt", hlt_setup);
-
-static inline int hlt_works(void)
-{
-	return !hlt_counter;
-}
-
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
+void default_idle(void)
 {
+	set_bl_bit();
 	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
+	/* Isn't this racy ? */
+	cpu_sleep();
+	clear_bl_bit();
 }
 
-void default_idle(void)
+void arch_cpu_idle_dead(void)
 {
-	if (hlt_works()) {
-		clear_thread_flag(TIF_POLLING_NRFLAG);
-		smp_mb__after_clear_bit();
-
-		set_bl_bit();
-		if (!need_resched()) {
-			local_irq_enable();
-			cpu_sleep();
-		} else
-			local_irq_enable();
-
-		set_thread_flag(TIF_POLLING_NRFLAG);
-		clear_bl_bit();
-	} else
-		poll_idle();
+	play_dead();
 }
 
-/*
- * The idle thread. There's no useful work to be done, so just try to conserve
- * power and have a low exit latency (ie sit in a loop waiting for somebody to
- * say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle(void)
 {
-	unsigned int cpu = smp_processor_id();
-
-	set_thread_flag(TIF_POLLING_NRFLAG);
-
-	/* endless idle loop with no priority at all */
-	while (1) {
-		tick_nohz_idle_enter();
-		rcu_idle_enter();
-
-		while (!need_resched()) {
-			check_pgt_cache();
-			rmb();
-
-			if (cpu_is_offline(cpu))
-				play_dead();
-
-			local_irq_disable();
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-			if (cpuidle_idle_call())
-				sh_idle();
-			/*
-			 * Sanity check to ensure that sh_idle() returns
-			 * with IRQs enabled
-			 */
-			WARN_ON(irqs_disabled());
-			start_critical_timings();
-		}
-
-		rcu_idle_exit();
-		tick_nohz_idle_exit();
-		schedule_preempt_disabled();
-	}
+	if (cpuidle_idle_call())
+		sh_idle();
 }
 
 void __init select_idle_routine(void)
 {
 	/*
 	 * If a platform has set its own idle routine, leave it alone.
 	 */
-	if (sh_idle)
-		return;
-
-	if (hlt_works())
+	if (!sh_idle)
 		sh_idle = default_idle;
-	else
-		sh_idle = poll_idle;
 }
 
 void stop_this_cpu(void *unused)
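
Note: after this change the architecture no longer runs its own cpu_idle() loop. It only supplies arch_cpu_idle() (one idle step, going through cpuidle or the sh_idle/default_idle fallback) and arch_cpu_idle_dead() (the offline path), while common kernel code owns the loop, the tick/RCU bookkeeping and the need_resched checks. The user-space mock below is only a rough sketch of that split under those assumptions, not the kernel's actual generic idle loop; mock_need_resched() and the iteration cap are hypothetical stand-ins.

#include <stdio.h>

/* Hypothetical stand-ins; these names are illustrative only and are not
 * part of the kernel code above. */
static int iterations;

static int mock_need_resched(void)
{
	/* Pretend work arrives after a few idle iterations. */
	return ++iterations > 3;
}

static void sh_idle(void)
{
	printf("sh_idle: wait for the next interrupt\n");
}

/* Architecture hook: one idle "nap" per call, as in the new arch_cpu_idle(). */
static void arch_cpu_idle(void)
{
	sh_idle();
}

/* Rough shape of a generic idle loop: core code owns the loop and the
 * need_resched check; the architecture only supplies the nap. */
static void cpu_idle_loop(void)
{
	while (!mock_need_resched())
		arch_cpu_idle();
	printf("need_resched set, leaving idle\n");
}

int main(void)
{
	cpu_idle_loop();
	return 0;
}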