
Commit 7d1a941

x86: Use generic idle loop
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Magnus Damm <magnus.damm@gmail.com>
Link: http://lkml.kernel.org/r/20130321215235.486594473@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
Parent: aba92c9
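For context: with GENERIC_IDLE_LOOP selected (see the Kconfig hunk below), the per-arch cpu_idle() loop is replaced by a common one in kernel/cpu/idle.c that calls back into the arch_cpu_idle_*() hooks this patch defines. A rough, abridged sketch of that generic loop (RCU, tracing, and tick-broadcast details omitted, so the real code differs in detail):

static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();
			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* x86: play_dead() */

			local_irq_disable();
			arch_cpu_idle_enter();	/* x86: local_touch_nmi() + enter_idle() */

			if (cpu_idle_force_poll)
				cpu_idle_poll();	/* idle=poll: spin with IRQs on */
			else
				arch_cpu_idle();	/* must re-enable interrupts */

			arch_cpu_idle_exit();	/* x86: __exit_idle() */
		}
		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();	/* x86: boot_init_stack_canary() */
	cpu_idle_loop();
}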

4 files changed: 30 additions, 80 deletions

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -97,6 +97,7 @@ config X86
 	select GENERIC_IOMAP
 	select DCACHE_WORD_ACCESS
 	select GENERIC_SMP_IDLE_THREAD
+	select GENERIC_IDLE_LOOP
 	select ARCH_WANT_IPC_PARSE_VERSION if X86_32
 	select HAVE_ARCH_SECCOMP_FILTER
 	select BUILDTIME_EXTABLE_SORT

arch/x86/kernel/process.c

Lines changed: 27 additions & 78 deletions
@@ -301,13 +301,7 @@ void exit_idle(void)
 }
 #endif

-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	/*
 	 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
 	 * canaries already on the stack wont ever trigger).
 	 */
 	boot_init_stack_canary();
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		tick_nohz_idle_enter();
-
-		while (!need_resched()) {
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-
-			enter_idle();
-
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
+}

-			if (cpuidle_idle_call())
-				x86_idle();
+void arch_cpu_idle_enter(void)
+{
+	local_touch_nmi();
+	enter_idle();
+}

-			rcu_idle_exit();
-			start_critical_timings();
+void arch_cpu_idle_exit(void)
+{
+	__exit_idle();
+}

-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
+}

-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+/*
+ * Called from the generic idle code.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		x86_idle();
 }

 /*
- * We use this if we don't have any better
- * idle routine..
+ * We use this if we don't have any better idle routine..
  */
 void default_idle(void)
 {
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
+	safe_halt();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
 	halt();
 }

-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);

@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (x86_idle == poll_idle && smp_num_siblings > 1)
+	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
 #endif
-	if (x86_idle)
+	if (x86_idle || boot_option_idle_override == IDLE_POLL)
 		return;

 	if (cpu_has_amd_erratum(amd_erratum_400)) {
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)

 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
+		cpu_idle_poll_ctrl(true);
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
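
Note on the idle_setup() hunk above: idle=poll no longer installs an x86-private poll_idle() routine. Instead, cpu_idle_poll_ctrl(true) bumps a force-poll counter in the generic code, which then spins in its own cpu_idle_poll() rather than calling arch_cpu_idle() — which is what makes the removal of poll_idle() safe. Roughly, again abridged from the generic idle code of this series (exact details may differ):

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();	/* same busy-wait the removed x86 poll_idle() did */
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}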

arch/x86/kernel/smpboot.c

Lines changed: 1 addition & 1 deletion
@@ -284,7 +284,7 @@ notrace static void __cpuinit start_secondary(void *unused)
 	x86_cpuinit.setup_percpu_clockev();

 	wmb();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }

 void __init smp_store_boot_cpu_info(void)

arch/x86/xen/smp.c

Lines changed: 1 addition & 1 deletion
@@ -95,7 +95,7 @@ static void __cpuinit cpu_bringup(void)
 static void __cpuinit cpu_bringup_and_idle(void)
 {
 	cpu_bringup();
-	cpu_idle();
+	cpu_startup_entry(CPUHP_ONLINE);
 }

 static int xen_smp_intr_init(unsigned int cpu)
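
With these last two hunks, the native and Xen secondary-CPU bringup paths both enter the scheduler's idle loop through the same cpu_startup_entry(CPUHP_ONLINE) call, leaving cpu_idle() with no remaining callers on x86.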
