@@ -301,13 +301,7 @@ void exit_idle(void)
 }
 #endif
 
-/*
- * The idle thread. There's no useful work to be
- * done, so just try to conserve power and have a
- * low exit latency (ie sit in a loop waiting for
- * somebody to say that they'd like to reschedule)
- */
-void cpu_idle(void)
+void arch_cpu_idle_prepare(void)
 {
 	/*
 	 * If we're the non-boot CPU, nothing set the stack canary up
@@ -317,71 +311,40 @@ void cpu_idle(void)
 	 * canaries already on the stack wont ever trigger).
 	 */
 	boot_init_stack_canary();
-	current_thread_info()->status |= TS_POLLING;
-
-	while (1) {
-		tick_nohz_idle_enter();
-
-		while (!need_resched()) {
-			rmb();
-
-			if (cpu_is_offline(smp_processor_id()))
-				play_dead();
-
-			/*
-			 * Idle routines should keep interrupts disabled
-			 * from here on, until they go to idle.
-			 * Otherwise, idle callbacks can misfire.
-			 */
-			local_touch_nmi();
-			local_irq_disable();
-
-			enter_idle();
-
-			/* Don't trace irqs off for idle */
-			stop_critical_timings();
-
-			/* enter_idle() needs rcu for notifiers */
-			rcu_idle_enter();
+}
 
-			if (cpuidle_idle_call())
-				x86_idle();
+void arch_cpu_idle_enter(void)
+{
+	local_touch_nmi();
+	enter_idle();
+}
 
-			rcu_idle_exit();
-			start_critical_timings();
+void arch_cpu_idle_exit(void)
+{
+	__exit_idle();
+}
 
-			/* In many cases the interrupt that ended idle
-			   has already called exit_idle. But some idle
-			   loops can be woken up without interrupt. */
-			__exit_idle();
-		}
+void arch_cpu_idle_dead(void)
+{
+	play_dead();
+}
 
-		tick_nohz_idle_exit();
-		preempt_enable_no_resched();
-		schedule();
-		preempt_disable();
-	}
+/*
+ * Called from the generic idle code.
+ */
+void arch_cpu_idle(void)
+{
+	if (cpuidle_idle_call())
+		x86_idle();
 }
 
 /*
- * We use this if we don't have any better
- * idle routine..
+ * We use this if we don't have any better idle routine..
  */
 void default_idle(void)
 {
 	trace_cpu_idle_rcuidle(1, smp_processor_id());
-	current_thread_info()->status &= ~TS_POLLING;
-	/*
-	 * TS_POLLING-cleared state must be visible before we
-	 * test NEED_RESCHED:
-	 */
-	smp_mb();
-
-	if (!need_resched())
-		safe_halt();	/* enables interrupts racelessly */
-	else
-		local_irq_enable();
-	current_thread_info()->status |= TS_POLLING;
+	safe_halt();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 #ifdef CONFIG_APM_MODULE
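
Note: the while (1) loop deleted above is not lost. It moves into the architecture-independent idle code introduced earlier in this series (kernel/cpu/idle.c), which calls back into the arch_cpu_idle_*() hooks this patch defines. The sketch below is a paraphrase of that generic loop's shape, not the verbatim source; it also shows why default_idle() can drop its TS_POLLING/smp_mb()/need_resched() dance: the generic loop now performs the equivalent polling-flag handshake once, for every architecture.

/*
 * Paraphrased shape of the generic idle loop in kernel/cpu/idle.c
 * (simplified: the idle task's polling-flag handling and various
 * sanity checks are omitted).
 */
static void cpu_idle_loop(void)
{
	while (1) {
		tick_nohz_idle_enter();

		while (!need_resched()) {
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();	/* x86: play_dead() */

			local_irq_disable();
			arch_cpu_idle_enter();	/* x86: local_touch_nmi() + enter_idle() */

			if (cpu_idle_force_poll) {
				cpu_idle_poll();	/* idle=poll replacement, see below */
			} else {
				stop_critical_timings();
				rcu_idle_enter();
				arch_cpu_idle();	/* x86: cpuidle, falling back to x86_idle() */
				rcu_idle_exit();
				start_critical_timings();
			}
			arch_cpu_idle_exit();	/* x86: __exit_idle() */
		}

		tick_nohz_idle_exit();
		schedule_preempt_disabled();
	}
}

void cpu_startup_entry(enum cpuhp_state state)
{
	arch_cpu_idle_prepare();	/* x86: the stack canary setup above */
	cpu_idle_loop();
}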
@@ -411,20 +374,6 @@ void stop_this_cpu(void *dummy)
 	halt();
 }
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-	trace_cpu_idle_rcuidle(0, smp_processor_id());
-	local_irq_enable();
-	while (!need_resched())
-		cpu_relax();
-	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
-}
-
 bool amd_e400_c1e_detected;
 EXPORT_SYMBOL(amd_e400_c1e_detected);
 
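
The deleted poll_idle() has a generic successor too: when polling is forced, the core loop calls a helper along these lines (again paraphrased from the generic-idle series rather than quoted verbatim):

/*
 * Paraphrased: generic replacement for the per-arch poll_idle().
 * Spins with interrupts enabled until a reschedule is needed.
 */
static int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}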
@@ -489,10 +438,10 @@ static void amd_e400_idle(void)
 void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
-	if (x86_idle == poll_idle && smp_num_siblings > 1)
+	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
 		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
 #endif
-	if (x86_idle)
+	if (x86_idle || boot_option_idle_override == IDLE_POLL)
 		return;
 
 	if (cpu_has_amd_erratum(amd_erratum_400)) {
@@ -517,8 +466,8 @@ static int __init idle_setup(char *str)
 
 	if (!strcmp(str, "poll")) {
 		pr_info("using polling idle threads\n");
-		x86_idle = poll_idle;
 		boot_option_idle_override = IDLE_POLL;
+		cpu_idle_poll_ctrl(true);
 	} else if (!strcmp(str, "halt")) {
 		/*
 		 * When the boot option of idle=halt is added, halt is
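
With poll_idle() gone, idle=poll is implemented by flipping the core code's force-poll switch rather than by installing an x86 idle routine; this is also why select_idle_routine() above now tests boot_option_idle_override == IDLE_POLL instead of comparing x86_idle against poll_idle. A paraphrased sketch of cpu_idle_poll_ctrl() (the counter allows nested enable/disable from multiple callers):

/* Paraphrased: the switch consulted by the generic loop shown earlier. */
static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}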