/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 * tasks which are handled in sched/fair.c )
 */
4
8
#include "sched.h"
5
9
@@ -33,13 +37,15 @@ void cpu_idle_poll_ctrl(bool enable)
33
37
/*
 * "nohlt" boot parameter handler: force the idle loop to busy-poll
 * instead of halting the CPU.
 *
 * Returns 1 to mark the parameter as consumed (__setup() convention).
 */
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;

	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);
39
44
40
45
/*
 * "hlt" boot parameter handler: disable forced polling so the idle
 * loop may halt the CPU (the inverse of "nohlt").
 *
 * Returns 1 to mark the parameter as consumed (__setup() convention).
 */
static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;

	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
@@ -51,12 +57,14 @@ static noinline int __cpuidle cpu_idle_poll(void)
51
57
trace_cpu_idle_rcuidle (0 , smp_processor_id ());
52
58
local_irq_enable ();
53
59
stop_critical_timings ();
60
+
54
61
while (!tif_need_resched () &&
55
62
(cpu_idle_force_poll || tick_check_broadcast_expired ()))
56
63
cpu_relax ();
57
64
start_critical_timings ();
58
65
trace_cpu_idle_rcuidle (PWR_EVENT_EXIT , smp_processor_id ());
59
66
rcu_idle_exit ();
67
+
60
68
return 1 ;
61
69
}
62
70
@@ -337,3 +345,116 @@ void cpu_startup_entry(enum cpuhp_state state)
337
345
while (1 )
338
346
do_idle ();
339
347
}
348
+
349
/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
/*
 * Wakeup CPU selection for the idle class: each idle task is the
 * per-CPU idle thread and stays on its own CPU, so always report
 * the task's current CPU.
 */
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif
360
+
361
/*
 * Idle tasks are unconditionally rescheduled: any task that becomes
 * runnable preempts the idle task, so simply request a resched of
 * the current (idle) task.
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	resched_curr(rq);
}
368
+
369
/*
 * Pick the per-rq idle thread as the next task to run. Retire @prev
 * first, then account the transition into idle before handing back
 * rq->idle.
 */
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	put_prev_task(rq, prev);
	/* NOTE(review): presumably SMT core-idle bookkeeping — confirm in sched.h */
	update_idle_core(rq);
	/* Count this switch into idle in the scheduler statistics. */
	schedstat_inc(rq->sched_goidle);

	return rq->idle;
}
378
+
379
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * Drop rq->lock around printk()/dump_stack() — NOTE(review):
	 * presumably to avoid lock trouble while reporting; confirm.
	 */
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
391
+
392
/* Nothing to save or account when the idle task is switched out. */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}
395
+
396
/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 *
 * The idle class keeps no per-tick state, so this is a no-op.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
407
+
408
/* Nothing to set up when the idle task becomes rq->curr. */
static void set_curr_task_idle(struct rq *rq)
{
}
411
+
412
/*
 * A task can never switch into the idle scheduling class; reaching
 * this callback indicates a scheduler bug.
 */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
416
+
417
/*
 * The idle task has no meaningful priority to change; reaching this
 * callback indicates a scheduler bug.
 */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
422
+
423
/* The idle class has no round-robin timeslice; report 0. */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
427
+
428
/* No runtime statistics are maintained for the idle task. */
static void update_curr_idle(struct rq *rq)
{
}
431
+
432
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 *
 * Most callbacks are no-ops or bug traps — the idle task is never
 * enqueued, never yields, and never changes class or priority.
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
	.set_cpus_allowed	= set_cpus_allowed_common,
#endif

	.set_curr_task		= set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
	.update_curr		= update_curr_idle,
};
0 commit comments