
Commit a92057e

Author: Ingo Molnar

sched/idle: Merge kernel/sched/idle.c and kernel/sched/idle_task.c
Merge these two small .c modules as they implement two aspects of idle
task handling.

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 325ea10 commit a92057e

File tree: 3 files changed (+125, -120 lines)

kernel/sched/Makefile

Lines changed: 3 additions & 2 deletions

@@ -17,8 +17,9 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o
-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+obj-y += idle.o fair.o rt.o deadline.o
+obj-y += wait.o wait_bit.o swait.o completion.o
+
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
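
(Editorial note on the Kbuild syntax above: every object listed in obj-y is built from the matching .c file and linked unconditionally into the kernel image, so deleting idle_task.o from the list is what drops the old module from the build, while the idle.o entry now carries the merged code. A minimal sketch of the mechanism, using a hypothetical file name:)

# Kbuild sketch (hypothetical example, not part of this commit):
# example.o is built from kernel/sched/example.c and linked into
# vmlinux; removing the line removes the object from the build.
obj-y += example.o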

kernel/sched/idle.c

Lines changed: 122 additions & 1 deletion

@@ -1,5 +1,9 @@
 /*
- * Generic entry points for the idle threads
+ * Generic entry points for the idle threads and
+ * implementation of the idle task scheduling class.
+ *
+ * (NOTE: these are not related to SCHED_IDLE batch scheduled
+ *        tasks which are handled in sched/fair.c )
  */
 #include "sched.h"
 
@@ -33,13 +37,15 @@ void cpu_idle_poll_ctrl(bool enable)
 static int __init cpu_idle_poll_setup(char *__unused)
 {
 	cpu_idle_force_poll = 1;
+
 	return 1;
 }
 __setup("nohlt", cpu_idle_poll_setup);
 
 static int __init cpu_idle_nopoll_setup(char *__unused)
 {
 	cpu_idle_force_poll = 0;
+
 	return 1;
 }
 __setup("hlt", cpu_idle_nopoll_setup);
@@ -51,12 +57,14 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	trace_cpu_idle_rcuidle(0, smp_processor_id());
 	local_irq_enable();
 	stop_critical_timings();
+
 	while (!tif_need_resched() &&
 	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
 		cpu_relax();
 	start_critical_timings();
 	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 	rcu_idle_exit();
+
 	return 1;
 }
 
@@ -337,3 +345,116 @@ void cpu_startup_entry(enum cpuhp_state state)
 	while (1)
 		do_idle();
 }
+
+/*
+ * idle-task scheduling class.
+ */
+
+#ifdef CONFIG_SMP
+static int
+select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+	return task_cpu(p); /* IDLE tasks as never migrated */
+}
+#endif
+
+/*
+ * Idle tasks are unconditionally rescheduled:
+ */
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+	resched_curr(rq);
+}
+
+static struct task_struct *
+pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	put_prev_task(rq, prev);
+	update_idle_core(rq);
+	schedstat_inc(rq->sched_goidle);
+
+	return rq->idle;
+}
+
+/*
+ * It is not legal to sleep in the idle task - print a warning
+ * message if some code attempts to do it:
+ */
+static void
+dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
+{
+	raw_spin_unlock_irq(&rq->lock);
+	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
+	dump_stack();
+	raw_spin_lock_irq(&rq->lock);
+}
+
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+/*
+ * scheduler tick hitting a task of our scheduling class.
+ *
+ * NOTE: This function can be called remotely by the tick offload that
+ * goes along full dynticks. Therefore no local assumption can be made
+ * and everything must be accessed through the @rq and @curr passed in
+ * parameters.
+ */
+static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
+{
+}
+
+static void set_curr_task_idle(struct rq *rq)
+{
+}
+
+static void switched_to_idle(struct rq *rq, struct task_struct *p)
+{
+	BUG();
+}
+
+static void
+prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
+{
+	BUG();
+}
+
+static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
+{
+	return 0;
+}
+
+static void update_curr_idle(struct rq *rq)
+{
+}
+
+/*
+ * Simple, special scheduling class for the per-CPU idle tasks:
+ */
+const struct sched_class idle_sched_class = {
+	/* .next is NULL */
+	/* no enqueue/yield_task for idle tasks */
+
+	/* dequeue is not valid, we print a debug message there: */
+	.dequeue_task		= dequeue_task_idle,
+
+	.check_preempt_curr	= check_preempt_curr_idle,
+
+	.pick_next_task		= pick_next_task_idle,
+	.put_prev_task		= put_prev_task_idle,
+
+#ifdef CONFIG_SMP
+	.select_task_rq		= select_task_rq_idle,
+	.set_cpus_allowed	= set_cpus_allowed_common,
+#endif
+
+	.set_curr_task		= set_curr_task_idle,
+	.task_tick		= task_tick_idle,
+
+	.get_rr_interval	= get_rr_interval_idle,
+
+	.prio_changed		= prio_changed_idle,
+	.switched_to		= switched_to_idle,
+	.update_curr		= update_curr_idle,
+};
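
(Editorial aside, not part of the commit: idle_sched_class sits at the bottom of the scheduler's class hierarchy, so the core pick path reaches pick_next_task_idle() only after the stop, deadline, rt and fair classes all report nothing runnable; that is why it can unconditionally return rq->idle. Below is a simplified sketch of that fallback walk, modeled on pick_next_task() in kernel/sched/core.c of this era; the name pick_next_task_sketch is hypothetical, and the fair-class fast path and RETRY_TASK handling are omitted.)

/*
 * Simplified, illustrative sketch of the scheduler's class walk.
 * Classes are tried in priority order (stop, deadline, rt, fair,
 * idle); pick_next_task_idle() always succeeds, so the loop cannot
 * fall through.
 */
static struct task_struct *
pick_next_task_sketch(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
	const struct sched_class *class;
	struct task_struct *p;

	for_each_class(class) {
		p = class->pick_next_task(rq, prev, rf);
		if (p)
			return p;
	}

	/* Unreachable: the idle class always has a runnable task. */
	BUG();
}

(This walk also explains the empty stubs above: hooks such as put_prev_task_idle() and task_tick_idle() must exist because the core scheduler calls every class through the same sched_class vtable, even when the idle class has nothing to do.)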

kernel/sched/idle_task.c

Lines changed: 0 additions & 117 deletions
This file was deleted; its contents moved into kernel/sched/idle.c.
