
Commit da0c1e6

Kirill Tkhai authored and Ingo Molnar committed
sched: Add wrapper for checking task_struct::on_rq
Implement task_on_rq_queued() and use it everywhere instead of the open-coded on_rq check. No functional changes.

The only exception is check_for_tasks(), where the wrapper is not used because that would require exporting task_on_rq_queued() in global header files. The next patch in the series converts it anyway, so we avoid moving it back and forth here.

Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Turner <pjt@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Nicolas Pitre <nicolas.pitre@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1408528052.23412.87.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent f36c019 commit da0c1e6
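
For context, the wrapper and the TASK_ON_RQ_QUEUED constant live in kernel/sched/sched.h, one of the six files changed but not shown in the hunks below. A minimal sketch of what the helper looks like, assuming ->on_rq becomes a small state word rather than a plain boolean (not the verbatim hunk):

/* kernel/sched/sched.h (sketch) */

/* task_struct::on_rq states: */
#define TASK_ON_RQ_QUEUED	1

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

With this in place, writers such as ttwu_activate() store TASK_ON_RQ_QUEUED instead of 1, and every read of ->on_rq goes through the wrapper, which is exactly what the hunks below do.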

File tree

6 files changed: +76, -68 lines


kernel/sched/core.c

Lines changed: 41 additions & 41 deletions
@@ -1043,7 +1043,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -1088,7 +1088,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct rq *src_rq, *dst_rq;
 
 		src_rq = task_rq(p);
@@ -1214,7 +1214,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
-	int running, on_rq;
+	int running, queued;
 	unsigned long ncsw;
 	struct rq *rq;
 
@@ -1252,7 +1252,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->on_rq;
+		queued = task_on_rq_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1284,7 +1284,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * running right now), it's preempted, and we should
 		 * yield - it could be a while.
 		 */
-		if (unlikely(on_rq)) {
+		if (unlikely(queued)) {
 			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1478,7 +1478,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -1537,7 +1537,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	int ret = 0;
 
 	rq = __task_rq_lock(p);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
 		ttwu_do_wakeup(rq, p, wake_flags);
@@ -1678,7 +1678,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
 
-	if (p->on_rq && ttwu_remote(p, wake_flags))
+	if (task_on_rq_queued(p) && ttwu_remote(p, wake_flags))
 		goto stat;
 
 #ifdef CONFIG_SMP
@@ -1742,7 +1742,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		goto out;
 
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
 	ttwu_do_wakeup(rq, p, 0);
@@ -2095,7 +2095,7 @@ void wake_up_new_task(struct task_struct *p)
 	init_task_runnable_average(p);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2444,7 +2444,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 	 * project cycles that may never be accounted to this
 	 * thread, breaking clock_gettime().
 	 */
-	if (task_current(rq, p) && p->on_rq) {
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
 		update_rq_clock(rq);
 		ns = rq_clock_task(rq) - p->se.exec_start;
 		if ((s64)ns < 0)
@@ -2490,7 +2490,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
 	 * been accounted, so we're correct here as well.
 	 */
-	if (!p->on_cpu || !p->on_rq)
+	if (!p->on_cpu || !task_on_rq_queued(p))
 		return p->se.sum_exec_runtime;
 #endif
 
@@ -2794,7 +2794,7 @@ static void __sched __schedule(void)
 		switch_count = &prev->nvcsw;
 	}
 
-	if (prev->on_rq || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
 		update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev);
@@ -2959,7 +2959,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, on_rq, running, enqueue_flag = 0;
+	int oldprio, queued, running, enqueue_flag = 0;
 	struct rq *rq;
 	const struct sched_class *prev_class;
 
@@ -2988,9 +2988,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3030,7 +3030,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, enqueue_flag);
 
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -3041,7 +3041,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	int old_prio, delta, on_rq;
+	int old_prio, delta, queued;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -3062,8 +3062,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 
 	p->static_prio = NICE_TO_PRIO(nice);
@@ -3072,7 +3072,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	p->prio = effective_prio(p);
 	delta = p->prio - old_prio;
 
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		/*
 		 * If the task increased its priority or is running and
@@ -3344,7 +3344,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
-	int retval, oldprio, oldpolicy = -1, on_rq, running;
+	int retval, oldprio, oldpolicy = -1, queued, running;
 	int policy = attr->sched_policy;
 	unsigned long flags;
 	const struct sched_class *prev_class;
@@ -3541,9 +3541,9 @@ static int __sched_setscheduler(struct task_struct *p,
 		return 0;
 	}
 
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3553,7 +3553,7 @@ static int __sched_setscheduler(struct task_struct *p,
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (queued) {
 		/*
 		 * We enqueue to tail when the priority of a task is
 		 * increased (user space view).
@@ -4568,7 +4568,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-	idle->on_rq = 1;
+	idle->on_rq = TASK_ON_RQ_QUEUED;
 #if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
@@ -4645,7 +4645,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
@@ -4695,7 +4695,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
 		enqueue_task(rq_dest, p, 0);
@@ -4736,13 +4736,13 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
 	struct rq *rq;
 	unsigned long flags;
-	bool on_rq, running;
+	bool queued, running;
 
 	rq = task_rq_lock(p, &flags);
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -4751,7 +4751,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, 0);
 	task_rq_unlock(rq, p, &flags);
 }
@@ -7116,13 +7116,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		.sched_policy = SCHED_NORMAL,
 	};
 	int old_prio = p->prio;
-	int on_rq;
+	int queued;
 
-	on_rq = p->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, &attr);
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		resched_curr(rq);
 	}
@@ -7309,16 +7309,16 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
 	struct task_group *tg;
-	int on_rq, running;
+	int queued, running;
 	unsigned long flags;
 	struct rq *rq;
 
 	rq = task_rq_lock(tsk, &flags);
 
 	running = task_current(rq, tsk);
-	on_rq = tsk->on_rq;
+	queued = task_on_rq_queued(tsk);
 
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, tsk, 0);
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
@@ -7331,14 +7331,14 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
-		tsk->sched_class->task_move_group(tsk, on_rq);
+		tsk->sched_class->task_move_group(tsk, queued);
 	else
 #endif
 		set_task_rq(tsk, task_cpu(tsk));
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, tsk, 0);
 
 	task_rq_unlock(rq, tsk, &flags);
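
Taken together, the core.c hunks in rt_mutex_setprio(), set_user_nice(), __sched_setscheduler(), sched_setnuma() and sched_move_task() all follow the same dequeue/re-enqueue shape, which the queued name makes explicit. A condensed sketch of that recurring pattern (attribute change and locking elided):

	queued = task_on_rq_queued(p);
	running = task_current(rq, p);
	if (queued)
		dequeue_task(rq, p, 0);
	if (running)
		p->sched_class->put_prev_task(rq, p);

	/* ... change priority, policy, nice value or group ... */

	if (running)
		p->sched_class->set_curr_task(rq);
	if (queued)
		enqueue_task(rq, p, 0);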

kernel/sched/deadline.c

Lines changed: 8 additions & 7 deletions
@@ -530,7 +530,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	update_rq_clock(rq);
 	dl_se->dl_throttled = 0;
 	dl_se->dl_yielded = 0;
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 		if (task_has_dl_policy(rq->curr))
 			check_preempt_curr_dl(rq, p, 0);
@@ -1030,7 +1030,7 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
 		 * means a stop task can slip in, in which case we need to
 		 * re-start task selection.
 		 */
-		if (rq->stop && rq->stop->on_rq)
+		if (rq->stop && task_on_rq_queued(rq->stop))
 			return RETRY_TASK;
 	}
 
@@ -1257,7 +1257,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 		if (unlikely(task_rq(task) != rq ||
 			     !cpumask_test_cpu(later_rq->cpu,
 					       &task->cpus_allowed) ||
-			     task_running(rq, task) || !task->on_rq)) {
+			     task_running(rq, task) ||
+			     !task_on_rq_queued(task))) {
 			double_unlock_balance(rq, later_rq);
 			later_rq = NULL;
 			break;
@@ -1296,7 +1297,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 	BUG_ON(task_current(rq, p));
 	BUG_ON(p->nr_cpus_allowed <= 1);
 
-	BUG_ON(!p->on_rq);
+	BUG_ON(!task_on_rq_queued(p));
 	BUG_ON(!dl_task(p));
 
 	return p;
@@ -1443,7 +1444,7 @@ static int pull_dl_task(struct rq *this_rq)
 		    dl_time_before(p->dl.deadline,
 				   this_rq->dl.earliest_dl.curr))) {
 			WARN_ON(p == src_rq->curr);
-			WARN_ON(!p->on_rq);
+			WARN_ON(!task_on_rq_queued(p));
 
 			/*
 			 * Then we pull iff p has actually an earlier
@@ -1596,7 +1597,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 	if (unlikely(p->dl.dl_throttled))
 		return;
 
-	if (p->on_rq && rq->curr != p) {
+	if (task_on_rq_queued(p) && rq->curr != p) {
 #ifdef CONFIG_SMP
 		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
 			/* Only reschedule if pushing failed */
@@ -1614,7 +1615,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
 static void prio_changed_dl(struct rq *rq, struct task_struct *p,
 			    int oldprio)
 {
-	if (p->on_rq || rq->curr == p) {
+	if (task_on_rq_queued(p) || rq->curr == p) {
 #ifdef CONFIG_SMP
 		/*
 		 * This might be too much, but unfortunately