@@ -1043,7 +1043,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * A queue event has occurred, and we're going to schedule. In
 	 * this case, we can save a useless back to back clock update.
 	 */
-	if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
+	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 		rq->skip_clock_update = 1;
 }
 
@@ -1088,7 +1088,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct rq *src_rq, *dst_rq;
 
 		src_rq = task_rq(p);
@@ -1214,7 +1214,7 @@ static int migration_cpu_stop(void *data);
 unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
-	int running, on_rq;
+	int running, queued;
 	unsigned long ncsw;
 	struct rq *rq;
 
@@ -1252,7 +1252,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		rq = task_rq_lock(p, &flags);
 		trace_sched_wait_task(p);
 		running = task_running(rq, p);
-		on_rq = p->on_rq;
+		queued = task_on_rq_queued(p);
 		ncsw = 0;
 		if (!match_state || p->state == match_state)
 			ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
@@ -1284,7 +1284,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 		 * running right now), it's preempted, and we should
 		 * yield - it could be a while.
 		 */
-		if (unlikely(on_rq)) {
+		if (unlikely(queued)) {
 			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1478,7 +1478,7 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
 static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
 {
 	activate_task(rq, p, en_flags);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 
 	/* if a worker is waking up, notify workqueue */
 	if (p->flags & PF_WQ_WORKER)
@@ -1537,7 +1537,7 @@ static int ttwu_remote(struct task_struct *p, int wake_flags)
 	int ret = 0;
 
 	rq = __task_rq_lock(p);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		/* check_preempt_curr() may use rq clock */
 		update_rq_clock(rq);
 		ttwu_do_wakeup(rq, p, wake_flags);
@@ -1678,7 +1678,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 	success = 1; /* we're going to change ->state */
 	cpu = task_cpu(p);
 
-	if (p->on_rq && ttwu_remote(p, wake_flags))
+	if (task_on_rq_queued(p) && ttwu_remote(p, wake_flags))
 		goto stat;
 
 #ifdef CONFIG_SMP
@@ -1742,7 +1742,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 	if (!(p->state & TASK_NORMAL))
 		goto out;
 
-	if (!p->on_rq)
+	if (!task_on_rq_queued(p))
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
 	ttwu_do_wakeup(rq, p, 0);
@@ -2095,7 +2095,7 @@ void wake_up_new_task(struct task_struct *p)
 	init_task_runnable_average(p);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
-	p->on_rq = 1;
+	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p, true);
 	check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -2444,7 +2444,7 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 	 * project cycles that may never be accounted to this
 	 * thread, breaking clock_gettime().
 	 */
-	if (task_current(rq, p) && p->on_rq) {
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
 		update_rq_clock(rq);
 		ns = rq_clock_task(rq) - p->se.exec_start;
 		if ((s64)ns < 0)
@@ -2490,7 +2490,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
 	 * been accounted, so we're correct here as well.
 	 */
-	if (!p->on_cpu || !p->on_rq)
+	if (!p->on_cpu || !task_on_rq_queued(p))
 		return p->se.sum_exec_runtime;
 #endif
 
@@ -2794,7 +2794,7 @@ static void __sched __schedule(void)
 		switch_count = &prev->nvcsw;
 	}
 
-	if (prev->on_rq || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
 		update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev);
@@ -2959,7 +2959,7 @@ EXPORT_SYMBOL(default_wake_function);
  */
 void rt_mutex_setprio(struct task_struct *p, int prio)
 {
-	int oldprio, on_rq, running, enqueue_flag = 0;
+	int oldprio, queued, running, enqueue_flag = 0;
 	struct rq *rq;
 	const struct sched_class *prev_class;
 
@@ -2988,9 +2988,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	trace_sched_pi_setprio(p, prio);
 	oldprio = p->prio;
 	prev_class = p->sched_class;
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3030,7 +3030,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, enqueue_flag);
 
 	check_class_changed(rq, p, prev_class, oldprio);
@@ -3041,7 +3041,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 void set_user_nice(struct task_struct *p, long nice)
 {
-	int old_prio, delta, on_rq;
+	int old_prio, delta, queued;
 	unsigned long flags;
 	struct rq *rq;
 
@@ -3062,8 +3062,8 @@ void set_user_nice(struct task_struct *p, long nice)
 		p->static_prio = NICE_TO_PRIO(nice);
 		goto out_unlock;
 	}
-	on_rq = p->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 
 	p->static_prio = NICE_TO_PRIO(nice);
@@ -3072,7 +3072,7 @@ void set_user_nice(struct task_struct *p, long nice)
 	p->prio = effective_prio(p);
 	delta = p->prio - old_prio;
 
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		/*
 		 * If the task increased its priority or is running and
@@ -3344,7 +3344,7 @@ static int __sched_setscheduler(struct task_struct *p,
 {
 	int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
 		      MAX_RT_PRIO - 1 - attr->sched_priority;
-	int retval, oldprio, oldpolicy = -1, on_rq, running;
+	int retval, oldprio, oldpolicy = -1, queued, running;
 	int policy = attr->sched_policy;
 	unsigned long flags;
 	const struct sched_class *prev_class;
@@ -3541,9 +3541,9 @@ static int __sched_setscheduler(struct task_struct *p,
 		return 0;
 	}
 
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -3553,7 +3553,7 @@ static int __sched_setscheduler(struct task_struct *p,
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (queued) {
 		/*
 		 * We enqueue to tail when the priority of a task is
 		 * increased (user space view).
@@ -4568,7 +4568,7 @@ void init_idle(struct task_struct *idle, int cpu)
 	rcu_read_unlock();
 
 	rq->curr = rq->idle = idle;
-	idle->on_rq = 1;
+	idle->on_rq = TASK_ON_RQ_QUEUED;
 #if defined(CONFIG_SMP)
 	idle->on_cpu = 1;
 #endif
@@ -4645,7 +4645,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
 		task_rq_unlock(rq, p, &flags);
@@ -4695,7 +4695,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * If we're not on a rq, the next wake-up will ensure we're
 	 * placed properly.
 	 */
-	if (p->on_rq) {
+	if (task_on_rq_queued(p)) {
 		dequeue_task(rq_src, p, 0);
 		set_task_cpu(p, dest_cpu);
 		enqueue_task(rq_dest, p, 0);
@@ -4736,13 +4736,13 @@ void sched_setnuma(struct task_struct *p, int nid)
 {
 	struct rq *rq;
 	unsigned long flags;
-	bool on_rq, running;
+	bool queued, running;
 
 	rq = task_rq_lock(p, &flags);
-	on_rq = p->on_rq;
+	queued = task_on_rq_queued(p);
 	running = task_current(rq, p);
 
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, p, 0);
 	if (running)
 		p->sched_class->put_prev_task(rq, p);
@@ -4751,7 +4751,7 @@ void sched_setnuma(struct task_struct *p, int nid)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, p, 0);
 	task_rq_unlock(rq, p, &flags);
 }
@@ -7116,13 +7116,13 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		.sched_policy = SCHED_NORMAL,
 	};
 	int old_prio = p->prio;
-	int on_rq;
+	int queued;
 
-	on_rq = p->on_rq;
-	if (on_rq)
+	queued = task_on_rq_queued(p);
+	if (queued)
 		dequeue_task(rq, p, 0);
 	__setscheduler(rq, p, &attr);
-	if (on_rq) {
+	if (queued) {
 		enqueue_task(rq, p, 0);
 		resched_curr(rq);
 	}
@@ -7309,16 +7309,16 @@ void sched_offline_group(struct task_group *tg)
 void sched_move_task(struct task_struct *tsk)
 {
 	struct task_group *tg;
-	int on_rq, running;
+	int queued, running;
 	unsigned long flags;
 	struct rq *rq;
 
 	rq = task_rq_lock(tsk, &flags);
 
 	running = task_current(rq, tsk);
-	on_rq = tsk->on_rq;
+	queued = task_on_rq_queued(tsk);
 
-	if (on_rq)
+	if (queued)
 		dequeue_task(rq, tsk, 0);
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);
@@ -7331,14 +7331,14 @@ void sched_move_task(struct task_struct *tsk)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	if (tsk->sched_class->task_move_group)
-		tsk->sched_class->task_move_group(tsk, on_rq);
+		tsk->sched_class->task_move_group(tsk, queued);
 	else
 #endif
 		set_task_rq(tsk, task_cpu(tsk));
 
 	if (unlikely(running))
 		tsk->sched_class->set_curr_task(rq);
-	if (on_rq)
+	if (queued)
 		enqueue_task(rq, tsk, 0);
 
 	task_rq_unlock(rq, tsk, &flags);
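
Note: the hunks above depend on a helper and an on_rq state constant introduced outside core.c, presumably in the scheduler's shared header (kernel/sched/sched.h). As a minimal sketch of what this diff assumes, task_struct::on_rq stops being a plain 0/1 flag and becomes a small state word, and callers test for the queued state through an inline wrapper:

/* Sketch, not part of these hunks: assumed definitions from the shared
 * scheduler header. TASK_ON_RQ_MIGRATING is shown for completeness only;
 * the hunks above use just TASK_ON_RQ_QUEUED and task_on_rq_queued().
 */
#define TASK_ON_RQ_QUEUED	1
#define TASK_ON_RQ_MIGRATING	2

static inline int task_on_rq_queued(struct task_struct *p)
{
	return p->on_rq == TASK_ON_RQ_QUEUED;
}

With every open-coded p->on_rq test funneled through this wrapper, a follow-up change can mark a task as TASK_ON_RQ_MIGRATING while it is moved between runqueues without callers misreading that transient state as "queued".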