@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
 #endif
+	/* We start is dequeued state, because no RT tasks are queued */
+	rt_rq->rt_queued = 0;
 
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
@@ -404,6 +406,9 @@ static inline void set_post_schedule(struct rq *rq)
 }
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -465,8 +470,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
-		if (rt_se && !on_rt_rq(rt_se))
+		if (!rt_se)
+			enqueue_top_rt_rq(rt_rq);
+		else if (!on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se, false);
+
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -479,7 +487,9 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
-	if (rt_se && on_rt_rq(rt_se))
+	if (!rt_se)
+		dequeue_top_rt_rq(rt_rq);
+	else if (on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
 }
 
@@ -545,12 +555,18 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_running)
-		resched_task(rq_of_rt_rq(rt_rq)->curr);
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (!rt_rq->rt_nr_running)
+		return;
+
+	enqueue_top_rt_rq(rt_rq);
+	resched_task(rq->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+	dequeue_top_rt_rq(rt_rq);
 }
 
 static inline const struct cpumask *sched_rt_period_mask(void)
@@ -935,6 +951,38 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (!rt_rq->rt_queued)
+		return;
+
+	BUG_ON(!rq->nr_running);
+
+	rq->nr_running -= rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (rt_rq->rt_queued)
+		return;
+	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+		return;
+
+	rq->nr_running += rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
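
Taken together, the hunks above introduce a per-runqueue rt_queued flag and a pair of helpers, enqueue_top_rt_rq()/dequeue_top_rt_rq(), that add or subtract the root rt_rq's rt_nr_running from rq->nr_running exactly once per transition. The sketch below is a minimal user-space model of that accounting, not the kernel code: the structures carry only the fields the helpers touch, the helpers take a struct rq * directly for brevity, and the BUG_ON()/rt_rq_throttled() details are reduced to plain fields and asserts.

#include <assert.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel structures; only the fields the
 * accounting touches are modelled here. */
struct rt_rq {
	unsigned int rt_nr_running;	/* runnable RT tasks on this rt_rq */
	int rt_throttled;		/* bandwidth limit hit */
	int rt_queued;			/* tasks currently counted in rq->nr_running */
};

struct rq {
	unsigned int nr_running;	/* all runnable tasks on this CPU */
	struct rt_rq rt;
};

/* Same shape as the patch: take the RT tasks out of rq->nr_running,
 * but only if they are currently accounted (rt_queued). */
static void dequeue_top_rt_rq(struct rq *rq)
{
	struct rt_rq *rt_rq = &rq->rt;

	if (!rt_rq->rt_queued)
		return;

	rq->nr_running -= rt_rq->rt_nr_running;
	rt_rq->rt_queued = 0;
}

/* Re-add the RT tasks, unless the group is throttled or empty. */
static void enqueue_top_rt_rq(struct rq *rq)
{
	struct rt_rq *rt_rq = &rq->rt;

	if (rt_rq->rt_queued)
		return;
	if (rt_rq->rt_throttled || !rt_rq->rt_nr_running)
		return;

	rq->nr_running += rt_rq->rt_nr_running;
	rt_rq->rt_queued = 1;
}

int main(void)
{
	struct rq rq = { .nr_running = 2 };	/* two non-RT tasks already running */

	/* Three RT tasks become runnable. */
	rq.rt.rt_nr_running = 3;
	enqueue_top_rt_rq(&rq);
	assert(rq.nr_running == 5);

	/* Throttling removes them from the global count exactly once,
	 * no matter how often the helper is called. */
	rq.rt.rt_throttled = 1;
	dequeue_top_rt_rq(&rq);
	dequeue_top_rt_rq(&rq);
	assert(rq.nr_running == 2);

	/* Unthrottling restores the count, again idempotently. */
	rq.rt.rt_throttled = 0;
	enqueue_top_rt_rq(&rq);
	enqueue_top_rt_rq(&rq);
	assert(rq.nr_running == 5);

	printf("nr_running accounting stays consistent: %u\n", rq.nr_running);
	return 0;
}

Calling either helper twice in a row is a no-op, which is what lets the later hunks invoke them from several paths (throttle, replenish, entity enqueue/dequeue) without double counting.
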
@@ -1143,6 +1191,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 		back = rt_se;
 	}
 
+	dequeue_top_rt_rq(rt_rq_of_se(back));
+
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
 			__dequeue_rt_entity(rt_se);
@@ -1151,13 +1201,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
 		__enqueue_rt_entity(rt_se, head);
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 
 	for_each_sched_rt_entity(rt_se) {
@@ -1166,6 +1221,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		if (rt_rq && rt_rq->rt_nr_running)
 			__enqueue_rt_entity(rt_se, false);
 	}
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1183,8 +1239,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1195,8 +1249,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_nr_running(rq);
 }
 
 /*
@@ -1401,10 +1453,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (prev->sched_class == &rt_sched_class)
 		update_curr_rt(rq);
 
-	if (!rt_rq->rt_nr_running)
-		return NULL;
-
-	if (rt_rq_throttled(rt_rq))
+	if (!rt_rq->rt_queued)
 		return NULL;
 
 	put_prev_task(rq, prev);
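
With the per-task inc_nr_running()/dec_nr_running() calls removed and the accounting moved into the top-level helpers, pick_next_task_rt() can gate on the single rt_queued flag instead of checking rt_nr_running and rt_rq_throttled() separately. The fragment below is a hypothetical side-by-side of the two gates (struct rt_rq_model and both gate functions are invented names for illustration), not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Only the fields the pick path looks at are modelled. */
struct rt_rq_model {
	unsigned int rt_nr_running;
	bool rt_throttled;
	bool rt_queued;	/* maintained by the enqueue/dequeue_top helpers */
};

/* Old gate: two separate checks in pick_next_task_rt(). */
static bool old_pick_gate(const struct rt_rq_model *rt_rq)
{
	if (!rt_rq->rt_nr_running)
		return false;
	if (rt_rq->rt_throttled)
		return false;
	return true;
}

/* New gate: a single flag, kept in sync by enqueue_top_rt_rq()
 * and dequeue_top_rt_rq() at every transition. */
static bool new_pick_gate(const struct rt_rq_model *rt_rq)
{
	return rt_rq->rt_queued;
}

int main(void)
{
	/* A group that has runnable tasks but has hit its bandwidth
	 * limit: both gates refuse to pick from it, but the new one
	 * needs only the flag that the patch's helpers keep up to date. */
	struct rt_rq_model throttled = {
		.rt_nr_running = 3,
		.rt_throttled  = true,
		.rt_queued     = false,
	};

	printf("old gate: %d, new gate: %d\n",
	       old_pick_gate(&throttled), new_pick_gate(&throttled));
	return 0;
}

The invariant the helpers maintain is effectively rt_queued == (rt_nr_running && !throttled) at every point where it is consulted, which is what makes the single check sufficient while also keeping throttled RT tasks out of rq->nr_running for other scheduler code that reads it.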