
Commit f4ebcbc

tkhai authored and Ingo Molnar committed
sched/rt: Subtract number of tasks of throttled queues from rq->nr_running
Now rq->rt can be in a dequeued or enqueued state. We add a new member, rt_rq->rt_queued, to indicate this; it is used only for the top queue rq->rt_rq. The goal is to fit the generic scheme used in the deadline and fair classes, i.e. a throttled rt_rq's rt_nr_running is subtracted from rq->nr_running.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1394835300.18748.33.camel@HP-250-G1-Notebook-PC
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 653d07a commit f4ebcbc
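
The effect of the new enqueue_top_rt_rq()/dequeue_top_rt_rq() pair can be shown with a minimal user-space sketch of the accounting scheme. This is illustrative only: the structures are stripped down, the real functions take the rt_rq and derive the rq via rq_of_rt_rq(), and locking, group scheduling and the throttling bookkeeping are all omitted.

/*
 * Sketch of the rt_queued accounting (not the kernel code).
 * Assumed simplification: the rq is passed directly instead of
 * being derived from the rt_rq.
 */
#include <assert.h>
#include <stdio.h>

struct rt_rq {
	unsigned int rt_nr_running;	/* runnable RT tasks on this queue */
	int rt_throttled;		/* queue has exhausted its runtime */
	int rt_queued;			/* tasks counted in rq->nr_running? */
};

struct rq {
	unsigned int nr_running;	/* total runnable tasks on this CPU */
	struct rt_rq rt;		/* top-level RT runqueue */
};

/* Stop counting the top rt_rq's tasks in rq->nr_running. */
static void dequeue_top_rt_rq(struct rq *rq)
{
	struct rt_rq *rt_rq = &rq->rt;

	if (!rt_rq->rt_queued)
		return;

	assert(rq->nr_running >= rt_rq->rt_nr_running);
	rq->nr_running -= rt_rq->rt_nr_running;
	rt_rq->rt_queued = 0;
}

/*
 * Count the top rt_rq's tasks in rq->nr_running, unless the queue
 * is throttled or empty.
 */
static void enqueue_top_rt_rq(struct rq *rq)
{
	struct rt_rq *rt_rq = &rq->rt;

	if (rt_rq->rt_queued)
		return;
	if (rt_rq->rt_throttled || !rt_rq->rt_nr_running)
		return;

	rq->nr_running += rt_rq->rt_nr_running;
	rt_rq->rt_queued = 1;
}

int main(void)
{
	/* One fair task plus two queued RT tasks: nr_running == 3. */
	struct rq rq = {
		.nr_running = 3,
		.rt = { .rt_nr_running = 2, .rt_throttled = 0, .rt_queued = 1 },
	};

	/* Throttling removes the RT tasks from the global count. */
	rq.rt.rt_throttled = 1;
	dequeue_top_rt_rq(&rq);
	printf("throttled:   nr_running = %u\n", rq.nr_running);	/* 1 */

	/* Unthrottling puts them back. */
	rq.rt.rt_throttled = 0;
	enqueue_top_rt_rq(&rq);
	printf("unthrottled: nr_running = %u\n", rq.nr_running);	/* 3 */
	return 0;
}

With this accounting in place, pick_next_task_rt() only needs to test rt_rq->rt_queued: the flag is set only when the top rt_rq both has runnable tasks and is not throttled, which is why the two separate checks in the old code collapse into one in the diff below.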

File tree

2 files changed: 63 additions, 12 deletions

kernel/sched/rt.c

Lines changed: 61 additions & 12 deletions
@@ -79,6 +79,8 @@ void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	rt_rq->overloaded = 0;
 	plist_head_init(&rt_rq->pushable_tasks);
 #endif
+	/* We start in dequeued state, because no RT tasks are queued */
+	rt_rq->rt_queued = 0;
 
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
@@ -404,6 +406,9 @@ static inline void set_post_schedule(struct rq *rq)
 }
 #endif /* CONFIG_SMP */
 
+static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
+static void dequeue_top_rt_rq(struct rt_rq *rt_rq);
+
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
 {
 	return !list_empty(&rt_se->run_list);
@@ -465,8 +470,11 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 	rt_se = rt_rq->tg->rt_se[cpu];
 
 	if (rt_rq->rt_nr_running) {
-		if (rt_se && !on_rt_rq(rt_se))
+		if (!rt_se)
+			enqueue_top_rt_rq(rt_rq);
+		else if (!on_rt_rq(rt_se))
 			enqueue_rt_entity(rt_se, false);
+
 		if (rt_rq->highest_prio.curr < curr->prio)
 			resched_task(curr);
 	}
@@ -479,7 +487,9 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
-	if (rt_se && on_rt_rq(rt_se))
+	if (!rt_se)
+		dequeue_top_rt_rq(rt_rq);
+	else if (on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se);
 }
 
@@ -545,12 +555,18 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_running)
-		resched_task(rq_of_rt_rq(rt_rq)->curr);
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	if (!rt_rq->rt_nr_running)
+		return;
+
+	enqueue_top_rt_rq(rt_rq);
+	resched_task(rq->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 {
+	dequeue_top_rt_rq(rt_rq);
 }
 
 static inline const struct cpumask *sched_rt_period_mask(void)
@@ -935,6 +951,38 @@ static void update_curr_rt(struct rq *rq)
 	}
 }
 
+static void
+dequeue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (!rt_rq->rt_queued)
+		return;
+
+	BUG_ON(!rq->nr_running);
+
+	rq->nr_running -= rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 0;
+}
+
+static void
+enqueue_top_rt_rq(struct rt_rq *rt_rq)
+{
+	struct rq *rq = rq_of_rt_rq(rt_rq);
+
+	BUG_ON(&rq->rt != rt_rq);
+
+	if (rt_rq->rt_queued)
+		return;
+	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+		return;
+
+	rq->nr_running += rt_rq->rt_nr_running;
+	rt_rq->rt_queued = 1;
+}
+
 #if defined CONFIG_SMP
 
 static void
@@ -1143,6 +1191,8 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 		back = rt_se;
 	}
 
+	dequeue_top_rt_rq(rt_rq_of_se(back));
+
 	for (rt_se = back; rt_se; rt_se = rt_se->back) {
 		if (on_rt_rq(rt_se))
 			__dequeue_rt_entity(rt_se);
@@ -1151,13 +1201,18 @@ static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
 
 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 	for_each_sched_rt_entity(rt_se)
 		__enqueue_rt_entity(rt_se, head);
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 {
+	struct rq *rq = rq_of_rt_se(rt_se);
+
 	dequeue_rt_stack(rt_se);
 
 	for_each_sched_rt_entity(rt_se) {
@@ -1166,6 +1221,7 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
 		if (rt_rq && rt_rq->rt_nr_running)
 			__enqueue_rt_entity(rt_se, false);
 	}
+	enqueue_top_rt_rq(&rq->rt);
 }
 
 /*
@@ -1183,8 +1239,6 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_nr_running(rq);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1195,8 +1249,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_nr_running(rq);
 }
 
 /*
@@ -1401,10 +1453,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev)
 	if (prev->sched_class == &rt_sched_class)
 		update_curr_rt(rq);
 
-	if (!rt_rq->rt_nr_running)
-		return NULL;
-
-	if (rt_rq_throttled(rt_rq))
+	if (!rt_rq->rt_queued)
 		return NULL;
 
 	put_prev_task(rq, prev);

kernel/sched/sched.h

Lines changed: 2 additions & 0 deletions
@@ -409,6 +409,8 @@ struct rt_rq {
 	int overloaded;
 	struct plist_head pushable_tasks;
 #endif
+	int rt_queued;
+
 	int rt_throttled;
 	u64 rt_time;
 	u64 rt_runtime;
