
Commit 55e16d3

Peter Zijlstra authored and Ingo Molnar committed
sched/fair: Rework throttle_count sync

Since we already take rq->lock when creating a cgroup, use it to also
sync the throttle_count and avoid the extra state and enqueue path
branch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: bsegall@google.com
Cc: linux-kernel@vger.kernel.org
[ Fixed build warning. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 599b484 commit 55e16d3
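
To make the scheme concrete, here is a small userspace sketch of the idea (illustrative stand-in types and names such as fake_cfs_rq and fake_sync_throttle, not the kernel code): each group's per-CPU runqueue tracks in throttle_count how many of its ancestors are throttled, and after this commit a newly created group copies its parent's counter once, at creation time, instead of lazily repairing a stale counter on every enqueue.

#include <assert.h>
#include <stddef.h>

/* stand-in for struct cfs_rq; only the fields this sketch needs */
struct fake_cfs_rq {
	struct fake_cfs_rq *parent;
	int throttle_count;		/* number of throttled ancestors */
};

/* mirrors what the new sync_throttle() does: inherit the parent's counter */
static void fake_sync_throttle(struct fake_cfs_rq *cfs_rq)
{
	if (!cfs_rq->parent)		/* the root has nothing to inherit */
		return;
	cfs_rq->throttle_count = cfs_rq->parent->throttle_count;
}

int main(void)
{
	struct fake_cfs_rq root = { .parent = NULL, .throttle_count = 0 };
	struct fake_cfs_rq mid  = { .parent = &root, .throttle_count = 0 };

	/* the existing hierarchy gets throttled... */
	mid.throttle_count++;

	/* ...and a group created afterwards starts out consistent */
	struct fake_cfs_rq leaf = { .parent = &mid, .throttle_count = 0 };
	fake_sync_throttle(&leaf);

	assert(leaf.throttle_count == 1);
	return 0;
}

In the kernel the copy happens under rq->lock, which group creation already holds, so the extra synchronization state of the old scheme (the throttle_uptodate flag removed below) is no longer needed.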

File tree

2 files changed: +20, -21 lines

kernel/sched/fair.c

Lines changed: 19 additions & 20 deletions
@@ -4241,26 +4241,6 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	if (!cfs_bandwidth_used())
 		return;
 
-	/* Synchronize hierarchical throttle counter: */
-	if (unlikely(!cfs_rq->throttle_uptodate)) {
-		struct rq *rq = rq_of(cfs_rq);
-		struct cfs_rq *pcfs_rq;
-		struct task_group *tg;
-
-		cfs_rq->throttle_uptodate = 1;
-
-		/* Get closest up-to-date node, because leaves go first: */
-		for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
-			pcfs_rq = tg->cfs_rq[cpu_of(rq)];
-			if (pcfs_rq->throttle_uptodate)
-				break;
-		}
-		if (tg) {
-			cfs_rq->throttle_count = pcfs_rq->throttle_count;
-			cfs_rq->throttled_clock_task = rq_clock_task(rq);
-		}
-	}
-
 	/* an active group must be handled by the update_curr()->put() path */
 	if (!cfs_rq->runtime_enabled || cfs_rq->curr)
 		return;
@@ -4275,6 +4255,23 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
 	throttle_cfs_rq(cfs_rq);
 }
 
+static void sync_throttle(struct task_group *tg, int cpu)
+{
+	struct cfs_rq *pcfs_rq, *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return;
+
+	if (!tg->parent)
+		return;
+
+	cfs_rq = tg->cfs_rq[cpu];
+	pcfs_rq = tg->parent->cfs_rq[cpu];
+
+	cfs_rq->throttle_count = pcfs_rq->throttle_count;
+	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+}
+
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
 {
@@ -4414,6 +4411,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
 static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
 static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq) { return false; }
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
+static inline void sync_throttle(struct task_group *tg, int cpu) {}
 static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
@@ -8646,6 +8644,7 @@ void online_fair_sched_group(struct task_group *tg)
 
 		raw_spin_lock_irq(&rq->lock);
 		post_init_entity_util_avg(se);
+		sync_throttle(tg, i);
 		raw_spin_unlock_irq(&rq->lock);
 	}
 }
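
For context, the last hunk lands inside the per-CPU loop of online_fair_sched_group(). A plausible reconstruction of the enclosing function from the hunk's context lines follows; the loop construct and local-variable setup are assumptions not shown in the diff:

void online_fair_sched_group(struct task_group *tg)
{
	struct sched_entity *se;
	struct rq *rq;
	int i;

	for_each_possible_cpu(i) {
		rq = cpu_rq(i);
		se = tg->se[i];

		raw_spin_lock_irq(&rq->lock);
		post_init_entity_util_avg(se);
		sync_throttle(tg, i);	/* added by this commit */
		raw_spin_unlock_irq(&rq->lock);
	}
}

Placing sync_throttle() here is the point of the patch: rq->lock is already held for post_init_entity_util_avg(), so the throttle_count copy rides along at no extra cost.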

kernel/sched/sched.h

Lines changed: 1 addition & 1 deletion
@@ -438,7 +438,7 @@ struct cfs_rq {
 
 	u64 throttled_clock, throttled_clock_task;
 	u64 throttled_clock_task_time;
-	int throttled, throttle_count, throttle_uptodate;
+	int throttled, throttle_count;
 	struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
