Skip to content

Commit 1936c53

Browse files
vingu-linaro authored and Ingo Molnar committed
sched/fair: Reduce the periodic update duration
Instead of using the cfs_rq_is_decayed() which monitors all *_avg and *_sum, we create a cfs_rq_has_blocked() which only takes care of util_avg and load_avg. We are only interested in these 2 values, which decay faster than the *_sum, so we can stop the periodic update earlier. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: brendan.jackman@arm.com Cc: dietmar.eggemann@arm.com Cc: morten.rasmussen@foss.arm.com Cc: valentin.schneider@arm.com Link: http://lkml.kernel.org/r/1518517879-2280-3-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent f643ea2 commit 1936c53

File tree

1 file changed

+17
-4
lines changed

1 file changed

+17
-4
lines changed

kernel/sched/fair.c

Lines changed: 17 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7424,6 +7424,19 @@ static void attach_tasks(struct lb_env *env)
74247424
rq_unlock(env->dst_rq, &rf);
74257425
}
74267426

7427+
static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq)
7428+
{
7429+
if (cfs_rq->avg.load_avg)
7430+
return true;
7431+
7432+
if (cfs_rq->avg.util_avg)
7433+
return true;
7434+
7435+
return false;
7436+
}
7437+
7438+
#ifdef CONFIG_FAIR_GROUP_SCHED
7439+
74277440
static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
74287441
{
74297442
if (cfs_rq->load.weight)
@@ -7441,8 +7454,6 @@ static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
74417454
return true;
74427455
}
74437456

7444-
#ifdef CONFIG_FAIR_GROUP_SCHED
7445-
74467457
static void update_blocked_averages(int cpu)
74477458
{
74487459
struct rq *rq = cpu_rq(cpu);
@@ -7478,7 +7489,9 @@ static void update_blocked_averages(int cpu)
74787489
*/
74797490
if (cfs_rq_is_decayed(cfs_rq))
74807491
list_del_leaf_cfs_rq(cfs_rq);
7481-
else
7492+
7493+
/* Don't need periodic decay once load/util_avg are null */
7494+
if (cfs_rq_has_blocked(cfs_rq))
74827495
done = false;
74837496
}
74847497

@@ -7548,7 +7561,7 @@ static inline void update_blocked_averages(int cpu)
75487561
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
75497562
#ifdef CONFIG_NO_HZ_COMMON
75507563
rq->last_blocked_load_update_tick = jiffies;
7551-
if (cfs_rq_is_decayed(cfs_rq))
7564+
if (!cfs_rq_has_blocked(cfs_rq))
75527565
rq->has_blocked_load = 0;
75537566
#endif
75547567
rq_unlock_irqrestore(rq, &rf);

0 commit comments

Comments
 (0)