
Commit e022e0d

Peter Zijlstra authored, Ingo Molnar committed
sched/fair: Update blocked load from NEWIDLE
Since we already iterate CPUs looking for work on NEWIDLE, use this iteration to age the blocked load. If the domain for which this is done completely spans the idle set, we can push the ILB-based aging forward.

Suggested-by: Brendan Jackman <brendan.jackman@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent a4064fb commit e022e0d

3 files changed: +45 -6 lines
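The change is easiest to read as a gate: a NEWIDLE balance already touches remote runqueues, so re-age a CPU's blocked load from that pass only if the CPU is nohz-idle and its load has not already been aged this jiffy. Below is a minimal standalone C sketch of that gate, not kernel code: the cpu_state struct, the should_age_blocked_load() helper and the simplified time_after() are illustrative stand-ins for the kernel's nohz.idle_cpus_mask, rq->last_blocked_load_update_tick and the time_after() macro seen in the diff below.

/* Standalone model of the staleness gate this commit adds. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long jiffies;   /* models the kernel tick counter */

struct cpu_state {
        bool idle;              /* models membership in nohz.idle_cpus_mask */
        unsigned long last_blocked_load_update_tick;
};

/* Overflow-safe "a is after b" comparison, as in the kernel's time_after(). */
static bool time_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

static bool should_age_blocked_load(const struct cpu_state *cs)
{
        if (!cs->idle)
                return false;   /* busy CPUs age their own load */
        if (!time_after(jiffies, cs->last_blocked_load_update_tick))
                return false;   /* already aged this jiffy */
        return true;
}

int main(void)
{
        struct cpu_state cpu = { .idle = true, .last_blocked_load_update_tick = 0 };

        jiffies = 1;
        printf("stale idle CPU -> age? %d\n", should_age_blocked_load(&cpu)); /* 1 */
        cpu.last_blocked_load_update_tick = jiffies;
        printf("freshly aged   -> age? %d\n", should_age_blocked_load(&cpu)); /* 0 */
        return 0;
}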

kernel/sched/core.c

Lines changed: 1 addition & 0 deletions
@@ -6074,6 +6074,7 @@ void __init sched_init(void)
 		rq_attach_root(rq, &def_root_domain);
 #ifdef CONFIG_NO_HZ_COMMON
 		rq->last_load_update_tick = jiffies;
+		rq->last_blocked_load_update_tick = jiffies;
 		atomic_set(&rq->nohz_flags, 0);
 #endif
 #endif /* CONFIG_SMP */
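Seeding last_blocked_load_update_tick with the boot-time jiffies mirrors last_load_update_tick on the line above; it means the time_after() staleness check added below cannot fire before at least one tick has elapsed.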

kernel/sched/fair.c

Lines changed: 43 additions & 6 deletions
@@ -5376,6 +5376,14 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 	}
 	return load;
 }
+
+static struct {
+	cpumask_var_t idle_cpus_mask;
+	atomic_t nr_cpus;
+	unsigned long next_balance;	/* in jiffy units */
+	unsigned long next_stats;
+} nohz ____cacheline_aligned;
+
 #endif /* CONFIG_NO_HZ_COMMON */

 /**
@@ -7022,6 +7030,7 @@ enum fbq_type { regular, remote, all };
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED	0x04
 #define LBF_SOME_PINNED	0x08
+#define LBF_NOHZ_STATS	0x10

 struct lb_env {
 	struct sched_domain	*sd;
@@ -7460,6 +7469,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_is_decayed(cfs_rq))
 			list_del_leaf_cfs_rq(cfs_rq);
 	}
+
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }

@@ -7519,6 +7532,9 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }

@@ -7853,6 +7869,21 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }

+static void update_nohz_stats(struct rq *rq)
+{
+#ifdef CONFIG_NO_HZ_COMMON
+	unsigned int cpu = rq->cpu;
+
+	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
+		return;
+
+	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+		return;
+
+	update_blocked_averages(cpu);
+#endif
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
@@ -7875,6 +7906,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);

+		if (env->flags & LBF_NOHZ_STATS)
+			update_nohz_stats(rq);
+
 		/* Bias balancing toward CPUs of our domain: */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -8030,6 +8064,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;

+#ifdef CONFIG_NO_HZ_COMMON
+	if (env->idle == CPU_NEWLY_IDLE) {
+		env->flags |= LBF_NOHZ_STATS;
+
+		if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
+			nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);
+	}
+#endif
+
 	load_idx = get_sd_load_idx(env->sd, env->idle);

 	do {
@@ -9049,12 +9092,6 @@ static inline int on_null_domain(struct rq *rq)
  * needed, they will kick the idle load balancer, which then does idle
  * load balancing for all the idle CPUs.
  */
-static struct {
-	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
-	unsigned long next_balance;	/* in jiffy units */
-	unsigned long next_stats;
-} nohz ____cacheline_aligned;

 static inline int find_new_ilb(void)
 {
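Two design points worth noting. First, the nohz bookkeeping struct is hoisted from its old home beside the ILB code (around line 9052) to earlier in the file (around line 5380) so the new update_nohz_stats() can reference nohz.idle_cpus_mask without a forward declaration. Second, the cpumask_subset() test in update_sd_lb_stats() is the "completely spans the idle set" case from the changelog: if this NEWIDLE pass covered every nohz-idle CPU, all blocked loads were just aged, so nohz.next_stats defers the next ILB-driven stats update by LOAD_AVG_PERIOD (treated here as milliseconds via msecs_to_jiffies()).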

kernel/sched/sched.h

Lines changed: 1 addition & 0 deletions
@@ -762,6 +762,7 @@ struct rq {
 #ifdef CONFIG_NO_HZ_COMMON
 #ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
+	unsigned long last_blocked_load_update_tick;
 #endif /* CONFIG_SMP */
 	atomic_t nohz_flags;
 #endif /* CONFIG_NO_HZ_COMMON */
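The new field sits under both CONFIG_NO_HZ_COMMON and CONFIG_SMP, alongside last_load_update_tick: it is only read and written by the SMP load-balancing paths touched above.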
