@@ -5376,6 +5376,14 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
 	}
 	return load;
 }
+
+static struct {
+	cpumask_var_t idle_cpus_mask;
+	atomic_t nr_cpus;
+	unsigned long next_balance;	/* in jiffy units */
+	unsigned long next_stats;
+} nohz ____cacheline_aligned;
+
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /**
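Aside: the block added above is the same nohz bookkeeping structure removed from its old location near the idle-load-balancing code further down in this patch; moving it ahead of the #endif keeps it visible to the NO_HZ stats helpers introduced later in the file. As a rough standalone sketch of what ____cacheline_aligned buys (plain C11, with a 64-byte cache line and the field names below as assumptions, not kernel definitions):

#include <stdatomic.h>

/* Sketch only: keep frequently written shared bookkeeping on its own
 * cache line so writers do not false-share with neighbouring data.
 * 64 bytes is an assumed line size. */
#define CACHE_LINE_BYTES 64

static struct {
	atomic_int nr_idle_cpus;      /* illustrative stand-in for nr_cpus */
	unsigned long next_balance;   /* next idle balance, in tick units */
	unsigned long next_stats;     /* next blocked-load refresh, in tick units */
} idle_bookkeeping __attribute__((aligned(CACHE_LINE_BYTES)));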
@@ -7022,6 +7030,7 @@ enum fbq_type { regular, remote, all };
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED	0x04
 #define LBF_SOME_PINNED	0x08
+#define LBF_NOHZ_STATS	0x10
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -7460,6 +7469,10 @@ static void update_blocked_averages(int cpu)
 		if (cfs_rq_is_decayed(cfs_rq))
 			list_del_leaf_cfs_rq(cfs_rq);
 	}
+
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7519,6 +7532,9 @@ static inline void update_blocked_averages(int cpu)
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
 	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+#ifdef CONFIG_NO_HZ_COMMON
+	rq->last_blocked_load_update_tick = jiffies;
+#endif
 	rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7853,6 +7869,21 @@ group_type group_classify(struct sched_group *group,
 	return group_other;
 }
 
+static void update_nohz_stats(struct rq *rq)
+{
+#ifdef CONFIG_NO_HZ_COMMON
+	unsigned int cpu = rq->cpu;
+
+	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
+		return;
+
+	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+		return;
+
+	update_blocked_averages(cpu);
+#endif
+}
+
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
  * @env: The load balancing environment.
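The new update_nohz_stats() above refreshes a remote CPU's blocked load only if that CPU is actually in the nohz idle mask, and at most once per jiffy: time_after() bails out while last_blocked_load_update_tick is still current (the same stamp written in update_blocked_averages() earlier in this patch). A minimal userspace-style sketch of that wraparound-safe age check, with hypothetical names standing in for jiffies and time_after():

#include <stdbool.h>

/* Wraparound-safe "a is later than b" for a free-running tick counter,
 * mirroring the kernel's time_after(): compare via a signed difference. */
static bool tick_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

/* Rate-limit to one refresh per tick: act only once 'now' has moved
 * past the last-update stamp. */
static bool should_refresh(unsigned long now, unsigned long last_update_tick)
{
	return tick_after(now, last_update_tick);
}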
@@ -7875,6 +7906,9 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
+		if (env->flags & LBF_NOHZ_STATS)
+			update_nohz_stats(rq);
+
 		/* Bias balancing toward CPUs of our domain: */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -8030,6 +8064,15 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
 	if (child && child->flags & SD_PREFER_SIBLING)
 		prefer_sibling = 1;
 
+#ifdef CONFIG_NO_HZ_COMMON
+	if (env->idle == CPU_NEWLY_IDLE) {
+		env->flags |= LBF_NOHZ_STATS;
+
+		if (cpumask_subset(nohz.idle_cpus_mask, sched_domain_span(env->sd)))
+			nohz.next_stats = jiffies + msecs_to_jiffies(LOAD_AVG_PERIOD);
+	}
+#endif
+
 	load_idx = get_sd_load_idx(env->sd, env->idle);
 
 	do {
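In the hunk above, a newly idle balance pass sets LBF_NOHZ_STATS, so the group statistics walk in update_sg_lb_stats() doubles as a blocked-load refresh for any nohz CPUs it visits; and when the domain being scanned already spans every idle CPU, the next dedicated stats update can be deferred by LOAD_AVG_PERIOD milliseconds. A compressed sketch of that opt-in-and-defer pattern in plain C (names and the HZ/period values are assumptions for illustration):

#include <stdbool.h>

/* Illustrative constants, not kernel definitions. */
#define STATS_FLAG	0x10	/* plays the role of LBF_NOHZ_STATS */
#define AVG_PERIOD_MS	32	/* assumed value of LOAD_AVG_PERIOD */
#define TICKS_PER_MS	1	/* assume HZ = 1000 so 1 ms == 1 tick */

struct balance_pass {
	unsigned int flags;
	bool newly_idle;
};

/* A newly idle pass opts into refreshing blocked load as it goes; if it
 * will visit every idle CPU anyway, push the next dedicated refresh out
 * by one averaging period. Returns the (possibly deferred) deadline. */
static unsigned long start_pass(struct balance_pass *p, bool covers_all_idle,
				unsigned long now, unsigned long next_stats)
{
	if (p->newly_idle) {
		p->flags |= STATS_FLAG;
		if (covers_all_idle)
			next_stats = now + AVG_PERIOD_MS * TICKS_PER_MS;
	}
	return next_stats;
}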
@@ -9049,12 +9092,6 @@ static inline int on_null_domain(struct rq *rq)
  * needed, they will kick the idle load balancer, which then does idle
  * load balancing for all the idle CPUs.
  */
-static struct {
-	cpumask_var_t idle_cpus_mask;
-	atomic_t nr_cpus;
-	unsigned long next_balance;	/* in jiffy units */
-	unsigned long next_stats;
-} nohz ____cacheline_aligned;
 
 static inline int find_new_ilb(void)
 {