@@ -9053,6 +9053,7 @@ static struct {
 	cpumask_var_t idle_cpus_mask;
 	atomic_t nr_cpus;
 	unsigned long next_balance;	/* in jiffy units */
+	unsigned long next_stats;
 } nohz ____cacheline_aligned;
 
 static inline int find_new_ilb(void)
@@ -9087,9 +9088,8 @@ static inline void set_cpu_sd_state_busy(void)
  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
  * CPU (if there is one).
  */
-static void kick_ilb(void)
+static void kick_ilb(unsigned int flags)
 {
-	unsigned int flags;
 	int ilb_cpu;
 
 	nohz.next_balance++;
@@ -9099,7 +9099,7 @@ static void kick_ilb(void)
 	if (ilb_cpu >= nr_cpu_ids)
 		return;
 
-	flags = atomic_fetch_or(NOHZ_KICK_MASK, nohz_flags(ilb_cpu));
+	flags = atomic_fetch_or(flags, nohz_flags(ilb_cpu));
 	if (flags & NOHZ_KICK_MASK)
 		return;
 
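The atomic_fetch_or() change above is the heart of kick_ilb(): a single atomic operation both merges the caller's requested kick bits into the target CPU's flags word and, through the returned old value, reveals whether a kick was already pending, in which case the IPI is skipped. A minimal user-space sketch of that protocol, using C11 atomics and illustrative bit values rather than the kernel's actual NOHZ_* definitions:

#include <stdatomic.h>
#include <stdio.h>

#define NOHZ_BALANCE_KICK (1U << 0)	/* request a full idle rebalance (illustrative bit) */
#define NOHZ_STATS_KICK   (1U << 1)	/* request a stats-only update (illustrative bit) */
#define NOHZ_KICK_MASK    (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)

static atomic_uint ilb_flags;	/* stands in for nohz_flags(ilb_cpu) */

static void kick_ilb(unsigned int flags)
{
	/*
	 * atomic_fetch_or() sets the new request bits and returns the
	 * previous value in one step: if any kick bit was already set,
	 * an IPI is already on its way and a second one is redundant.
	 */
	flags = atomic_fetch_or(&ilb_flags, flags);
	if (flags & NOHZ_KICK_MASK)
		return;

	printf("IPI -> ILB CPU\n");	/* smp_send_reschedule() in the kernel */
}

int main(void)
{
	kick_ilb(NOHZ_STATS_KICK);	/* first caller sends the IPI */
	kick_ilb(NOHZ_KICK_MASK);	/* bits merge; no second IPI */
	return 0;
}

Because NOHZ_KICK_MASK is the union of both request bits, a full balance kick subsumes a pending stats kick; the ILB CPU sees the accumulated bits when it wakes.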
@@ -9129,7 +9129,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	struct sched_domain_shared *sds;
 	struct sched_domain *sd;
 	int nr_busy, i, cpu = rq->cpu;
-	bool kick = false;
+	unsigned int flags = 0;
 
 	if (unlikely(rq->idle_balance))
 		return;
@@ -9148,11 +9148,14 @@ static void nohz_balancer_kick(struct rq *rq)
 	if (likely(!atomic_read(&nohz.nr_cpus)))
 		return;
 
+	if (time_after(now, nohz.next_stats))
+		flags = NOHZ_STATS_KICK;
+
 	if (time_before(now, nohz.next_balance))
-		return;
+		goto out;
 
 	if (rq->nr_running >= 2) {
-		kick = true;
+		flags = NOHZ_KICK_MASK;
 		goto out;
 	}
 
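The hunk above is why "return" becomes "goto out": stats staleness and the full-balance window are two independent deadlines, and a stats-only kick must still reach kick_ilb() even when it is too early for a full balance. A standalone sketch of that control flow, with invented deadline values and the kernel's wraparound-safe jiffies comparisons inlined:

#include <stdio.h>

#define NOHZ_STATS_KICK 0x2u	/* illustrative bit values */
#define NOHZ_KICK_MASK  0x3u

/* jiffies-style wraparound-safe comparisons, as in the kernel */
#define time_after(a, b)  ((long)((b) - (a)) < 0)
#define time_before(a, b) time_after(b, a)

int main(void)
{
	unsigned long now = 1000, next_stats = 990, next_balance = 1100;
	unsigned int flags = 0;
	int nr_running = 1;

	if (time_after(now, next_stats))
		flags = NOHZ_STATS_KICK;	/* stats refresh is due */

	if (time_before(now, next_balance))
		goto out;			/* too early to full-balance */

	if (nr_running >= 2)
		flags = NOHZ_KICK_MASK;		/* upgrade to a full kick */
out:
	if (flags)
		printf("kick_ilb(0x%x)\n", flags);	/* prints 0x2 here */
	return 0;
}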
@@ -9165,7 +9168,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	 */
 	nr_busy = atomic_read(&sds->nr_busy_cpus);
 	if (nr_busy > 1) {
-		kick = true;
+		flags = NOHZ_KICK_MASK;
 		goto unlock;
 	}
 
@@ -9175,7 +9178,7 @@ static void nohz_balancer_kick(struct rq *rq)
 	if (sd) {
 		if ((rq->cfs.h_nr_running >= 1) &&
 		    check_cpu_capacity(rq, sd)) {
-			kick = true;
+			flags = NOHZ_KICK_MASK;
 			goto unlock;
 		}
 	}
@@ -9188,16 +9191,16 @@ static void nohz_balancer_kick(struct rq *rq)
 				continue;
 
 			if (sched_asym_prefer(i, cpu)) {
-				kick = true;
+				flags = NOHZ_KICK_MASK;
 				goto unlock;
 			}
 		}
 	}
 unlock:
 	rcu_read_unlock();
 out:
-	if (kick)
-		kick_ilb();
+	if (flags)
+		kick_ilb(flags);
 }
 
 void nohz_balance_exit_idle(unsigned int cpu)
@@ -9389,7 +9392,9 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
 static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 {
 	/* Earliest time when we have to do rebalance again */
-	unsigned long next_balance = jiffies + 60*HZ;
+	unsigned long now = jiffies;
+	unsigned long next_balance = now + 60*HZ;
+	unsigned long next_stats = now + msecs_to_jiffies(LOAD_AVG_PERIOD);
 	int update_next_balance = 0;
 	int this_cpu = this_rq->cpu;
 	unsigned int flags;
@@ -9449,6 +9454,8 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	if (flags & NOHZ_BALANCE_KICK)
 		rebalance_domains(this_rq, CPU_IDLE);
 
+	nohz.next_stats = next_stats;
+
 	/*
 	 * next_balance will be updated only when there is a need.
 	 * When the CPU is attached to null domain for ex, it will not be
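At the consumer end (the tail of nohz_idle_balance() above), only NOHZ_BALANCE_KICK triggers the full rebalance_domains() pass, and nohz.next_stats is re-armed LOAD_AVG_PERIOD milliseconds ahead (32 ms, one PELT half-life), rate-limiting future stats kicks. A rough user-space model; atomic_exchange() only approximates the flag-consumption step this fragment does not show, and HZ/msecs_to_jiffies() are simplified stand-ins:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NOHZ_BALANCE_KICK 0x1u		/* illustrative bit values */
#define NOHZ_STATS_KICK   0x2u
#define LOAD_AVG_PERIOD   32		/* PELT half-life, in ms */
#define HZ                250
#define msecs_to_jiffies(ms) ((ms) * HZ / 1000)

static atomic_uint ilb_flags;		/* set by kick_ilb(), as modeled earlier */
static unsigned long jiffies;		/* advanced by the timer tick in reality */
static unsigned long nohz_next_stats;	/* models nohz.next_stats */

static bool nohz_idle_balance_model(void)
{
	unsigned long now = jiffies;
	unsigned int flags = atomic_exchange(&ilb_flags, 0);

	if (!(flags & (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)))
		return false;		/* nothing was requested */

	if (flags & NOHZ_BALANCE_KICK)
		printf("full rebalance_domains() pass\n");

	/* Rate-limit the next stats-only kick to one PELT half-life. */
	nohz_next_stats = now + msecs_to_jiffies(LOAD_AVG_PERIOD);
	return true;
}

int main(void)
{
	atomic_fetch_or(&ilb_flags, NOHZ_STATS_KICK);
	nohz_idle_balance_model();	/* stats kick only: no full pass */
	printf("next_stats re-armed at jiffy %lu\n", nohz_next_stats);
	return 0;
}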