
Commit 31e77c9

vingu-linaro authored and Ingo Molnar committed
sched/fair: Update blocked load when newly idle
When NEWLY_IDLE load balance is not triggered, we might need to update the blocked load anyway. We can kick an ilb so an idle CPU will take care of updating the blocked load, or we can try to update it locally before entering idle. In the latter case, we reuse part of nohz_idle_balance.

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: brendan.jackman@arm.com
Cc: dietmar.eggemann@arm.com
Cc: morten.rasmussen@foss.arm.com
Cc: valentin.schneider@arm.com
Link: http://lkml.kernel.org/r/1518622006-16089-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 47ea541 commit 31e77c9
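
In short, the new path looks like this. The following is a condensed sketch, not the verbatim kernel code; the full version, including the rq->lock release and re-acquire around the update, is in the diff below:

    /* Condensed sketch of the decision this patch adds: a CPU about to go
     * idle either refreshes the blocked load itself or kicks a nohz idle
     * balance (ilb) so another idle CPU does it. */
    static void nohz_newidle_balance(struct rq *this_rq)
    {
            /* Skip if this CPU is reserved for housekeeping work ... */
            if (!housekeeping_cpu(this_rq->cpu, HK_FLAG_SCHED))
                    return;
            /* ... or will wake up too soon to do anything useful ... */
            if (this_rq->avg_idle < sysctl_sched_migration_cost)
                    return;
            /* ... or no idle CPU currently carries stale blocked load. */
            if (!READ_ONCE(nohz.has_blocked) ||
                time_before(jiffies, READ_ONCE(nohz.next_blocked)))
                    return;

            /* Try the update locally; if the loop aborted early, kick an ilb. */
            if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
                    kick_ilb(NOHZ_STATS_KICK);
    }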

File tree: 1 file changed (+87 / -18 lines)


kernel/sched/fair.c

Lines changed: 87 additions & 18 deletions
@@ -9375,31 +9375,25 @@ void nohz_balance_enter_idle(int cpu)
 }

 /*
- * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
- * rebalancing for all the CPUs for whom scheduler ticks are stopped.
+ * Internal function that runs load balance for all idle cpus. The load balance
+ * can be a simple update of blocked load or a complete load balance with
+ * tasks movement depending of flags.
+ * The function returns false if the loop has stopped before running
+ * through all idle CPUs.
  */
-static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+static bool _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
+                               enum cpu_idle_type idle)
 {
         /* Earliest time when we have to do rebalance again */
         unsigned long now = jiffies;
         unsigned long next_balance = now + 60*HZ;
         bool has_blocked_load = false;
         int update_next_balance = 0;
         int this_cpu = this_rq->cpu;
-        unsigned int flags;
         int balance_cpu;
+        int ret = false;
         struct rq *rq;

-        if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK))
-                return false;
-
-        if (idle != CPU_IDLE) {
-                atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
-                return false;
-        }
-
-        flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
-
         SCHED_WARN_ON((flags & NOHZ_KICK_MASK) == NOHZ_BALANCE_KICK);

         /*
@@ -9443,10 +9437,10 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
                 if (time_after_eq(jiffies, rq->next_balance)) {
                         struct rq_flags rf;

-                        rq_lock_irq(rq, &rf);
+                        rq_lock_irqsave(rq, &rf);
                         update_rq_clock(rq);
                         cpu_load_update_idle(rq);
-                        rq_unlock_irq(rq, &rf);
+                        rq_unlock_irqrestore(rq, &rf);

                         if (flags & NOHZ_BALANCE_KICK)
                                 rebalance_domains(rq, CPU_IDLE);
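
A note on the rq_lock_irq() to rq_lock_irqsave() switch in this hunk (our reading; the changelog does not spell it out): _nohz_idle_balance() is now also reachable from the newly-idle path, where interrupts are already disabled, so the unlock must restore the caller's saved interrupt state rather than unconditionally re-enable interrupts:

    struct rq_flags rf;

    rq_lock_irqsave(rq, &rf);       /* save current IRQ state, disable IRQs, take rq->lock */
    update_rq_clock(rq);
    cpu_load_update_idle(rq);
    rq_unlock_irqrestore(rq, &rf);  /* drop rq->lock, restore the saved IRQ state */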
@@ -9458,13 +9452,21 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
                 }
         }

-        update_blocked_averages(this_cpu);
+        /* Newly idle CPU doesn't need an update */
+        if (idle != CPU_NEWLY_IDLE) {
+                update_blocked_averages(this_cpu);
+                has_blocked_load |= this_rq->has_blocked_load;
+        }
+
         if (flags & NOHZ_BALANCE_KICK)
                 rebalance_domains(this_rq, CPU_IDLE);

         WRITE_ONCE(nohz.next_blocked,
                 now + msecs_to_jiffies(LOAD_AVG_PERIOD));

+        /* The full idle balance loop has been done */
+        ret = true;
+
 abort:
         /* There is still blocked load, enable periodic update */
         if (has_blocked_load)
@@ -9478,15 +9480,79 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
         if (likely(update_next_balance))
                 nohz.next_balance = next_balance;

+        return ret;
+}
+
+/*
+ * In CONFIG_NO_HZ_COMMON case, the idle balance kickee will do the
+ * rebalancing for all the cpus for whom scheduler ticks are stopped.
+ */
+static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+{
+        int this_cpu = this_rq->cpu;
+        unsigned int flags;
+
+        if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_KICK_MASK))
+                return false;
+
+        if (idle != CPU_IDLE) {
+                atomic_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
+                return false;
+        }
+
+        /*
+         * barrier, pairs with nohz_balance_enter_idle(), ensures ...
+         */
+        flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(this_cpu));
+        if (!(flags & NOHZ_KICK_MASK))
+                return false;
+
+        _nohz_idle_balance(this_rq, flags, idle);
+
         return true;
 }
+
+static void nohz_newidle_balance(struct rq *this_rq)
+{
+        int this_cpu = this_rq->cpu;
+
+        /*
+         * This CPU doesn't want to be disturbed by scheduler
+         * housekeeping
+         */
+        if (!housekeeping_cpu(this_cpu, HK_FLAG_SCHED))
+                return;
+
+        /* Will wake up very soon. No time for doing anything else*/
+        if (this_rq->avg_idle < sysctl_sched_migration_cost)
+                return;
+
+        /* Don't need to update blocked load of idle CPUs*/
+        if (!READ_ONCE(nohz.has_blocked) ||
+            time_before(jiffies, READ_ONCE(nohz.next_blocked)))
+                return;
+
+        raw_spin_unlock(&this_rq->lock);
+        /*
+         * This CPU is going to be idle and blocked load of idle CPUs
+         * need to be updated. Run the ilb locally as it is a good
+         * candidate for ilb instead of waking up another idle CPU.
+         * Kick an normal ilb if we failed to do the update.
+         */
+        if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
+                kick_ilb(NOHZ_STATS_KICK);
+        raw_spin_lock(&this_rq->lock);
+}
+
 #else /* !CONFIG_NO_HZ_COMMON */
 static inline void nohz_balancer_kick(struct rq *rq) { }

-static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
+static inline bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 {
         return false;
 }
+
+static inline void nohz_newidle_balance(struct rq *this_rq) { }
 #endif /* CONFIG_NO_HZ_COMMON */

 /*
@@ -9523,12 +9589,15 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)

         if (this_rq->avg_idle < sysctl_sched_migration_cost ||
             !this_rq->rd->overload) {
+
                 rcu_read_lock();
                 sd = rcu_dereference_check_sched_domain(this_rq->sd);
                 if (sd)
                         update_next_balance(sd, &next_balance);
                 rcu_read_unlock();

+                nohz_newidle_balance(this_rq);
+
                 goto out;
         }
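
Putting the pieces together, the newly-idle path now looks roughly like this. This is a call-flow sketch derived from the hunks above, assuming the 4.16-era idle_balance():

    idle_balance(this_rq, rf)
        /* avg_idle too small, or the root domain is not overloaded: */
        -> update_next_balance(sd, &next_balance);
        -> nohz_newidle_balance(this_rq);            /* new in this patch */
               -> _nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE)
               -> kick_ilb(NOHZ_STATS_KICK);         /* fallback if the loop aborted */
        -> goto out;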
