Commit 47ea541

Peter Zijlstra authored and Ingo Molnar committed
sched/fair: Move idle_balance()
We're going to want to call nohz_idle_balance() or parts thereof from
idle_balance(). Since we already have a forward declaration of
idle_balance(), move it down such that it's below nohz_idle_balance(),
avoiding the need for a forward declaration for that.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
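
The move relies on the standard C ordering rule: a call site can only precede a function's definition if a forward declaration is in scope, so defining the callee above the caller removes the need for one. A minimal sketch of that pattern, with hypothetical names standing in for the fair.c functions:

/* Sketch only: hypothetical names, not code from kernel/sched/fair.c. */

static int helper(void)		/* stands in for nohz_idle_balance() */
{
	return 0;
}

static int mover(void)		/* stands in for idle_balance() */
{
	/* helper() is already defined above, so no forward declaration is needed. */
	return helper();
}

/*
 * If mover() were defined above helper(), the call would only compile with
 *	static int helper(void);
 * in scope first, which is exactly the forward declaration that moving
 * idle_balance() below nohz_idle_balance() avoids.
 */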
1 parent dd70724 commit 47ea541

File tree

1 file changed: +114 -114 lines changed

kernel/sched/fair.c

Lines changed: 114 additions & 114 deletions
@@ -8916,120 +8916,6 @@ update_next_balance(struct sched_domain *sd, unsigned long *next_balance)
 	*next_balance = next;
 }
 
-/*
- * idle_balance is called by schedule() if this_cpu is about to become
- * idle. Attempts to pull tasks from other CPUs.
- */
-static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
-{
-	unsigned long next_balance = jiffies + HZ;
-	int this_cpu = this_rq->cpu;
-	struct sched_domain *sd;
-	int pulled_task = 0;
-	u64 curr_cost = 0;
-
-	/*
-	 * We must set idle_stamp _before_ calling idle_balance(), such that we
-	 * measure the duration of idle_balance() as idle time.
-	 */
-	this_rq->idle_stamp = rq_clock(this_rq);
-
-	/*
-	 * Do not pull tasks towards !active CPUs...
-	 */
-	if (!cpu_active(this_cpu))
-		return 0;
-
-	/*
-	 * This is OK, because current is on_cpu, which avoids it being picked
-	 * for load-balance and preemption/IRQs are still disabled avoiding
-	 * further scheduler activity on it and we're being very careful to
-	 * re-start the picking loop.
-	 */
-	rq_unpin_lock(this_rq, rf);
-
-	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
-	    !this_rq->rd->overload) {
-		rcu_read_lock();
-		sd = rcu_dereference_check_sched_domain(this_rq->sd);
-		if (sd)
-			update_next_balance(sd, &next_balance);
-		rcu_read_unlock();
-
-		goto out;
-	}
-
-	raw_spin_unlock(&this_rq->lock);
-
-	update_blocked_averages(this_cpu);
-	rcu_read_lock();
-	for_each_domain(this_cpu, sd) {
-		int continue_balancing = 1;
-		u64 t0, domain_cost;
-
-		if (!(sd->flags & SD_LOAD_BALANCE))
-			continue;
-
-		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
-			update_next_balance(sd, &next_balance);
-			break;
-		}
-
-		if (sd->flags & SD_BALANCE_NEWIDLE) {
-			t0 = sched_clock_cpu(this_cpu);
-
-			pulled_task = load_balance(this_cpu, this_rq,
-						   sd, CPU_NEWLY_IDLE,
-						   &continue_balancing);
-
-			domain_cost = sched_clock_cpu(this_cpu) - t0;
-			if (domain_cost > sd->max_newidle_lb_cost)
-				sd->max_newidle_lb_cost = domain_cost;
-
-			curr_cost += domain_cost;
-		}
-
-		update_next_balance(sd, &next_balance);
-
-		/*
-		 * Stop searching for tasks to pull if there are
-		 * now runnable tasks on this rq.
-		 */
-		if (pulled_task || this_rq->nr_running > 0)
-			break;
-	}
-	rcu_read_unlock();
-
-	raw_spin_lock(&this_rq->lock);
-
-	if (curr_cost > this_rq->max_idle_balance_cost)
-		this_rq->max_idle_balance_cost = curr_cost;
-
-	/*
-	 * While browsing the domains, we released the rq lock, a task could
-	 * have been enqueued in the meantime. Since we're not going idle,
-	 * pretend we pulled a task.
-	 */
-	if (this_rq->cfs.h_nr_running && !pulled_task)
-		pulled_task = 1;
-
-out:
-	/* Move the next balance forward */
-	if (time_after(this_rq->next_balance, next_balance))
-		this_rq->next_balance = next_balance;
-
-	/* Is there a task of a high priority class? */
-	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
-		pulled_task = -1;
-
-	if (pulled_task)
-		this_rq->idle_stamp = 0;
-
-	rq_repin_lock(this_rq, rf);
-
-	return pulled_task;
-}
-
 /*
  * active_load_balance_cpu_stop is run by the CPU stopper. It pushes
  * running tasks off the busiest CPU onto idle CPUs. It requires at
@@ -9603,6 +9489,120 @@ static bool nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+/*
+ * idle_balance is called by schedule() if this_cpu is about to become
+ * idle. Attempts to pull tasks from other CPUs.
+ */
+static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
+{
+	unsigned long next_balance = jiffies + HZ;
+	int this_cpu = this_rq->cpu;
+	struct sched_domain *sd;
+	int pulled_task = 0;
+	u64 curr_cost = 0;
+
+	/*
+	 * We must set idle_stamp _before_ calling idle_balance(), such that we
+	 * measure the duration of idle_balance() as idle time.
+	 */
+	this_rq->idle_stamp = rq_clock(this_rq);
+
+	/*
+	 * Do not pull tasks towards !active CPUs...
+	 */
+	if (!cpu_active(this_cpu))
+		return 0;
+
+	/*
+	 * This is OK, because current is on_cpu, which avoids it being picked
+	 * for load-balance and preemption/IRQs are still disabled avoiding
+	 * further scheduler activity on it and we're being very careful to
+	 * re-start the picking loop.
+	 */
+	rq_unpin_lock(this_rq, rf);
+
+	if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+	    !this_rq->rd->overload) {
+		rcu_read_lock();
+		sd = rcu_dereference_check_sched_domain(this_rq->sd);
+		if (sd)
+			update_next_balance(sd, &next_balance);
+		rcu_read_unlock();
+
+		goto out;
+	}
+
+	raw_spin_unlock(&this_rq->lock);
+
+	update_blocked_averages(this_cpu);
+	rcu_read_lock();
+	for_each_domain(this_cpu, sd) {
+		int continue_balancing = 1;
+		u64 t0, domain_cost;
+
+		if (!(sd->flags & SD_LOAD_BALANCE))
+			continue;
+
+		if (this_rq->avg_idle < curr_cost + sd->max_newidle_lb_cost) {
+			update_next_balance(sd, &next_balance);
+			break;
+		}
+
+		if (sd->flags & SD_BALANCE_NEWIDLE) {
+			t0 = sched_clock_cpu(this_cpu);
+
+			pulled_task = load_balance(this_cpu, this_rq,
+						   sd, CPU_NEWLY_IDLE,
+						   &continue_balancing);
+
+			domain_cost = sched_clock_cpu(this_cpu) - t0;
+			if (domain_cost > sd->max_newidle_lb_cost)
+				sd->max_newidle_lb_cost = domain_cost;
+
+			curr_cost += domain_cost;
+		}
+
+		update_next_balance(sd, &next_balance);
+
+		/*
+		 * Stop searching for tasks to pull if there are
+		 * now runnable tasks on this rq.
+		 */
+		if (pulled_task || this_rq->nr_running > 0)
+			break;
+	}
+	rcu_read_unlock();
+
+	raw_spin_lock(&this_rq->lock);
+
+	if (curr_cost > this_rq->max_idle_balance_cost)
+		this_rq->max_idle_balance_cost = curr_cost;
+
+	/*
+	 * While browsing the domains, we released the rq lock, a task could
+	 * have been enqueued in the meantime. Since we're not going idle,
+	 * pretend we pulled a task.
+	 */
+	if (this_rq->cfs.h_nr_running && !pulled_task)
+		pulled_task = 1;
+
+out:
+	/* Move the next balance forward */
+	if (time_after(this_rq->next_balance, next_balance))
+		this_rq->next_balance = next_balance;
+
+	/* Is there a task of a high priority class? */
+	if (this_rq->nr_running != this_rq->cfs.h_nr_running)
+		pulled_task = -1;
+
+	if (pulled_task)
+		this_rq->idle_stamp = 0;
+
+	rq_repin_lock(this_rq, rf);
+
+	return pulled_task;
+}
+
 /*
  * run_rebalance_domains is triggered when needed from the scheduler tick.
  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
