Skip to content

Commit dfbca41

Browse files
Peter Zijlstra authored and Ingo Molnar committed
sched: Optimize freq invariant accounting
Currently the freq invariant accounting (in __update_entity_runnable_avg() and sched_rt_avg_update()) gets the scale factor from a weak function call; this means that even for archs that default on their implementation the compiler cannot see into this function and optimize the extra scaling math away. This is sad, esp. since it's a 64-bit multiplication which can be quite costly on some platforms. So replace the weak function with #ifdef and __always_inline goo. This is not quite as nice from an arch support PoV but should at least result in compile time errors if done wrong. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Ben Segall <bsegall@google.com> Cc: Morten.Rasmussen@arm.com Cc: Paul Turner <pjt@google.com> Cc: Vincent Guittot <vincent.guittot@linaro.org> Cc: dietmar.eggemann@arm.com Cc: efault@gmx.de Cc: kamalesh@linux.vnet.ibm.com Cc: nicolas.pitre@linaro.org Cc: preeti@linux.vnet.ibm.com Cc: riel@redhat.com Link: http://lkml.kernel.org/r/20150323131905.GF23123@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 1aaf90a commit dfbca41

File tree

2 files changed

+8
-13
lines changed

2 files changed

+8
-13
lines changed

kernel/sched/fair.c

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2484,8 +2484,6 @@ static u32 __compute_runnable_contrib(u64 n)
24842484
return contrib + runnable_avg_yN_sum[n];
24852485
}
24862486

-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
-
24892487
/*
24902488
* We can represent the historical contribution to runnable average as the
24912489
* coefficients of a geometric series. To do this we sub-divide our runnable
@@ -6010,16 +6008,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
60106008
return load_idx;
60116009
}
60126010

-static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu)
-{
-	return SCHED_CAPACITY_SCALE;
-}
-
-unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
-{
-	return default_scale_capacity(sd, cpu);
-}
-
60236011
static unsigned long default_scale_cpu_capacity(struct sched_domain *sd, int cpu)
60246012
{
60256013
if ((sd->flags & SD_SHARE_CPUCAPACITY) && (sd->span_weight > 1))

kernel/sched/sched.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1387,7 +1387,14 @@ static inline int hrtick_enabled(struct rq *rq)
13871387

13881388
#ifdef CONFIG_SMP
13891389
extern void sched_avg_update(struct rq *rq);
-extern unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+#endif
13911398

13921399
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
13931400
{

0 commit comments

Comments
 (0)