Skip to content

Commit 17bf423

Browse files
committed
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar: "The main changes in this cycle were: - Introduce "Energy Aware Scheduling" - by Quentin Perret. This is a coherent topology description of CPUs in cooperation with the PM subsystem, with the goal to schedule more energy-efficiently on asymmetric SMP platforms - such as waking up tasks to the more energy-efficient CPUs first, as long as the system isn't oversubscribed. For details of the design, see: https://lore.kernel.org/lkml/20180724122521.22109-1-quentin.perret@arm.com/ - Misc cleanups and smaller enhancements" * 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits) sched/fair: Select an energy-efficient CPU on task wake-up sched/fair: Introduce an energy estimation helper function sched/fair: Add over-utilization/tipping point indicator sched/fair: Clean-up update_sg_lb_stats parameters sched/toplogy: Introduce the 'sched_energy_present' static key sched/topology: Make Energy Aware Scheduling depend on schedutil sched/topology: Disable EAS on inappropriate platforms sched/topology: Add lowest CPU asymmetry sched_domain level pointer sched/topology: Reference the Energy Model of CPUs when available PM: Introduce an Energy Model management framework sched/cpufreq: Prepare schedutil for Energy Aware Scheduling sched/topology: Relocate arch_scale_cpu_capacity() to the internal header sched/core: Remove unnecessary unlikely() in push_*_task() sched/topology: Remove the ::smt_gain field from 'struct sched_domain' sched: Fix various typos in comments sched/core: Clean up the #ifdef block in add_nr_running() sched/fair: Make some variables static sched/core: Create task_has_idle_policy() helper sched/fair: Add lsub_positive() and use it consistently sched/fair: Mask UTIL_AVG_UNCHANGED usages ...
2 parents 116b081 + 732cd75 commit 17bf423

File tree

22 files changed

+1179
-150
lines changed

22 files changed

+1179
-150
lines changed

drivers/cpufreq/cpufreq.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2277,6 +2277,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
22772277
ret = cpufreq_start_governor(policy);
22782278
if (!ret) {
22792279
pr_debug("cpufreq: governor change\n");
2280+
sched_cpufreq_governor_change(policy, old_gov);
22802281
return 0;
22812282
}
22822283
cpufreq_exit_governor(policy);

include/linux/cpufreq.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -950,6 +950,14 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy)
950950
}
951951
#endif
952952

953+
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
954+
void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
955+
struct cpufreq_governor *old_gov);
956+
#else
957+
static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
958+
struct cpufreq_governor *old_gov) { }
959+
#endif
960+
953961
extern void arch_freq_prepare_all(void);
954962
extern unsigned int arch_freq_get_on_cpu(int cpu);
955963

include/linux/energy_model.h

Lines changed: 187 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,187 @@
1+
/* SPDX-License-Identifier: GPL-2.0 */
2+
#ifndef _LINUX_ENERGY_MODEL_H
3+
#define _LINUX_ENERGY_MODEL_H
4+
#include <linux/cpumask.h>
5+
#include <linux/jump_label.h>
6+
#include <linux/kobject.h>
7+
#include <linux/rcupdate.h>
8+
#include <linux/sched/cpufreq.h>
9+
#include <linux/sched/topology.h>
10+
#include <linux/types.h>
11+
12+
#ifdef CONFIG_ENERGY_MODEL
13+
/**
14+
* em_cap_state - Capacity state of a performance domain
15+
* @frequency: The CPU frequency in KHz, for consistency with CPUFreq
16+
* @power: The power consumed by 1 CPU at this level, in milli-watts
17+
* @cost: The cost coefficient associated with this level, used during
18+
* energy calculation. Equal to: power * max_frequency / frequency
19+
*/
20+
struct em_cap_state {
21+
unsigned long frequency;
22+
unsigned long power;
23+
unsigned long cost;
24+
};
25+
26+
/**
27+
* em_perf_domain - Performance domain
28+
* @table: List of capacity states, in ascending order
29+
* @nr_cap_states: Number of capacity states
30+
* @cpus: Cpumask covering the CPUs of the domain
31+
*
32+
* A "performance domain" represents a group of CPUs whose performance is
33+
* scaled together. All CPUs of a performance domain must have the same
34+
* micro-architecture. Performance domains often have a 1-to-1 mapping with
35+
* CPUFreq policies.
36+
*/
37+
struct em_perf_domain {
38+
struct em_cap_state *table;
39+
int nr_cap_states;
40+
unsigned long cpus[0];
41+
};
42+
43+
#define EM_CPU_MAX_POWER 0xFFFF
44+
45+
struct em_data_callback {
46+
/**
47+
* active_power() - Provide power at the next capacity state of a CPU
48+
* @power : Active power at the capacity state in mW (modified)
49+
* @freq : Frequency at the capacity state in kHz (modified)
50+
* @cpu : CPU for which we do this operation
51+
*
52+
* active_power() must find the lowest capacity state of 'cpu' above
53+
* 'freq' and update 'power' and 'freq' to the matching active power
54+
* and frequency.
55+
*
56+
* The power is the one of a single CPU in the domain, expressed in
57+
* milli-watts. It is expected to fit in the [0, EM_CPU_MAX_POWER]
58+
* range.
59+
*
60+
* Return 0 on success.
61+
*/
62+
int (*active_power)(unsigned long *power, unsigned long *freq, int cpu);
63+
};
64+
#define EM_DATA_CB(_active_power_cb) { .active_power = &_active_power_cb }
65+
66+
struct em_perf_domain *em_cpu_get(int cpu);
67+
int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
68+
struct em_data_callback *cb);
69+
70+
/**
71+
* em_pd_energy() - Estimates the energy consumed by the CPUs of a perf. domain
72+
* @pd : performance domain for which energy has to be estimated
73+
* @max_util : highest utilization among CPUs of the domain
74+
* @sum_util : sum of the utilization of all CPUs in the domain
75+
*
76+
* Return: the sum of the energy consumed by the CPUs of the domain assuming
77+
* a capacity state satisfying the max utilization of the domain.
78+
*/
79+
static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
80+
unsigned long max_util, unsigned long sum_util)
81+
{
82+
unsigned long freq, scale_cpu;
83+
struct em_cap_state *cs;
84+
int i, cpu;
85+
86+
/*
87+
* In order to predict the capacity state, map the utilization of the
88+
* most utilized CPU of the performance domain to a requested frequency,
89+
* like schedutil.
90+
*/
91+
cpu = cpumask_first(to_cpumask(pd->cpus));
92+
scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
93+
cs = &pd->table[pd->nr_cap_states - 1];
94+
freq = map_util_freq(max_util, cs->frequency, scale_cpu);
95+
96+
/*
97+
* Find the lowest capacity state of the Energy Model above the
98+
* requested frequency.
99+
*/
100+
for (i = 0; i < pd->nr_cap_states; i++) {
101+
cs = &pd->table[i];
102+
if (cs->frequency >= freq)
103+
break;
104+
}
105+
106+
/*
107+
* The capacity of a CPU in the domain at that capacity state (cs)
108+
* can be computed as:
109+
*
110+
* cs->freq * scale_cpu
111+
* cs->cap = -------------------- (1)
112+
* cpu_max_freq
113+
*
114+
* So, ignoring the costs of idle states (which are not available in
115+
* the EM), the energy consumed by this CPU at that capacity state is
116+
* estimated as:
117+
*
118+
* cs->power * cpu_util
119+
* cpu_nrg = -------------------- (2)
120+
* cs->cap
121+
*
122+
* since 'cpu_util / cs->cap' represents its percentage of busy time.
123+
*
124+
* NOTE: Although the result of this computation actually is in
125+
* units of power, it can be manipulated as an energy value
126+
* over a scheduling period, since it is assumed to be
127+
* constant during that interval.
128+
*
129+
* By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
130+
* of two terms:
131+
*
132+
* cs->power * cpu_max_freq cpu_util
133+
* cpu_nrg = ------------------------ * --------- (3)
134+
* cs->freq scale_cpu
135+
*
136+
* The first term is static, and is stored in the em_cap_state struct
137+
* as 'cs->cost'.
138+
*
139+
* Since all CPUs of the domain have the same micro-architecture, they
140+
* share the same 'cs->cost', and the same CPU capacity. Hence, the
141+
* total energy of the domain (which is the simple sum of the energy of
142+
* all of its CPUs) can be factorized as:
143+
*
144+
* cs->cost * \Sum cpu_util
145+
* pd_nrg = ------------------------ (4)
146+
* scale_cpu
147+
*/
148+
return cs->cost * sum_util / scale_cpu;
149+
}
150+
151+
/**
152+
* em_pd_nr_cap_states() - Get the number of capacity states of a perf. domain
153+
* @pd : performance domain for which this must be done
154+
*
155+
* Return: the number of capacity states in the performance domain table
156+
*/
157+
static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
158+
{
159+
return pd->nr_cap_states;
160+
}
161+
162+
#else
163+
struct em_perf_domain {};
164+
struct em_data_callback {};
165+
#define EM_DATA_CB(_active_power_cb) { }
166+
167+
static inline int em_register_perf_domain(cpumask_t *span,
168+
unsigned int nr_states, struct em_data_callback *cb)
169+
{
170+
return -EINVAL;
171+
}
172+
static inline struct em_perf_domain *em_cpu_get(int cpu)
173+
{
174+
return NULL;
175+
}
176+
static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
177+
unsigned long max_util, unsigned long sum_util)
178+
{
179+
return 0;
180+
}
181+
static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
182+
{
183+
return 0;
184+
}
185+
#endif
186+
187+
#endif

include/linux/sched.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -176,7 +176,7 @@ struct task_group;
176176
* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
177177
*
178178
* However, with slightly different timing the wakeup TASK_RUNNING store can
179-
* also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
179+
* also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
180180
* a problem either because that will result in one extra go around the loop
181181
* and our @cond test will save the day.
182182
*
@@ -515,7 +515,7 @@ struct sched_dl_entity {
515515

516516
/*
517517
* Actual scheduling parameters. Initialized with the values above,
518-
* they are continously updated during task execution. Note that
518+
* they are continuously updated during task execution. Note that
519519
* the remaining runtime could be < 0 in case we are in overrun.
520520
*/
521521
s64 runtime; /* Remaining runtime for this instance */

include/linux/sched/cpufreq.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,12 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
2020
void (*func)(struct update_util_data *data, u64 time,
2121
unsigned int flags));
2222
void cpufreq_remove_update_util_hook(int cpu);
23+
24+
static inline unsigned long map_util_freq(unsigned long util,
25+
unsigned long freq, unsigned long cap)
26+
{
27+
return (freq + (freq >> 2)) * util / cap;
28+
}
2329
#endif /* CONFIG_CPU_FREQ */
2430

2531
#endif /* _LINUX_SCHED_CPUFREQ_H */

include/linux/sched/isolation.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ enum hk_flags {
1616
};
1717

1818
#ifdef CONFIG_CPU_ISOLATION
19-
DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
19+
DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
2020
extern int housekeeping_any_cpu(enum hk_flags flags);
2121
extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
2222
extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
@@ -43,7 +43,7 @@ static inline void housekeeping_init(void) { }
4343
static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
4444
{
4545
#ifdef CONFIG_CPU_ISOLATION
46-
if (static_branch_unlikely(&housekeeping_overriden))
46+
if (static_branch_unlikely(&housekeeping_overridden))
4747
return housekeeping_test_cpu(cpu, flags);
4848
#endif
4949
return true;

include/linux/sched/mm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -153,7 +153,7 @@ static inline gfp_t current_gfp_context(gfp_t flags)
153153
{
154154
/*
155155
* NOIO implies both NOIO and NOFS and it is a weaker context
156-
* so always make sure it makes precendence
156+
* so always make sure it makes precedence
157157
*/
158158
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
159159
flags &= ~(__GFP_IO | __GFP_FS);

include/linux/sched/stat.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
* Various counters maintained by the scheduler and fork(),
99
* exposed via /proc, sys.c or used by drivers via these APIs.
1010
*
11-
* ( Note that all these values are aquired without locking,
11+
* ( Note that all these values are acquired without locking,
1212
* so they can only be relied on in narrow circumstances. )
1313
*/
1414

include/linux/sched/topology.h

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,6 @@ struct sched_domain {
8989
unsigned int newidle_idx;
9090
unsigned int wake_idx;
9191
unsigned int forkexec_idx;
92-
unsigned int smt_gain;
9392

9493
int nohz_idle; /* NOHZ IDLE status */
9594
int flags; /* See SD_* */
@@ -202,6 +201,14 @@ extern void set_sched_topology(struct sched_domain_topology_level *tl);
202201
# define SD_INIT_NAME(type)
203202
#endif
204203

204+
#ifndef arch_scale_cpu_capacity
205+
static __always_inline
206+
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
207+
{
208+
return SCHED_CAPACITY_SCALE;
209+
}
210+
#endif
211+
205212
#else /* CONFIG_SMP */
206213

207214
struct sched_domain_attr;
@@ -217,6 +224,14 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
217224
return true;
218225
}
219226

227+
#ifndef arch_scale_cpu_capacity
228+
static __always_inline
229+
unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
230+
{
231+
return SCHED_CAPACITY_SCALE;
232+
}
233+
#endif
234+
220235
#endif /* !CONFIG_SMP */
221236

222237
static inline int task_node(const struct task_struct *p)

kernel/power/Kconfig

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -298,3 +298,18 @@ config PM_GENERIC_DOMAINS_OF
298298

299299
config CPU_PM
300300
bool
301+
302+
config ENERGY_MODEL
303+
bool "Energy Model for CPUs"
304+
depends on SMP
305+
depends on CPU_FREQ
306+
default n
307+
help
308+
Several subsystems (thermal and/or the task scheduler for example)
309+
can leverage information about the energy consumed by CPUs to make
310+
smarter decisions. This config option enables the framework from
311+
which subsystems can access the energy models.
312+
313+
The exact usage of the energy model is subsystem-dependent.
314+
315+
If in doubt, say N.

kernel/power/Makefile

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,3 +15,5 @@ obj-$(CONFIG_PM_AUTOSLEEP) += autosleep.o
1515
obj-$(CONFIG_PM_WAKELOCKS) += wakelock.o
1616

1717
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
18+
19+
obj-$(CONFIG_ENERGY_MODEL) += energy_model.o

0 commit comments

Comments
 (0)