Skip to content

Commit 4314daa

Browse files
committed
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
"Misc fixes: various scheduler metrics corner case fixes, a sched_features deadlock fix, and a topology fix for certain NUMA systems"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix kernel-doc notation warning
  sched/fair: Fix load_balance redo for !imbalance
  sched/fair: Fix scale_rt_capacity() for SMT
  sched/fair: Fix vruntime_normalized() for remote non-migration wakeup
  sched/pelt: Fix update_blocked_averages() for RT and DL classes
  sched/topology: Set correct NUMA topology type
  sched/debug: Fix potential deadlock when writing to sched_features
2 parents c0be92b + 882a78a commit 4314daa

File tree

3 files changed

+22
-15
lines changed

3 files changed

+22
-15
lines changed

kernel/sched/debug.c

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -89,12 +89,12 @@ struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
8989

9090
static void sched_feat_disable(int i)
9191
{
92-
static_key_disable(&sched_feat_keys[i]);
92+
static_key_disable_cpuslocked(&sched_feat_keys[i]);
9393
}
9494

9595
static void sched_feat_enable(int i)
9696
{
97-
static_key_enable(&sched_feat_keys[i]);
97+
static_key_enable_cpuslocked(&sched_feat_keys[i]);
9898
}
9999
#else
100100
static void sched_feat_disable(int i) { };
@@ -146,9 +146,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
146146

147147
/* Ensure the static_key remains in a consistent state */
148148
inode = file_inode(filp);
149+
cpus_read_lock();
149150
inode_lock(inode);
150151
ret = sched_feat_set(cmp);
151152
inode_unlock(inode);
153+
cpus_read_unlock();
152154
if (ret < 0)
153155
return ret;
154156

kernel/sched/fair.c

Lines changed: 17 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3362,6 +3362,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
33623362
* attach_entity_load_avg - attach this entity to its cfs_rq load avg
33633363
* @cfs_rq: cfs_rq to attach to
33643364
* @se: sched_entity to attach
3365+
* @flags: migration hints
33653366
*
33663367
* Must call update_cfs_rq_load_avg() before this, since we rely on
33673368
* cfs_rq->avg.last_update_time being current.
@@ -7263,6 +7264,7 @@ static void update_blocked_averages(int cpu)
72637264
{
72647265
struct rq *rq = cpu_rq(cpu);
72657266
struct cfs_rq *cfs_rq, *pos;
7267+
const struct sched_class *curr_class;
72667268
struct rq_flags rf;
72677269
bool done = true;
72687270

@@ -7299,8 +7301,10 @@ static void update_blocked_averages(int cpu)
72997301
if (cfs_rq_has_blocked(cfs_rq))
73007302
done = false;
73017303
}
7302-
update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
7303-
update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
7304+
7305+
curr_class = rq->curr->sched_class;
7306+
update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
7307+
update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
73047308
update_irq_load_avg(rq, 0);
73057309
/* Don't need periodic decay once load/util_avg are null */
73067310
if (others_have_blocked(rq))
@@ -7365,13 +7369,16 @@ static inline void update_blocked_averages(int cpu)
73657369
{
73667370
struct rq *rq = cpu_rq(cpu);
73677371
struct cfs_rq *cfs_rq = &rq->cfs;
7372+
const struct sched_class *curr_class;
73687373
struct rq_flags rf;
73697374

73707375
rq_lock_irqsave(rq, &rf);
73717376
update_rq_clock(rq);
73727377
update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
7373-
update_rt_rq_load_avg(rq_clock_task(rq), rq, 0);
7374-
update_dl_rq_load_avg(rq_clock_task(rq), rq, 0);
7378+
7379+
curr_class = rq->curr->sched_class;
7380+
update_rt_rq_load_avg(rq_clock_task(rq), rq, curr_class == &rt_sched_class);
7381+
update_dl_rq_load_avg(rq_clock_task(rq), rq, curr_class == &dl_sched_class);
73757382
update_irq_load_avg(rq, 0);
73767383
#ifdef CONFIG_NO_HZ_COMMON
73777384
rq->last_blocked_load_update_tick = jiffies;
@@ -7482,10 +7489,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
74827489
return load_idx;
74837490
}
74847491

7485-
static unsigned long scale_rt_capacity(int cpu)
7492+
static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
74867493
{
74877494
struct rq *rq = cpu_rq(cpu);
7488-
unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
7495+
unsigned long max = arch_scale_cpu_capacity(sd, cpu);
74897496
unsigned long used, free;
74907497
unsigned long irq;
74917498

@@ -7507,7 +7514,7 @@ static unsigned long scale_rt_capacity(int cpu)
75077514

75087515
static void update_cpu_capacity(struct sched_domain *sd, int cpu)
75097516
{
7510-
unsigned long capacity = scale_rt_capacity(cpu);
7517+
unsigned long capacity = scale_rt_capacity(sd, cpu);
75117518
struct sched_group *sdg = sd->groups;
75127519

75137520
cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
@@ -8269,7 +8276,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
82698276
force_balance:
82708277
/* Looks like there is an imbalance. Compute it */
82718278
calculate_imbalance(env, &sds);
8272-
return sds.busiest;
8279+
return env->imbalance ? sds.busiest : NULL;
82738280

82748281
out_balanced:
82758282
env->imbalance = 0;
@@ -9638,7 +9645,8 @@ static inline bool vruntime_normalized(struct task_struct *p)
96389645
* - A task which has been woken up by try_to_wake_up() and
96399646
* waiting for actually being woken up by sched_ttwu_pending().
96409647
*/
9641-
if (!se->sum_exec_runtime || p->state == TASK_WAKING)
9648+
if (!se->sum_exec_runtime ||
9649+
(p->state == TASK_WAKING && p->sched_remote_wakeup))
96429650
return true;
96439651

96449652
return false;

kernel/sched/topology.c

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1295,7 +1295,7 @@ static void init_numa_topology_type(void)
12951295

12961296
n = sched_max_numa_distance;
12971297

1298-
if (sched_domains_numa_levels <= 1) {
1298+
if (sched_domains_numa_levels <= 2) {
12991299
sched_numa_topology_type = NUMA_DIRECT;
13001300
return;
13011301
}
@@ -1380,9 +1380,6 @@ void sched_init_numa(void)
13801380
break;
13811381
}
13821382

1383-
if (!level)
1384-
return;
1385-
13861383
/*
13871384
* 'level' contains the number of unique distances
13881385
*

0 commit comments

Comments
 (0)