Skip to content

Commit bb2ebf0

Browse files
author
Ingo Molnar
committed
Merge branch 'sched/urgent' into sched/core, before applying new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2 parents 6a82b60 + 533445c commit bb2ebf0

File tree

3 files changed

+37
-36
lines changed

3 files changed

+37
-36
lines changed

include/linux/sched/rt.h

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p)
1818
#ifdef CONFIG_RT_MUTEXES
1919
extern int rt_mutex_getprio(struct task_struct *p);
2020
extern void rt_mutex_setprio(struct task_struct *p, int prio);
21-
extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
21+
extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
2222
extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
2323
extern void rt_mutex_adjust_pi(struct task_struct *p);
2424
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
3131
return p->normal_prio;
3232
}
3333

34-
static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
34+
static inline int rt_mutex_get_effective_prio(struct task_struct *task,
35+
int newprio)
3536
{
36-
return 0;
37+
return newprio;
3738
}
3839

3940
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)

kernel/locking/rtmutex.c

Lines changed: 7 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
265265
}
266266

267267
/*
268-
* Called by sched_setscheduler() to check whether the priority change
269-
* is overruled by a possible priority boosting.
268+
* Called by sched_setscheduler() to get the priority which will be
269+
* effective after the change.
270270
*/
271-
int rt_mutex_check_prio(struct task_struct *task, int newprio)
271+
int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
272272
{
273273
if (!task_has_pi_waiters(task))
274-
return 0;
274+
return newprio;
275275

276-
return task_top_pi_waiter(task)->task->prio <= newprio;
276+
if (task_top_pi_waiter(task)->task->prio <= newprio)
277+
return task_top_pi_waiter(task)->task->prio;
278+
return newprio;
277279
}
278280

279281
/*

kernel/sched/core.c

Lines changed: 26 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -3300,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p,
33003300

33013301
/* Actually do priority change: must hold pi & rq lock. */
33023302
static void __setscheduler(struct rq *rq, struct task_struct *p,
3303-
const struct sched_attr *attr)
3303+
const struct sched_attr *attr, bool keep_boost)
33043304
{
33053305
__setscheduler_params(p, attr);
33063306

33073307
/*
3308-
* If we get here, there was no pi waiters boosting the
3309-
* task. It is safe to use the normal prio.
3308+
* Keep a potential priority boosting if called from
3309+
* sched_setscheduler().
33103310
*/
3311-
p->prio = normal_prio(p);
3311+
if (keep_boost)
3312+
p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
3313+
else
3314+
p->prio = normal_prio(p);
33123315

33133316
if (dl_prio(p->prio))
33143317
p->sched_class = &dl_sched_class;
@@ -3408,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
34083411
int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
34093412
MAX_RT_PRIO - 1 - attr->sched_priority;
34103413
int retval, oldprio, oldpolicy = -1, queued, running;
3411-
int policy = attr->sched_policy;
3414+
int new_effective_prio, policy = attr->sched_policy;
34123415
unsigned long flags;
34133416
const struct sched_class *prev_class;
34143417
struct rq *rq;
@@ -3590,15 +3593,14 @@ static int __sched_setscheduler(struct task_struct *p,
35903593
oldprio = p->prio;
35913594

35923595
/*
3593-
* Special case for priority boosted tasks.
3594-
*
3595-
* If the new priority is lower or equal (user space view)
3596-
* than the current (boosted) priority, we just store the new
3596+
* Take priority boosted tasks into account. If the new
3597+
* effective priority is unchanged, we just store the new
35973598
* normal parameters and do not touch the scheduler class and
35983599
* the runqueue. This will be done when the task deboost
35993600
* itself.
36003601
*/
3601-
if (rt_mutex_check_prio(p, newprio)) {
3602+
new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
3603+
if (new_effective_prio == oldprio) {
36023604
__setscheduler_params(p, attr);
36033605
task_rq_unlock(rq, p, &flags);
36043606
return 0;
@@ -3612,7 +3614,7 @@ static int __sched_setscheduler(struct task_struct *p,
36123614
put_prev_task(rq, p);
36133615

36143616
prev_class = p->sched_class;
3615-
__setscheduler(rq, p, attr);
3617+
__setscheduler(rq, p, attr, true);
36163618

36173619
if (running)
36183620
p->sched_class->set_curr_task(rq);
@@ -6997,27 +6999,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
69976999
unsigned long flags;
69987000
long cpu = (long)hcpu;
69997001
struct dl_bw *dl_b;
7002+
bool overflow;
7003+
int cpus;
70007004

7001-
switch (action & ~CPU_TASKS_FROZEN) {
7005+
switch (action) {
70027006
case CPU_DOWN_PREPARE:
7003-
/* explicitly allow suspend */
7004-
if (!(action & CPU_TASKS_FROZEN)) {
7005-
bool overflow;
7006-
int cpus;
7007-
7008-
rcu_read_lock_sched();
7009-
dl_b = dl_bw_of(cpu);
7007+
rcu_read_lock_sched();
7008+
dl_b = dl_bw_of(cpu);
70107009

7011-
raw_spin_lock_irqsave(&dl_b->lock, flags);
7012-
cpus = dl_bw_cpus(cpu);
7013-
overflow = __dl_overflow(dl_b, cpus, 0, 0);
7014-
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7010+
raw_spin_lock_irqsave(&dl_b->lock, flags);
7011+
cpus = dl_bw_cpus(cpu);
7012+
overflow = __dl_overflow(dl_b, cpus, 0, 0);
7013+
raw_spin_unlock_irqrestore(&dl_b->lock, flags);
70157014

7016-
rcu_read_unlock_sched();
7015+
rcu_read_unlock_sched();
70177016

7018-
if (overflow)
7019-
return notifier_from_errno(-EBUSY);
7020-
}
7017+
if (overflow)
7018+
return notifier_from_errno(-EBUSY);
70217019
cpuset_update_active_cpus(false);
70227020
break;
70237021
case CPU_DOWN_PREPARE_FROZEN:
@@ -7346,7 +7344,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
73467344
queued = task_on_rq_queued(p);
73477345
if (queued)
73487346
dequeue_task(rq, p, 0);
7349-
__setscheduler(rq, p, &attr);
7347+
__setscheduler(rq, p, &attr, false);
73507348
if (queued) {
73517349
enqueue_task(rq, p, 0);
73527350
resched_curr(rq);

0 commit comments

Comments (0)