@@ -3300,15 +3300,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-                           const struct sched_attr *attr)
+                           const struct sched_attr *attr, bool keep_boost)
 {
         __setscheduler_params(p, attr);
 
         /*
-         * If we get here, there was no pi waiters boosting the
-         * task. It is safe to use the normal prio.
+         * Keep a potential priority boosting if called from
+         * sched_setscheduler().
          */
-        p->prio = normal_prio(p);
+        if (keep_boost)
+                p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+        else
+                p->prio = normal_prio(p);
 
         if (dl_prio(p->prio))
                 p->sched_class = &dl_sched_class;
@@ -3408,7 +3411,7 @@ static int __sched_setscheduler(struct task_struct *p,
         int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                       MAX_RT_PRIO - 1 - attr->sched_priority;
         int retval, oldprio, oldpolicy = -1, queued, running;
-        int policy = attr->sched_policy;
+        int new_effective_prio, policy = attr->sched_policy;
         unsigned long flags;
         const struct sched_class *prev_class;
         struct rq *rq;
@@ -3590,15 +3593,14 @@ static int __sched_setscheduler(struct task_struct *p,
         oldprio = p->prio;
 
         /*
-         * Special case for priority boosted tasks.
-         *
-         * If the new priority is lower or equal (user space view)
-         * than the current (boosted) priority, we just store the new
+         * Take priority boosted tasks into account. If the new
+         * effective priority is unchanged, we just store the new
          * normal parameters and do not touch the scheduler class and
          * the runqueue. This will be done when the task deboost
          * itself.
          */
-        if (rt_mutex_check_prio(p, newprio)) {
+        new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+        if (new_effective_prio == oldprio) {
                 __setscheduler_params(p, attr);
                 task_rq_unlock(rq, p, &flags);
                 return 0;
@@ -3612,7 +3614,7 @@ static int __sched_setscheduler(struct task_struct *p,
                 put_prev_task(rq, p);
 
         prev_class = p->sched_class;
-        __setscheduler(rq, p, attr);
+        __setscheduler(rq, p, attr, true);
 
         if (running)
                 p->sched_class->set_curr_task(rq);
@@ -6997,27 +6999,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
         unsigned long flags;
         long cpu = (long)hcpu;
         struct dl_bw *dl_b;
+        bool overflow;
+        int cpus;
 
-        switch (action & ~CPU_TASKS_FROZEN) {
+        switch (action) {
         case CPU_DOWN_PREPARE:
-                /* explicitly allow suspend */
-                if (!(action & CPU_TASKS_FROZEN)) {
-                        bool overflow;
-                        int cpus;
-
-                        rcu_read_lock_sched();
-                        dl_b = dl_bw_of(cpu);
+                rcu_read_lock_sched();
+                dl_b = dl_bw_of(cpu);
 
-                        raw_spin_lock_irqsave(&dl_b->lock, flags);
-                        cpus = dl_bw_cpus(cpu);
-                        overflow = __dl_overflow(dl_b, cpus, 0, 0);
-                        raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+                raw_spin_lock_irqsave(&dl_b->lock, flags);
+                cpus = dl_bw_cpus(cpu);
+                overflow = __dl_overflow(dl_b, cpus, 0, 0);
+                raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
-                        rcu_read_unlock_sched();
+                rcu_read_unlock_sched();
 
-                        if (overflow)
-                                return notifier_from_errno(-EBUSY);
-                }
+                if (overflow)
+                        return notifier_from_errno(-EBUSY);
                 cpuset_update_active_cpus(false);
                 break;
         case CPU_DOWN_PREPARE_FROZEN:
@@ -7346,7 +7344,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
         queued = task_on_rq_queued(p);
         if (queued)
                 dequeue_task(rq, p, 0);
-        __setscheduler(rq, p, &attr);
+        __setscheduler(rq, p, &attr, false);
         if (queued) {
                 enqueue_task(rq, p, 0);
                 resched_curr(rq);
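The core of the change is how the new priority is picked once priority inheritance is in play: with keep_boost set, __setscheduler() asks the rtmutex code for the effective priority instead of blindly applying normal_prio(), so a task that is still boosted by a PI waiter keeps the boosted value, while normalize_task() passes false to strip the boost. The userspace sketch below models that selection rule (lower numeric value means higher priority, as in the kernel); struct task, effective_prio() and choose_prio() are simplified stand-ins for illustration, not the kernel implementation.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a task: lower prio value = higher priority. */
struct task {
        int normal_prio;        /* priority derived from the scheduling policy */
        bool has_pi_waiter;     /* is some rtmutex waiter boosting this task?  */
        int top_waiter_prio;    /* priority of the highest such waiter         */
};

/*
 * Models rt_mutex_get_effective_prio(): the task must not run below the
 * priority of its top PI waiter, so take the stronger (smaller) value.
 */
static int effective_prio(const struct task *t, int newprio)
{
        if (t->has_pi_waiter && t->top_waiter_prio < newprio)
                return t->top_waiter_prio;
        return newprio;
}

/* Models the keep_boost decision added to __setscheduler(). */
static int choose_prio(const struct task *t, bool keep_boost)
{
        return keep_boost ? effective_prio(t, t->normal_prio)
                          : t->normal_prio;
}

int main(void)
{
        struct task t = {
                .normal_prio = 120,     /* plain SCHED_NORMAL priority */
                .has_pi_waiter = true,
                .top_waiter_prio = 90,  /* boosted by an RT waiter     */
        };

        /* sched_setscheduler() path: keep the boost -> 90 */
        printf("keep_boost=true  -> %d\n", choose_prio(&t, true));
        /* normalize_task() path: drop the boost -> 120 */
        printf("keep_boost=false -> %d\n", choose_prio(&t, false));
        return 0;
}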