Skip to content

Commit fb7b26e

Browse files
committed
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner: "This update contains the following changes: - Fix a signal handling regression in the bit wait functions. - Avoid false positive warnings in the wakeup path. - Initialize the scheduler root domain properly. - Handle gtime calculations in proc/$PID/stat proper. - Add more documentation for the barriers in try_to_wake_up(). - Fix a subtle race in try_to_wake_up() which might cause a task to be scheduled on two cpus - Compile static helper function only when it is used" * 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: sched/core: Fix an SMP ordering race in try_to_wake_up() vs. schedule() sched/core: Better document the try_to_wake_up() barriers sched/cputime: Fix invalid gtime in proc sched/core: Clear the root_domain cpumasks in init_rootdomain() sched/core: Remove false-positive warning from wake_up_process() sched/wait: Fix signal handling in bit wait helpers sched/rt: Hide the push_irq_work_func() declaration
2 parents 69d2ca6 + ecf7d01 commit fb7b26e

File tree

5 files changed

+45
-15
lines changed

5 files changed

+45
-15
lines changed

kernel/sched/core.c

Lines changed: 30 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1946,14 +1946,39 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
19461946
goto stat;
19471947

19481948
#ifdef CONFIG_SMP
1949+
/*
1950+
* Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
1951+
* possible to, falsely, observe p->on_cpu == 0.
1952+
*
1953+
* One must be running (->on_cpu == 1) in order to remove oneself
1954+
* from the runqueue.
1955+
*
1956+
* [S] ->on_cpu = 1; [L] ->on_rq
1957+
* UNLOCK rq->lock
1958+
* RMB
1959+
* LOCK rq->lock
1960+
* [S] ->on_rq = 0; [L] ->on_cpu
1961+
*
1962+
* Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
1963+
* from the consecutive calls to schedule(); the first switching to our
1964+
* task, the second putting it to sleep.
1965+
*/
1966+
smp_rmb();
1967+
19491968
/*
19501969
* If the owning (remote) cpu is still in the middle of schedule() with
19511970
* this task as prev, wait until its done referencing the task.
19521971
*/
19531972
while (p->on_cpu)
19541973
cpu_relax();
19551974
/*
1956-
* Pairs with the smp_wmb() in finish_lock_switch().
1975+
* Combined with the control dependency above, we have an effective
1976+
* smp_load_acquire() without the need for full barriers.
1977+
*
1978+
* Pairs with the smp_store_release() in finish_lock_switch().
1979+
*
1980+
* This ensures that tasks getting woken will be fully ordered against
1981+
* their previous state and preserve Program Order.
19571982
*/
19581983
smp_rmb();
19591984

@@ -2039,7 +2064,6 @@ static void try_to_wake_up_local(struct task_struct *p)
20392064
*/
20402065
int wake_up_process(struct task_struct *p)
20412066
{
2042-
WARN_ON(task_is_stopped_or_traced(p));
20432067
return try_to_wake_up(p, TASK_NORMAL, 0);
20442068
}
20452069
EXPORT_SYMBOL(wake_up_process);
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
58475871
{
58485872
memset(rd, 0, sizeof(*rd));
58495873

5850-
if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
5874+
if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
58515875
goto out;
5852-
if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
5876+
if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
58535877
goto free_span;
5854-
if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
5878+
if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
58555879
goto free_online;
5856-
if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5880+
if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
58575881
goto free_dlo_mask;
58585882

58595883
init_dl_bw(&rd->dl_bw);

kernel/sched/cputime.c

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
788788
unsigned int seq;
789789
cputime_t gtime;
790790

791+
if (!context_tracking_is_enabled())
792+
return t->gtime;
793+
791794
do {
792795
seq = read_seqbegin(&t->vtime_seqlock);
793796

kernel/sched/rt.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
6464
raw_spin_unlock(&rt_b->rt_runtime_lock);
6565
}
6666

67-
#ifdef CONFIG_SMP
67+
#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
6868
static void push_irq_work_func(struct irq_work *work);
6969
#endif
7070

kernel/sched/sched.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
10731073
* We must ensure this doesn't happen until the switch is completely
10741074
* finished.
10751075
*
1076+
* In particular, the load of prev->state in finish_task_switch() must
1077+
* happen before this.
1078+
*
10761079
* Pairs with the control dependency and rmb in try_to_wake_up().
10771080
*/
10781081
smp_store_release(&prev->on_cpu, 0);

kernel/sched/wait.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -583,42 +583,42 @@ EXPORT_SYMBOL(wake_up_atomic_t);
583583

584584
__sched int bit_wait(struct wait_bit_key *word)
585585
{
586-
if (signal_pending_state(current->state, current))
587-
return 1;
588586
schedule();
587+
if (signal_pending(current))
588+
return -EINTR;
589589
return 0;
590590
}
591591
EXPORT_SYMBOL(bit_wait);
592592

593593
__sched int bit_wait_io(struct wait_bit_key *word)
594594
{
595-
if (signal_pending_state(current->state, current))
596-
return 1;
597595
io_schedule();
596+
if (signal_pending(current))
597+
return -EINTR;
598598
return 0;
599599
}
600600
EXPORT_SYMBOL(bit_wait_io);
601601

602602
__sched int bit_wait_timeout(struct wait_bit_key *word)
603603
{
604604
unsigned long now = READ_ONCE(jiffies);
605-
if (signal_pending_state(current->state, current))
606-
return 1;
607605
if (time_after_eq(now, word->timeout))
608606
return -EAGAIN;
609607
schedule_timeout(word->timeout - now);
608+
if (signal_pending(current))
609+
return -EINTR;
610610
return 0;
611611
}
612612
EXPORT_SYMBOL_GPL(bit_wait_timeout);
613613

614614
__sched int bit_wait_io_timeout(struct wait_bit_key *word)
615615
{
616616
unsigned long now = READ_ONCE(jiffies);
617-
if (signal_pending_state(current->state, current))
618-
return 1;
619617
if (time_after_eq(now, word->timeout))
620618
return -EAGAIN;
621619
io_schedule_timeout(word->timeout - now);
620+
if (signal_pending(current))
621+
return -EINTR;
622622
return 0;
623623
}
624624
EXPORT_SYMBOL_GPL(bit_wait_io_timeout);

0 commit comments

Comments
 (0)