Skip to content

Commit f3e9478

Browse files
Peter Zijlstra authored and Ingo Molnar committed
sched: Remove __ARCH_WANT_INTERRUPTS_ON_CTXSW
Now that the last architecture to use this has stopped doing so (ARM, thanks Catalin!) we can remove this complexity from the scheduler core. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Link: http://lkml.kernel.org/n/tip-g9p2a1w81xxbrze25v9zpzbf@git.kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 5ed4f1d commit f3e9478

File tree

6 files changed

+1
-69
lines changed

6 files changed

+1
-69
lines changed

Documentation/scheduler/sched-arch.txt

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -17,16 +17,6 @@ you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
1717
Unlocked context switches introduce only a very minor performance
1818
penalty to the core scheduler implementation in the CONFIG_SMP case.
1919

20-
2. Interrupt status
21-
By default, the switch_to arch function is called with interrupts
22-
disabled. Interrupts may be enabled over the call if it is likely to
23-
introduce a significant interrupt latency by adding the line
24-
`#define __ARCH_WANT_INTERRUPTS_ON_CTXSW` in the same place as for
25-
unlocked context switches. This define also implies
26-
`__ARCH_WANT_UNLOCKED_CTXSW`. See arch/arm/include/asm/system.h for an
27-
example.
28-
29-
3020
CPU idle
3121
========
3222
Your cpu_idle routines need to obey the following rules:

include/linux/sched.h

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -678,11 +678,6 @@ struct signal_struct {
678678
* (notably. ptrace) */
679679
};
680680

681-
/* Context switch must be unlocked if interrupts are to be enabled */
682-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
683-
# define __ARCH_WANT_UNLOCKED_CTXSW
684-
#endif
685-
686681
/*
687682
* Bits in flags field of signal_struct.
688683
*/

kernel/fork.c

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1280,11 +1280,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
12801280
#endif
12811281
#ifdef CONFIG_TRACE_IRQFLAGS
12821282
p->irq_events = 0;
1283-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1284-
p->hardirqs_enabled = 1;
1285-
#else
12861283
p->hardirqs_enabled = 0;
1287-
#endif
12881284
p->hardirq_enable_ip = 0;
12891285
p->hardirq_enable_event = 0;
12901286
p->hardirq_disable_ip = _THIS_IP_;

kernel/sched/core.c

Lines changed: 1 addition & 39 deletions
Original file line numberDiff line numberDiff line change
@@ -1361,25 +1361,6 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
13611361
smp_send_reschedule(cpu);
13621362
}
13631363

1364-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1365-
static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
1366-
{
1367-
struct rq *rq;
1368-
int ret = 0;
1369-
1370-
rq = __task_rq_lock(p);
1371-
if (p->on_cpu) {
1372-
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1373-
ttwu_do_wakeup(rq, p, wake_flags);
1374-
ret = 1;
1375-
}
1376-
__task_rq_unlock(rq);
1377-
1378-
return ret;
1379-
1380-
}
1381-
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1382-
13831364
bool cpus_share_cache(int this_cpu, int that_cpu)
13841365
{
13851366
return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
@@ -1440,21 +1421,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
14401421
* If the owning (remote) cpu is still in the middle of schedule() with
14411422
* this task as prev, wait until its done referencing the task.
14421423
*/
1443-
while (p->on_cpu) {
1444-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1445-
/*
1446-
* In case the architecture enables interrupts in
1447-
* context_switch(), we cannot busy wait, since that
1448-
* would lead to deadlocks when an interrupt hits and
1449-
* tries to wake up @prev. So bail and do a complete
1450-
* remote wakeup.
1451-
*/
1452-
if (ttwu_activate_remote(p, wake_flags))
1453-
goto stat;
1454-
#else
1424+
while (p->on_cpu)
14551425
cpu_relax();
1456-
#endif
1457-
}
14581426
/*
14591427
* Pairs with the smp_wmb() in finish_lock_switch().
14601428
*/
@@ -1798,13 +1766,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
17981766
prev_state = prev->state;
17991767
account_switch_vtime(prev);
18001768
finish_arch_switch(prev);
1801-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1802-
local_irq_disable();
1803-
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
18041769
perf_event_task_sched_in(prev, current);
1805-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1806-
local_irq_enable();
1807-
#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
18081770
finish_lock_switch(rq, prev);
18091771
finish_arch_post_lock_switch();
18101772

kernel/sched/rt.c

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1632,11 +1632,6 @@ static int push_rt_task(struct rq *rq)
16321632
if (!next_task)
16331633
return 0;
16341634

1635-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1636-
if (unlikely(task_running(rq, next_task)))
1637-
return 0;
1638-
#endif
1639-
16401635
retry:
16411636
if (unlikely(next_task == rq->curr)) {
16421637
WARN_ON(1);

kernel/sched/sched.h

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -737,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
737737
*/
738738
next->on_cpu = 1;
739739
#endif
740-
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
741-
raw_spin_unlock_irq(&rq->lock);
742-
#else
743740
raw_spin_unlock(&rq->lock);
744-
#endif
745741
}
746742

747743
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
@@ -755,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
755751
smp_wmb();
756752
prev->on_cpu = 0;
757753
#endif
758-
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
759754
local_irq_enable();
760-
#endif
761755
}
762756
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
763757

0 commit comments

Comments (0)