Skip to content

Commit 3d28ebc

Browse files
amluto authored and
Ingo Molnar committed
x86/mm: Rework lazy TLB to track the actual loaded mm
Lazy TLB state is currently managed in a rather baroque manner. AFAICT, there are three possible states: - Non-lazy. This means that we're running a user thread or a kernel thread that has called use_mm(). current->mm == current->active_mm == cpu_tlbstate.active_mm and cpu_tlbstate.state == TLBSTATE_OK. - Lazy with user mm. We're running a kernel thread without an mm and we're borrowing an mm_struct. We have current->mm == NULL, current->active_mm == cpu_tlbstate.active_mm, cpu_tlbstate.state != TLBSTATE_OK (i.e. TLBSTATE_LAZY or 0). The current cpu is set in mm_cpumask(current->active_mm). CR3 points to current->active_mm->pgd. The TLB is up to date. - Lazy with init_mm. This happens when we call leave_mm(). We have current->mm == NULL, current->active_mm == cpu_tlbstate.active_mm, but that mm is only relelvant insofar as the scheduler is tracking it for refcounting. cpu_tlbstate.state != TLBSTATE_OK. The current cpu is clear in mm_cpumask(current->active_mm). CR3 points to swapper_pg_dir, i.e. init_mm->pgd. This patch simplifies the situation. Other than perf, x86 stops caring about current->active_mm at all. We have cpu_tlbstate.loaded_mm pointing to the mm that CR3 references. The TLB is always up to date for that mm. leave_mm() just switches us to init_mm. There are no longer any special cases for mm_cpumask, and switch_mm() switches mms without worrying about laziness. After this patch, cpu_tlbstate.state serves only to tell the TLB flush code whether it may switch to init_mm instead of doing a normal flush. This makes fairly extensive changes to xen_exit_mmap(), which used to look a bit like black magic. Perf is unchanged. With or without this change, perf may behave a bit erratically if it tries to read user memory in kernel thread context. We should build on this patch to teach perf to never look at user memory when cpu_tlbstate.loaded_mm != current->mm. 
Signed-off-by: Andy Lutomirski <luto@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Arjan van de Ven <arjan@linux.intel.com> Cc: Borislav Petkov <bpetkov@suse.de> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mel Gorman <mgorman@suse.de> Cc: Michal Hocko <mhocko@suse.com> Cc: Nadav Amit <nadav.amit@gmail.com> Cc: Nadav Amit <namit@vmware.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-mm@kvack.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent ce4a4e5 commit 3d28ebc

File tree

6 files changed

+147
-144
lines changed

6 files changed

+147
-144
lines changed

arch/x86/events/core.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2101,8 +2101,7 @@ static int x86_pmu_event_init(struct perf_event *event)
21012101

21022102
static void refresh_pce(void *ignored)
21032103
{
2104-
if (current->active_mm)
2105-
load_mm_cr4(current->active_mm);
2104+
load_mm_cr4(this_cpu_read(cpu_tlbstate.loaded_mm));
21062105
}
21072106

21082107
static void x86_pmu_event_mapped(struct perf_event *event)

arch/x86/include/asm/tlbflush.h

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,13 @@ static inline void invpcid_flush_all_nonglobals(void)
6666
#endif
6767

6868
struct tlb_state {
69-
struct mm_struct *active_mm;
69+
/*
70+
* cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
71+
* are on. This means that it may not match current->active_mm,
72+
* which will contain the previous user mm when we're in lazy TLB
73+
* mode even if we've already switched back to swapper_pg_dir.
74+
*/
75+
struct mm_struct *loaded_mm;
7076
int state;
7177

7278
/*
@@ -256,7 +262,9 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
256262
static inline void reset_lazy_tlbstate(void)
257263
{
258264
this_cpu_write(cpu_tlbstate.state, 0);
259-
this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
265+
this_cpu_write(cpu_tlbstate.loaded_mm, &init_mm);
266+
267+
WARN_ON(read_cr3() != __pa_symbol(swapper_pg_dir));
260268
}
261269

262270
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,

arch/x86/kernel/ldt.c

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -22,14 +22,15 @@
2222
#include <asm/syscalls.h>
2323

2424
/* context.lock is held for us, so we don't need any locking. */
25-
static void flush_ldt(void *current_mm)
25+
static void flush_ldt(void *__mm)
2626
{
27+
struct mm_struct *mm = __mm;
2728
mm_context_t *pc;
2829

29-
if (current->active_mm != current_mm)
30+
if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
3031
return;
3132

32-
pc = &current->active_mm->context;
33+
pc = &mm->context;
3334
set_ldt(pc->ldt->entries, pc->ldt->size);
3435
}
3536

arch/x86/mm/init.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -811,7 +811,7 @@ void __init zone_sizes_init(void)
811811
}
812812

813813
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
814-
.active_mm = &init_mm,
814+
.loaded_mm = &init_mm,
815815
.state = 0,
816816
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
817817
};

arch/x86/mm/tlb.c

Lines changed: 108 additions & 108 deletions
Original file line numberDiff line numberDiff line change
@@ -28,26 +28,25 @@
2828
* Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
2929
*/
3030

31-
/*
32-
* We cannot call mmdrop() because we are in interrupt context,
33-
* instead update mm->cpu_vm_mask.
34-
*/
3531
void leave_mm(int cpu)
3632
{
37-
struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);
33+
struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
34+
35+
/*
36+
* It's plausible that we're in lazy TLB mode while our mm is init_mm.
37+
* If so, our callers still expect us to flush the TLB, but there
38+
* aren't any user TLB entries in init_mm to worry about.
39+
*
40+
* This needs to happen before any other sanity checks due to
41+
* intel_idle's shenanigans.
42+
*/
43+
if (loaded_mm == &init_mm)
44+
return;
45+
3846
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
3947
BUG();
40-
if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
41-
cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
42-
load_cr3(swapper_pg_dir);
43-
/*
44-
* This gets called in the idle path where RCU
45-
* functions differently. Tracing normally
46-
* uses RCU, so we have to call the tracepoint
47-
* specially here.
48-
*/
49-
trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
50-
}
48+
49+
switch_mm(NULL, &init_mm, NULL);
5150
}
5251
EXPORT_SYMBOL_GPL(leave_mm);
5352

@@ -65,108 +64,109 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
6564
struct task_struct *tsk)
6665
{
6766
unsigned cpu = smp_processor_id();
67+
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
6868

69-
if (likely(prev != next)) {
70-
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
71-
/*
72-
* If our current stack is in vmalloc space and isn't
73-
* mapped in the new pgd, we'll double-fault. Forcibly
74-
* map it.
75-
*/
76-
unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
77-
78-
pgd_t *pgd = next->pgd + stack_pgd_index;
69+
/*
70+
* NB: The scheduler will call us with prev == next when
71+
* switching from lazy TLB mode to normal mode if active_mm
72+
* isn't changing. When this happens, there is no guarantee
73+
* that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
74+
*
75+
* NB: leave_mm() calls us with prev == NULL and tsk == NULL.
76+
*/
7977

80-
if (unlikely(pgd_none(*pgd)))
81-
set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
82-
}
83-
84-
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
85-
this_cpu_write(cpu_tlbstate.active_mm, next);
78+
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8679

87-
cpumask_set_cpu(cpu, mm_cpumask(next));
80+
if (real_prev == next) {
81+
/*
82+
* There's nothing to do: we always keep the per-mm control
83+
* regs in sync with cpu_tlbstate.loaded_mm. Just
84+
* sanity-check mm_cpumask.
85+
*/
86+
if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
87+
cpumask_set_cpu(cpu, mm_cpumask(next));
88+
return;
89+
}
8890

91+
if (IS_ENABLED(CONFIG_VMAP_STACK)) {
8992
/*
90-
* Re-load page tables.
91-
*
92-
* This logic has an ordering constraint:
93-
*
94-
* CPU 0: Write to a PTE for 'next'
95-
* CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
96-
* CPU 1: set bit 1 in next's mm_cpumask
97-
* CPU 1: load from the PTE that CPU 0 writes (implicit)
98-
*
99-
* We need to prevent an outcome in which CPU 1 observes
100-
* the new PTE value and CPU 0 observes bit 1 clear in
101-
* mm_cpumask. (If that occurs, then the IPI will never
102-
* be sent, and CPU 0's TLB will contain a stale entry.)
103-
*
104-
* The bad outcome can occur if either CPU's load is
105-
* reordered before that CPU's store, so both CPUs must
106-
* execute full barriers to prevent this from happening.
107-
*
108-
* Thus, switch_mm needs a full barrier between the
109-
* store to mm_cpumask and any operation that could load
110-
* from next->pgd. TLB fills are special and can happen
111-
* due to instruction fetches or for no reason at all,
112-
* and neither LOCK nor MFENCE orders them.
113-
* Fortunately, load_cr3() is serializing and gives the
114-
* ordering guarantee we need.
115-
*
93+
* If our current stack is in vmalloc space and isn't
94+
* mapped in the new pgd, we'll double-fault. Forcibly
95+
* map it.
11696
*/
117-
load_cr3(next->pgd);
97+
unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
11898

119-
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
99+
pgd_t *pgd = next->pgd + stack_pgd_index;
120100

121-
/* Stop flush ipis for the previous mm */
122-
cpumask_clear_cpu(cpu, mm_cpumask(prev));
101+
if (unlikely(pgd_none(*pgd)))
102+
set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
103+
}
123104

124-
/* Load per-mm CR4 state */
125-
load_mm_cr4(next);
105+
this_cpu_write(cpu_tlbstate.loaded_mm, next);
106+
107+
WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
108+
cpumask_set_cpu(cpu, mm_cpumask(next));
109+
110+
/*
111+
* Re-load page tables.
112+
*
113+
* This logic has an ordering constraint:
114+
*
115+
* CPU 0: Write to a PTE for 'next'
116+
* CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
117+
* CPU 1: set bit 1 in next's mm_cpumask
118+
* CPU 1: load from the PTE that CPU 0 writes (implicit)
119+
*
120+
* We need to prevent an outcome in which CPU 1 observes
121+
* the new PTE value and CPU 0 observes bit 1 clear in
122+
* mm_cpumask. (If that occurs, then the IPI will never
123+
* be sent, and CPU 0's TLB will contain a stale entry.)
124+
*
125+
* The bad outcome can occur if either CPU's load is
126+
* reordered before that CPU's store, so both CPUs must
127+
* execute full barriers to prevent this from happening.
128+
*
129+
* Thus, switch_mm needs a full barrier between the
130+
* store to mm_cpumask and any operation that could load
131+
* from next->pgd. TLB fills are special and can happen
132+
* due to instruction fetches or for no reason at all,
133+
* and neither LOCK nor MFENCE orders them.
134+
* Fortunately, load_cr3() is serializing and gives the
135+
* ordering guarantee we need.
136+
*/
137+
load_cr3(next->pgd);
138+
139+
/*
140+
* This gets called via leave_mm() in the idle path where RCU
141+
* functions differently. Tracing normally uses RCU, so we have to
142+
* call the tracepoint specially here.
143+
*/
144+
trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
145+
146+
/* Stop flush ipis for the previous mm */
147+
WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
148+
real_prev != &init_mm);
149+
cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
150+
151+
/* Load per-mm CR4 state */
152+
load_mm_cr4(next);
126153

127154
#ifdef CONFIG_MODIFY_LDT_SYSCALL
128-
/*
129-
* Load the LDT, if the LDT is different.
130-
*
131-
* It's possible that prev->context.ldt doesn't match
132-
* the LDT register. This can happen if leave_mm(prev)
133-
* was called and then modify_ldt changed
134-
* prev->context.ldt but suppressed an IPI to this CPU.
135-
* In this case, prev->context.ldt != NULL, because we
136-
* never set context.ldt to NULL while the mm still
137-
* exists. That means that next->context.ldt !=
138-
* prev->context.ldt, because mms never share an LDT.
139-
*/
140-
if (unlikely(prev->context.ldt != next->context.ldt))
141-
load_mm_ldt(next);
155+
/*
156+
* Load the LDT, if the LDT is different.
157+
*
158+
* It's possible that prev->context.ldt doesn't match
159+
* the LDT register. This can happen if leave_mm(prev)
160+
* was called and then modify_ldt changed
161+
* prev->context.ldt but suppressed an IPI to this CPU.
162+
* In this case, prev->context.ldt != NULL, because we
163+
* never set context.ldt to NULL while the mm still
164+
* exists. That means that next->context.ldt !=
165+
* prev->context.ldt, because mms never share an LDT.
166+
*/
167+
if (unlikely(real_prev->context.ldt != next->context.ldt))
168+
load_mm_ldt(next);
142169
#endif
143-
} else {
144-
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
145-
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
146-
147-
if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
148-
/*
149-
* On established mms, the mm_cpumask is only changed
150-
* from irq context, from ptep_clear_flush() while in
151-
* lazy tlb mode, and here. Irqs are blocked during
152-
* schedule, protecting us from simultaneous changes.
153-
*/
154-
cpumask_set_cpu(cpu, mm_cpumask(next));
155-
156-
/*
157-
* We were in lazy tlb mode and leave_mm disabled
158-
* tlb flush IPI delivery. We must reload CR3
159-
* to make sure to use no freed page tables.
160-
*
161-
* As above, load_cr3() is serializing and orders TLB
162-
* fills with respect to the mm_cpumask write.
163-
*/
164-
load_cr3(next->pgd);
165-
trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
166-
load_mm_cr4(next);
167-
load_mm_ldt(next);
168-
}
169-
}
170170
}
171171

172172
/*
@@ -246,7 +246,7 @@ static void flush_tlb_func_remote(void *info)
246246

247247
inc_irq_stat(irq_tlb_count);
248248

249-
if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.active_mm))
249+
if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
250250
return;
251251

252252
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
@@ -314,7 +314,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
314314
info.end = TLB_FLUSH_ALL;
315315
}
316316

317-
if (mm == current->active_mm)
317+
if (mm == this_cpu_read(cpu_tlbstate.loaded_mm))
318318
flush_tlb_func_local(&info, TLB_LOCAL_MM_SHOOTDOWN);
319319
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
320320
flush_tlb_others(mm_cpumask(mm), &info);

0 commit comments

Comments
 (0)