Skip to content

Commit cbbac1c

Browse files
committed
Merge branch 'tlb/asm-generic' into aarch64/for-next/core
As agreed on the list, merge in the core mmu_gather changes which allow
us to track the levels of page-table being cleared. We'll build on this
in our low-level flushing routines, and Nick and Peter also have plans
for other architectures.

Signed-off-by: Will Deacon <will.deacon@arm.com>
2 parents 5736184 + 7526aa5 commit cbbac1c

File tree

5 files changed

+351
-262
lines changed

5 files changed

+351
-262
lines changed

MAINTAINERS

Lines changed: 13 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -9681,6 +9681,19 @@ S: Maintained
96819681
F: arch/arm/boot/dts/mmp*
96829682
F: arch/arm/mach-mmp/
96839683

9684+
MMU GATHER AND TLB INVALIDATION
9685+
M: Will Deacon <will.deacon@arm.com>
9686+
M: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
9687+
M: Andrew Morton <akpm@linux-foundation.org>
9688+
M: Nick Piggin <npiggin@gmail.com>
9689+
M: Peter Zijlstra <peterz@infradead.org>
9690+
L: linux-arch@vger.kernel.org
9691+
L: linux-mm@kvack.org
9692+
S: Maintained
9693+
F: arch/*/include/asm/tlb.h
9694+
F: include/asm-generic/tlb.h
9695+
F: mm/mmu_gather.c
9696+
96849697
MN88472 MEDIA DRIVER
96859698
M: Antti Palosaari <crope@iki.fi>
96869699
L: linux-media@vger.kernel.org

include/asm-generic/tlb.h

Lines changed: 74 additions & 12 deletions
Original file line number | Diff line number | Diff line change
@@ -20,6 +20,8 @@
2020
#include <asm/pgalloc.h>
2121
#include <asm/tlbflush.h>
2222

23+
#ifdef CONFIG_MMU
24+
2325
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
2426
/*
2527
* Semi RCU freeing of the page directories.
@@ -97,12 +99,30 @@ struct mmu_gather {
9799
#endif
98100
unsigned long start;
99101
unsigned long end;
100-
/* we are in the middle of an operation to clear
101-
* a full mm and can make some optimizations */
102-
unsigned int fullmm : 1,
103-
/* we have performed an operation which
104-
* requires a complete flush of the tlb */
105-
need_flush_all : 1;
102+
/*
103+
* we are in the middle of an operation to clear
104+
* a full mm and can make some optimizations
105+
*/
106+
unsigned int fullmm : 1;
107+
108+
/*
109+
* we have performed an operation which
110+
* requires a complete flush of the tlb
111+
*/
112+
unsigned int need_flush_all : 1;
113+
114+
/*
115+
* we have removed page directories
116+
*/
117+
unsigned int freed_tables : 1;
118+
119+
/*
120+
* at which levels have we cleared entries?
121+
*/
122+
unsigned int cleared_ptes : 1;
123+
unsigned int cleared_pmds : 1;
124+
unsigned int cleared_puds : 1;
125+
unsigned int cleared_p4ds : 1;
106126

107127
struct mmu_gather_batch *active;
108128
struct mmu_gather_batch local;
@@ -118,6 +138,7 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb,
118138
void tlb_flush_mmu(struct mmu_gather *tlb);
119139
void arch_tlb_finish_mmu(struct mmu_gather *tlb,
120140
unsigned long start, unsigned long end, bool force);
141+
void tlb_flush_mmu_free(struct mmu_gather *tlb);
121142
extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
122143
int page_size);
123144

@@ -137,6 +158,11 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
137158
tlb->start = TASK_SIZE;
138159
tlb->end = 0;
139160
}
161+
tlb->freed_tables = 0;
162+
tlb->cleared_ptes = 0;
163+
tlb->cleared_pmds = 0;
164+
tlb->cleared_puds = 0;
165+
tlb->cleared_p4ds = 0;
140166
}
141167

142168
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
@@ -186,6 +212,25 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
186212
}
187213
#endif
188214

215+
static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
216+
{
217+
if (tlb->cleared_ptes)
218+
return PAGE_SHIFT;
219+
if (tlb->cleared_pmds)
220+
return PMD_SHIFT;
221+
if (tlb->cleared_puds)
222+
return PUD_SHIFT;
223+
if (tlb->cleared_p4ds)
224+
return P4D_SHIFT;
225+
226+
return PAGE_SHIFT;
227+
}
228+
229+
static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
230+
{
231+
return 1UL << tlb_get_unmap_shift(tlb);
232+
}
233+
189234
/*
190235
* In the case of tlb vma handling, we can optimise these away in the
191236
* case where we're doing a full MM flush. When we're doing a munmap,
@@ -219,13 +264,19 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
219264
#define tlb_remove_tlb_entry(tlb, ptep, address) \
220265
do { \
221266
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
267+
tlb->cleared_ptes = 1; \
222268
__tlb_remove_tlb_entry(tlb, ptep, address); \
223269
} while (0)
224270

225-
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
226-
do { \
227-
__tlb_adjust_range(tlb, address, huge_page_size(h)); \
228-
__tlb_remove_tlb_entry(tlb, ptep, address); \
271+
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address) \
272+
do { \
273+
unsigned long _sz = huge_page_size(h); \
274+
__tlb_adjust_range(tlb, address, _sz); \
275+
if (_sz == PMD_SIZE) \
276+
tlb->cleared_pmds = 1; \
277+
else if (_sz == PUD_SIZE) \
278+
tlb->cleared_puds = 1; \
279+
__tlb_remove_tlb_entry(tlb, ptep, address); \
229280
} while (0)
230281

231282
/**
@@ -239,6 +290,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
239290
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address) \
240291
do { \
241292
__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE); \
293+
tlb->cleared_pmds = 1; \
242294
__tlb_remove_pmd_tlb_entry(tlb, pmdp, address); \
243295
} while (0)
244296

@@ -253,6 +305,7 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
253305
#define tlb_remove_pud_tlb_entry(tlb, pudp, address) \
254306
do { \
255307
__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE); \
308+
tlb->cleared_puds = 1; \
256309
__tlb_remove_pud_tlb_entry(tlb, pudp, address); \
257310
} while (0)
258311

@@ -278,14 +331,18 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
278331
#define pte_free_tlb(tlb, ptep, address) \
279332
do { \
280333
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
334+
tlb->freed_tables = 1; \
335+
tlb->cleared_pmds = 1; \
281336
__pte_free_tlb(tlb, ptep, address); \
282337
} while (0)
283338
#endif
284339

285340
#ifndef pmd_free_tlb
286341
#define pmd_free_tlb(tlb, pmdp, address) \
287342
do { \
288-
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
343+
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
344+
tlb->freed_tables = 1; \
345+
tlb->cleared_puds = 1; \
289346
__pmd_free_tlb(tlb, pmdp, address); \
290347
} while (0)
291348
#endif
@@ -295,6 +352,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
295352
#define pud_free_tlb(tlb, pudp, address) \
296353
do { \
297354
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
355+
tlb->freed_tables = 1; \
356+
tlb->cleared_p4ds = 1; \
298357
__pud_free_tlb(tlb, pudp, address); \
299358
} while (0)
300359
#endif
@@ -304,12 +363,15 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
304363
#ifndef p4d_free_tlb
305364
#define p4d_free_tlb(tlb, pudp, address) \
306365
do { \
307-
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
366+
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
367+
tlb->freed_tables = 1; \
308368
__p4d_free_tlb(tlb, pudp, address); \
309369
} while (0)
310370
#endif
311371
#endif
312372

373+
#endif /* CONFIG_MMU */
374+
313375
#define tlb_migrate_finish(mm) do {} while (0)
314376

315377
#endif /* _ASM_GENERIC__TLB_H */

mm/Makefile

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -23,9 +23,9 @@ KCOV_INSTRUMENT_vmstat.o := n
2323

2424
mmu-y := nommu.o
2525
mmu-$(CONFIG_MMU) := gup.o highmem.o memory.o mincore.o \
26-
mlock.o mmap.o mprotect.o mremap.o msync.o \
27-
page_vma_mapped.o pagewalk.o pgtable-generic.o \
28-
rmap.o vmalloc.o
26+
mlock.o mmap.o mmu_gather.o mprotect.o mremap.o \
27+
msync.o page_vma_mapped.o pagewalk.o \
28+
pgtable-generic.o rmap.o vmalloc.o
2929

3030

3131
ifdef CONFIG_CROSS_MEMORY_ATTACH

0 commit comments

Comments (0)