Skip to content

Commit e585513

Browse files
kiryl authored and Ingo Molnar committed
x86/mm/gup: Switch GUP to the generic get_user_page_fast() implementation
This patch provides all required callbacks required by the generic get_user_pages_fast() code and switches x86 over - and removes the platform specific implementation. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-arch@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20170606113133.22974-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 6c690ee commit e585513

File tree

12 files changed

+128
-519
lines changed

12 files changed

+128
-519
lines changed

arch/arm/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1637,7 +1637,7 @@ config ARCH_SELECT_MEMORY_MODEL
16371637
config HAVE_ARCH_PFN_VALID
16381638
def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
16391639

1640-
config HAVE_GENERIC_RCU_GUP
1640+
config HAVE_GENERIC_GUP
16411641
def_bool y
16421642
depends on ARM_LPAE
16431643

arch/arm64/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -205,7 +205,7 @@ config GENERIC_CALIBRATE_DELAY
205205
config ZONE_DMA
206206
def_bool y
207207

208-
config HAVE_GENERIC_RCU_GUP
208+
config HAVE_GENERIC_GUP
209209
def_bool y
210210

211211
config ARCH_DMA_ADDR_T_64BIT

arch/powerpc/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -184,7 +184,7 @@ config PPC
184184
select HAVE_FUNCTION_GRAPH_TRACER
185185
select HAVE_FUNCTION_TRACER
186186
select HAVE_GCC_PLUGINS
187-
select HAVE_GENERIC_RCU_GUP
187+
select HAVE_GENERIC_GUP
188188
select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx)
189189
select HAVE_IDE
190190
select HAVE_IOREMAP_PROT

arch/x86/Kconfig

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2797,6 +2797,9 @@ config X86_DMA_REMAP
27972797
bool
27982798
depends on STA2X11
27992799

2800+
config HAVE_GENERIC_GUP
2801+
def_bool y
2802+
28002803
source "net/Kconfig"
28012804

28022805
source "drivers/Kconfig"

arch/x86/include/asm/mmu_context.h

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -218,18 +218,6 @@ static inline int vma_pkey(struct vm_area_struct *vma)
218218
}
219219
#endif
220220

221-
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
222-
{
223-
u32 pkru = read_pkru();
224-
225-
if (!__pkru_allows_read(pkru, pkey))
226-
return false;
227-
if (write && !__pkru_allows_write(pkru, pkey))
228-
return false;
229-
230-
return true;
231-
}
232-
233221
/*
234222
* We only want to enforce protection keys on the current process
235223
* because we effectively have no access to PKRU for other

arch/x86/include/asm/pgtable-3level.h

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -212,4 +212,51 @@ static inline pud_t native_pudp_get_and_clear(pud_t *pudp)
212212
#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
213213
#define __swp_entry_to_pte(x) ((pte_t){ { .pte_high = (x).val } })
214214

215+
#define gup_get_pte gup_get_pte
216+
/*
217+
* WARNING: only to be used in the get_user_pages_fast() implementation.
218+
*
219+
* With get_user_pages_fast(), we walk down the pagetables without taking
220+
* any locks. For this we would like to load the pointers atomically,
221+
* but that is not possible (without expensive cmpxchg8b) on PAE. What
222+
* we do have is the guarantee that a PTE will only either go from not
223+
* present to present, or present to not present or both -- it will not
224+
* switch to a completely different present page without a TLB flush in
225+
* between; something that we are blocking by holding interrupts off.
226+
*
227+
* Setting ptes from not present to present goes:
228+
*
229+
* ptep->pte_high = h;
230+
* smp_wmb();
231+
* ptep->pte_low = l;
232+
*
233+
* And present to not present goes:
234+
*
235+
* ptep->pte_low = 0;
236+
* smp_wmb();
237+
* ptep->pte_high = 0;
238+
*
239+
* We must ensure here that the load of pte_low sees 'l' iff pte_high
240+
* sees 'h'. We load pte_high *after* loading pte_low, which ensures we
241+
* don't see an older value of pte_high. *Then* we recheck pte_low,
242+
* which ensures that we haven't picked up a changed pte high. We might
243+
* have gotten rubbish values from pte_low and pte_high, but we are
244+
* guaranteed that pte_low will not have the present bit set *unless*
245+
* it is 'l'. Because get_user_pages_fast() only operates on present ptes
246+
* we're safe.
247+
*/
248+
static inline pte_t gup_get_pte(pte_t *ptep)
249+
{
250+
pte_t pte;
251+
252+
do {
253+
pte.pte_low = ptep->pte_low;
254+
smp_rmb();
255+
pte.pte_high = ptep->pte_high;
256+
smp_rmb();
257+
} while (unlikely(pte.pte_low != ptep->pte_low));
258+
259+
return pte;
260+
}
261+
215262
#endif /* _ASM_X86_PGTABLE_3LEVEL_H */

arch/x86/include/asm/pgtable.h

Lines changed: 53 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -244,6 +244,11 @@ static inline int pud_devmap(pud_t pud)
244244
return 0;
245245
}
246246
#endif
247+
248+
static inline int pgd_devmap(pgd_t pgd)
249+
{
250+
return 0;
251+
}
247252
#endif
248253
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
249254

@@ -1185,6 +1190,54 @@ static inline u16 pte_flags_pkey(unsigned long pte_flags)
11851190
#endif
11861191
}
11871192

1193+
static inline bool __pkru_allows_pkey(u16 pkey, bool write)
1194+
{
1195+
u32 pkru = read_pkru();
1196+
1197+
if (!__pkru_allows_read(pkru, pkey))
1198+
return false;
1199+
if (write && !__pkru_allows_write(pkru, pkey))
1200+
return false;
1201+
1202+
return true;
1203+
}
1204+
1205+
/*
1206+
* 'pteval' can come from a PTE, PMD or PUD. We only check
1207+
* _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
1208+
* same value on all 3 types.
1209+
*/
1210+
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
1211+
{
1212+
unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
1213+
1214+
if (write)
1215+
need_pte_bits |= _PAGE_RW;
1216+
1217+
if ((pteval & need_pte_bits) != need_pte_bits)
1218+
return 0;
1219+
1220+
return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
1221+
}
1222+
1223+
#define pte_access_permitted pte_access_permitted
1224+
static inline bool pte_access_permitted(pte_t pte, bool write)
1225+
{
1226+
return __pte_access_permitted(pte_val(pte), write);
1227+
}
1228+
1229+
#define pmd_access_permitted pmd_access_permitted
1230+
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
1231+
{
1232+
return __pte_access_permitted(pmd_val(pmd), write);
1233+
}
1234+
1235+
#define pud_access_permitted pud_access_permitted
1236+
static inline bool pud_access_permitted(pud_t pud, bool write)
1237+
{
1238+
return __pte_access_permitted(pud_val(pud), write);
1239+
}
1240+
11881241
#include <asm-generic/pgtable.h>
11891242
#endif /* __ASSEMBLY__ */
11901243

arch/x86/include/asm/pgtable_64.h

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -227,6 +227,20 @@ extern void cleanup_highmap(void);
227227
extern void init_extra_mapping_uc(unsigned long phys, unsigned long size);
228228
extern void init_extra_mapping_wb(unsigned long phys, unsigned long size);
229229

230-
#endif /* !__ASSEMBLY__ */
230+
#define gup_fast_permitted gup_fast_permitted
231+
static inline bool gup_fast_permitted(unsigned long start, int nr_pages,
232+
int write)
233+
{
234+
unsigned long len, end;
235+
236+
len = (unsigned long)nr_pages << PAGE_SHIFT;
237+
end = start + len;
238+
if (end < start)
239+
return false;
240+
if (end >> __VIRTUAL_MASK_SHIFT)
241+
return false;
242+
return true;
243+
}
231244

245+
#endif /* !__ASSEMBLY__ */
232246
#endif /* _ASM_X86_PGTABLE_64_H */

arch/x86/mm/Makefile

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
KCOV_INSTRUMENT_tlb.o := n
33

44
obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
5-
pat.o pgtable.o physaddr.o gup.o setup_nx.o tlb.o
5+
pat.o pgtable.o physaddr.o setup_nx.o tlb.o
66

77
# Make sure __phys_addr has no stackprotector
88
nostackp := $(call cc-option, -fno-stack-protector)

0 commit comments

Comments (0)