Skip to content

Commit 1874f68

Browse files
hansendc authored and Ingo Molnar committed
x86/mm/gup: Simplify get_user_pages() PTE bit handling
The current get_user_pages() code is a wee bit more complicated than it needs to be for pte bit checking. Currently, it establishes a mask of required pte _PAGE_* bits and ensures that the pte it goes after has all those bits. This consolidates the three identical copies of this code. Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com> Reviewed-by: Thomas Gleixner <tglx@linutronix.de> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Dave Hansen <dave@sr71.net> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20160212210218.3A2D4045@viggo.jf.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent d4925e0 commit 1874f68

File tree

1 file changed

+22
-16
lines changed

1 file changed

+22
-16
lines changed

arch/x86/mm/gup.c

Lines changed: 22 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,24 @@ static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
7474
}
7575
}
7676

77+
/*
78+
* 'pteval' can come from a pte, pmd or pud. We only check
79+
* _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
80+
* same value on all 3 types.
81+
*/
82+
static inline int pte_allows_gup(unsigned long pteval, int write)
83+
{
84+
unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
85+
86+
if (write)
87+
need_pte_bits |= _PAGE_RW;
88+
89+
if ((pteval & need_pte_bits) != need_pte_bits)
90+
return 0;
91+
92+
return 1;
93+
}
94+
7795
/*
7896
* The performance critical leaf functions are made noinline otherwise gcc
7997
* inlines everything into a single function which results in too much
@@ -83,14 +101,9 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
83101
unsigned long end, int write, struct page **pages, int *nr)
84102
{
85103
struct dev_pagemap *pgmap = NULL;
86-
unsigned long mask;
87104
int nr_start = *nr;
88105
pte_t *ptep;
89106

90-
mask = _PAGE_PRESENT|_PAGE_USER;
91-
if (write)
92-
mask |= _PAGE_RW;
93-
94107
ptep = pte_offset_map(&pmd, addr);
95108
do {
96109
pte_t pte = gup_get_pte(ptep);
@@ -110,7 +123,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
110123
pte_unmap(ptep);
111124
return 0;
112125
}
113-
} else if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
126+
} else if (!pte_allows_gup(pte_val(pte), write) ||
127+
pte_special(pte)) {
114128
pte_unmap(ptep);
115129
return 0;
116130
}
@@ -164,14 +178,10 @@ static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
164178
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
165179
unsigned long end, int write, struct page **pages, int *nr)
166180
{
167-
unsigned long mask;
168181
struct page *head, *page;
169182
int refs;
170183

171-
mask = _PAGE_PRESENT|_PAGE_USER;
172-
if (write)
173-
mask |= _PAGE_RW;
174-
if ((pmd_flags(pmd) & mask) != mask)
184+
if (!pte_allows_gup(pmd_val(pmd), write))
175185
return 0;
176186

177187
VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
@@ -231,14 +241,10 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
231241
static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
232242
unsigned long end, int write, struct page **pages, int *nr)
233243
{
234-
unsigned long mask;
235244
struct page *head, *page;
236245
int refs;
237246

238-
mask = _PAGE_PRESENT|_PAGE_USER;
239-
if (write)
240-
mask |= _PAGE_RW;
241-
if ((pud_flags(pud) & mask) != mask)
247+
if (!pte_allows_gup(pud_val(pud), write))
242248
return 0;
243249
/* hugepages are never "special" */
244250
VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);

0 commit comments

Comments
 (0)