
Commit 0895ecd

dgibson authored and ozbenh committed
powerpc/mm: Bring hugepage PTE accessor functions back into sync with normal accessors
The hugepage arch code provides a number of hook functions/macros which mirror the functionality of various normal page pte access functions. Various changes in the normal page accessors (in particular BenH's recent changes to the handling of lazy icache flushing and PAGE_EXEC) have caused the hugepage versions to get out of sync with the originals. In some cases, this is a bug, at least on some MMU types.

One of the reasons that some hooks were not identical to the normal page versions is that the fact we're dealing with a hugepage needed to be passed down to use the correct dcache-icache flush function. This patch makes the main flush_dcache_icache_page() function hugepage aware (by checking for the PageCompound flag). That in turn means we can make set_huge_pte_at() just a call to set_pte_at(), bringing it back into sync. As a bonus, this lets us remove the hash_huge_page_do_lazy_icache() function, replacing it with a call to the hash_page_do_lazy_icache() function it was based on.

Some other hugepage pte access hooks - huge_ptep_get_and_clear() and huge_ptep_clear_flush() - are not so easily unified, but this patch at least brings them back into sync with the current versions of the corresponding normal page functions.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
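In essence (a condensed, illustrative sketch pieced together from the hunks below, not a complete listing; the non-hugepage branches of flush_dcache_icache_page() are elided), the flush path gains a PageCompound() check that routes hugepages to a per-base-page flush loop, which is what lets set_huge_pte_at() collapse into a plain set_pte_at():

/* Sketch condensed from this commit's diffs; elided parts are marked. */
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;

        BUG_ON(!PageCompound(page));

        /* flush each base page making up the huge page */
        for (i = 0; i < (1UL << compound_order(page)); i++)
                __flush_dcache_icache(page_address(page + i));
}

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (PageCompound(page)) {
                flush_dcache_icache_hugepage(page);
                return;
        }
#endif
        /* ... existing BookE / 8xx / PPC64 / physical flush paths ... */
}

/* With the flush helpers hugepage-aware, the hugepage setter is just the
 * normal one. */
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, addr, ptep, pte);
}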
1 parent 883a3e5 · commit 0895ecd

6 files changed: 45 additions & 61 deletions

arch/powerpc/include/asm/hugetlb.h

Lines changed: 19 additions & 6 deletions
@@ -6,19 +6,15 @@
 pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
                                  unsigned long addr, unsigned *shift);
 
+void flush_dcache_icache_hugepage(struct page *page);
+
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                            unsigned long len);
 
 void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                             unsigned long end, unsigned long floor,
                             unsigned long ceiling);
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                     pte_t *ptep, pte_t pte);
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep);
-
 /*
  * The version of vma_mmu_pagesize() in arch/powerpc/mm/hugetlbpage.c needs
  * to override the version in mm/hugetlb.c
@@ -44,9 +40,26 @@ static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
 }
 
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                   pte_t *ptep, pte_t pte)
+{
+        set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                            unsigned long addr, pte_t *ptep)
+{
+        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
+        return __pte(old);
+}
+
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
                                          unsigned long addr, pte_t *ptep)
 {
+        pte_t pte;
+        pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+        flush_tlb_page(vma, addr);
 }
 
 static inline int huge_pte_none(pte_t pte)

arch/powerpc/include/asm/mmu-hash64.h

Lines changed: 1 addition & 0 deletions
@@ -245,6 +245,7 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
                            unsigned long vsid, pte_t *ptep, unsigned long trap,
                            unsigned int local, int ssize);
 struct mm_struct;
+unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
 extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                      pte_t *ptep, unsigned long trap, int local, int ssize,

arch/powerpc/mm/hash_utils_64.c

Lines changed: 1 addition & 1 deletion
@@ -775,7 +775,7 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
         /* page is dirty */
         if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
                 if (trap == 0x400) {
-                        __flush_dcache_icache(page_address(page));
+                        flush_dcache_icache_page(page);
                         set_bit(PG_arch_1, &page->flags);
                 } else
                         pp |= HPTE_R_N;

arch/powerpc/mm/hugetlbpage-hash64.c

Lines changed: 1 addition & 29 deletions
@@ -14,33 +14,6 @@
 #include <asm/cacheflush.h>
 #include <asm/machdep.h>
 
-/*
- * Called by asm hashtable.S for doing lazy icache flush
- */
-static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
-                                                  pte_t pte, int trap, unsigned long sz)
-{
-        struct page *page;
-        int i;
-
-        if (!pfn_valid(pte_pfn(pte)))
-                return rflags;
-
-        page = pte_page(pte);
-
-        /* page is dirty */
-        if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
-                if (trap == 0x400) {
-                        for (i = 0; i < (sz / PAGE_SIZE); i++)
-                                __flush_dcache_icache(page_address(page+i));
-                        set_bit(PG_arch_1, &page->flags);
-                } else {
-                        rflags |= HPTE_R_N;
-                }
-        }
-        return rflags;
-}
-
 int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                      pte_t *ptep, unsigned long trap, int local, int ssize,
                      unsigned int shift, unsigned int mmu_psize)
@@ -89,8 +62,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
         if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                 /* No CPU has hugepages but lacks no execute, so we
                  * don't need to worry about that case */
-                rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
-                                                       trap, sz);
+                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
 
         /* Check if pte already has an hpte (case 2) */
         if (unlikely(old_pte & _PAGE_HASHPTE)) {

arch/powerpc/mm/hugetlbpage.c

Lines changed: 10 additions & 21 deletions
@@ -344,27 +344,6 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
         } while (pgd++, addr = next, addr != end);
 }
 
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                     pte_t *ptep, pte_t pte)
-{
-        if (pte_present(*ptep)) {
-                /* We open-code pte_clear because we need to pass the right
-                 * argument to hpte_need_flush (huge / !huge). Might not be
-                 * necessary anymore if we make hpte_need_flush() get the
-                 * page size from the slices
-                 */
-                pte_update(mm, addr, ptep, ~0UL, 1);
-        }
-        *ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-}
-
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep)
-{
-        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 1);
-        return __pte(old);
-}
-
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
@@ -580,3 +559,13 @@ static int __init hugetlbpage_init(void)
 }
 
 module_init(hugetlbpage_init);
+
+void flush_dcache_icache_hugepage(struct page *page)
+{
+        int i;
+
+        BUG_ON(!PageCompound(page));
+
+        for (i = 0; i < (1UL << compound_order(page)); i++)
+                __flush_dcache_icache(page_address(page+i));
+}

arch/powerpc/mm/mem.c

Lines changed: 13 additions & 4 deletions
@@ -32,6 +32,7 @@
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
 #include <linux/lmb.h>
+#include <linux/hugetlb.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -417,18 +418,26 @@ EXPORT_SYMBOL(flush_dcache_page);
 
 void flush_dcache_icache_page(struct page *page)
 {
+#ifdef CONFIG_HUGETLB_PAGE
+        if (PageCompound(page)) {
+                flush_dcache_icache_hugepage(page);
+                return;
+        }
+#endif
 #ifdef CONFIG_BOOKE
-        void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
-        __flush_dcache_icache(start);
-        kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+        {
+                void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+                __flush_dcache_icache(start);
+                kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+        }
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
         /* On 8xx there is no need to kmap since highmem is not supported */
         __flush_dcache_icache(page_address(page));
 #else
         __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
-
 }
+
 void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
 {
         clear_page(page);
