
Commit 738f964

kvaneesh authored and mpe committed
powerpc/mm: Use page fragments for allocation page table at PMD level
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent 8a6c697 commit 738f964

7 files changed: 6 additions & 23 deletions


arch/powerpc/include/asm/book3s/64/hash.h

Lines changed: 0 additions & 10 deletions
@@ -23,16 +23,6 @@
 				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
 #define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
 
-#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
-	defined(CONFIG_PPC_64K_PAGES)
-/*
- * only with hash 64k we need to use the second half of pmd page table
- * to store pointer to deposited pgtable_t
- */
-#define H_PMD_CACHE_INDEX	(H_PMD_INDEX_SIZE + 1)
-#else
-#define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
-#endif
 /*
  * We store the slot details in the second half of page table.
  * Increase the pud level table so that hugetlb ptes can be stored
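The H_PMD_CACHE_INDEX being deleted here encoded a sizing trick: on hash with 64K pages, the PMD table's slab object was doubled (index + 1) so its second half could hold the deposited pgtable_t pointers mentioned in the removed comment. A minimal sketch of that arithmetic, assuming PGT_CACHE(shift) sizes its objects as sizeof(void *) << shift (my reading of powerpc's page table cache setup, stated here as an assumption) and using a hypothetical index value:

#include <stdio.h>

int main(void)
{
	/* Hypothetical index value, purely for illustration. */
	unsigned int h_pmd_index_size = 8;

	/* Assumed sizing rule: one slab object is sizeof(void *) << shift bytes. */
	unsigned long plain  = sizeof(void *) << h_pmd_index_size;
	unsigned long padded = sizeof(void *) << (h_pmd_index_size + 1);

	printf("PMD table alone: %lu bytes\n", plain);
	printf("PMD table plus deposited pgtable_t area: %lu bytes\n", padded);
	return 0;
}

With PMD tables now coming from page fragments rather than a dedicated slab cache, the macro has no remaining users in the hunks below and can go.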

arch/powerpc/include/asm/book3s/64/pgalloc.h

Lines changed: 3 additions & 5 deletions
@@ -90,8 +90,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
	 * need to do this for 4k.
	 */
 #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
-	((H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX) || \
-	 (H_PGD_INDEX_SIZE == H_PMD_CACHE_INDEX))
+	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
 #endif
	return pgd;
@@ -138,13 +137,12 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-				pgtable_gfp_flags(mm, GFP_KERNEL));
+	return pmd_fragment_alloc(mm, addr);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+	pmd_fragment_free((unsigned long *)pmd);
 }
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
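For orientation, here is a minimal, self-contained sketch of the page-fragment technique that the new pmd_alloc_one()/pmd_free() rely on: several PMD tables are carved out of one backing page, and the page is released only once all of its fragments have been freed. This is an illustration of the idea, not the kernel's pmd_fragment_alloc()/pmd_fragment_free(); the frag_pool structure, the helper names, the 4K fragment size, and the single-page bookkeeping are all hypothetical (the kernel keeps a per-page count in struct page).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	65536	/* 64K pages, as with CONFIG_PPC_64K_PAGES */
#define PMD_FRAG_SIZE	4096	/* hypothetical size of one PMD table */
#define PMD_FRAG_NR	(PAGE_SIZE / PMD_FRAG_SIZE)

/* One backing page shared by several PMD tables ("fragments"). */
struct frag_pool {
	void *page;	/* current backing page */
	int next;	/* index of the next unused fragment in it */
	int live;	/* fragments handed out and not yet freed */
};

/* Hand out the next PMD-sized slice of the backing page, grabbing a
 * fresh page only when the current one is fully carved up. */
static void *frag_alloc(struct frag_pool *fp)
{
	void *frag;

	if (!fp->page || fp->next == PMD_FRAG_NR) {
		fp->page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!fp->page)
			return NULL;
		fp->next = 0;
		fp->live = 0;
	}
	frag = (char *)fp->page + fp->next * PMD_FRAG_SIZE;
	memset(frag, 0, PMD_FRAG_SIZE);	/* a new table starts out empty */
	fp->next++;
	fp->live++;
	return frag;
}

/* Return a fragment; release the backing page once every fragment
 * carved from it has been freed. A real implementation would map the
 * fragment back to its own page instead of assuming the current one. */
static void frag_free(struct frag_pool *fp, void *frag)
{
	(void)frag;
	if (--fp->live == 0 && fp->next == PMD_FRAG_NR) {
		free(fp->page);
		fp->page = NULL;
	}
}

int main(void)
{
	struct frag_pool fp = { 0 };

	/* Rough analogue of pmd_alloc_one()/pmd_free() in the hunk above:
	 * two PMD tables end up sharing one 64K backing page. */
	void *pmd_a = frag_alloc(&fp);
	void *pmd_b = frag_alloc(&fp);

	printf("same backing page: %s\n",
	       ((uintptr_t)pmd_a & ~(uintptr_t)(PAGE_SIZE - 1)) ==
	       ((uintptr_t)pmd_b & ~(uintptr_t)(PAGE_SIZE - 1)) ? "yes" : "no");

	frag_free(&fp, pmd_b);
	frag_free(&fp, pmd_a);
	return 0;
}

The benefit is most visible on 64K-page configurations, where a PMD table is far smaller than a page: sharing one page between several tables avoids dedicating a full slab object to each.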

arch/powerpc/include/asm/book3s/64/pgtable.h

Lines changed: 2 additions & 2 deletions
@@ -212,13 +212,13 @@ extern unsigned long __pte_index_size;
 extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
-extern unsigned long __pmd_cache_index;
 extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE	__pte_index_size
 #define PMD_INDEX_SIZE	__pmd_index_size
 #define PUD_INDEX_SIZE	__pud_index_size
 #define PGD_INDEX_SIZE	__pgd_index_size
-#define PMD_CACHE_INDEX	__pmd_cache_index
+/* pmd table use page table fragments */
+#define PMD_CACHE_INDEX	0
 #define PUD_CACHE_INDEX	__pud_cache_index
 /*
  * Because of use of pte fragments and THP, size of page table

arch/powerpc/mm/hash_utils_64.c

Lines changed: 0 additions & 1 deletion
@@ -1020,7 +1020,6 @@ void __init hash__early_init_mmu(void)
	__pud_index_size = H_PUD_INDEX_SIZE;
	__pgd_index_size = H_PGD_INDEX_SIZE;
	__pud_cache_index = H_PUD_CACHE_INDEX;
-	__pmd_cache_index = H_PMD_CACHE_INDEX;
	__pte_table_size = H_PTE_TABLE_SIZE;
	__pmd_table_size = H_PMD_TABLE_SIZE;
	__pud_table_size = H_PUD_TABLE_SIZE;

arch/powerpc/mm/pgtable-book3s64.c

Lines changed: 1 addition & 2 deletions
@@ -400,7 +400,7 @@ static inline void pgtable_free(void *table, int index)
		pte_fragment_free(table, 0);
		break;
	case PMD_INDEX:
-		kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), table);
+		pmd_fragment_free(table);
		break;
	case PUD_INDEX:
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
@@ -431,7 +431,6 @@ void __tlb_remove_table(void *_table)
 #else
 void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
-
	return pgtable_free(table, index);
 }
 #endif

arch/powerpc/mm/pgtable-radix.c

Lines changed: 0 additions & 1 deletion
@@ -617,7 +617,6 @@ void __init radix__early_init_mmu(void)
	__pud_index_size = RADIX_PUD_INDEX_SIZE;
	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
-	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
	__pte_table_size = RADIX_PTE_TABLE_SIZE;
	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
	__pud_table_size = RADIX_PUD_TABLE_SIZE;

arch/powerpc/mm/pgtable_64.c

Lines changed: 0 additions & 2 deletions
@@ -72,8 +72,6 @@ unsigned long __pud_index_size;
 EXPORT_SYMBOL(__pud_index_size);
 unsigned long __pgd_index_size;
 EXPORT_SYMBOL(__pgd_index_size);
-unsigned long __pmd_cache_index;
-EXPORT_SYMBOL(__pmd_cache_index);
 unsigned long __pud_cache_index;
 EXPORT_SYMBOL(__pud_cache_index);
 unsigned long __pte_table_size;
