
Commit 7023467

kvaneesh authored and mpe committed
powerpc/mm/nohash: Remove pte fragment dependency from nohash
Now that we have removed 64K page size support, the RCU page table free can be much simpler for nohash. Make a copy of the RCU callback to the pgalloc.h header, similar to nohash 32. We could possibly merge the 32- and 64-bit versions there, but that is for a later patch.

We also move the book3s-specific handler to pgtable-book3s64.c. This will be updated in a later patch to handle split PMD ptlock.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
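The copied RCU free path keeps the existing deferred-free trick: the table's size class (its "shift") is stashed in the low bits of the table pointer before it is handed to tlb_remove_table(), and recovered in __tlb_remove_table() after the grace period. Below is a minimal standalone sketch of that encoding; encode_table()/decode_table() are illustrative names, not kernel functions, and it assumes (as the kernel's BUG_ON() enforces) that tables are aligned beyond MAX_PGTABLE_INDEX_SIZE so the low bits are free.

/* Standalone sketch of the pointer-tagging scheme; not kernel code. */
#include <assert.h>
#include <stdint.h>

#define MAX_PGTABLE_INDEX_SIZE 0xf	/* 0xf on powerpc; low bits carry the tag */

static void *encode_table(void *table, unsigned int shift)
{
	uintptr_t pgf = (uintptr_t)table;

	assert(shift <= MAX_PGTABLE_INDEX_SIZE);	/* BUG_ON() in the kernel */
	assert(!(pgf & MAX_PGTABLE_INDEX_SIZE));	/* alignment keeps these bits clear */
	return (void *)(pgf | shift);
}

static void *decode_table(void *tagged, unsigned int *shift)
{
	*shift = (uintptr_t)tagged & MAX_PGTABLE_INDEX_SIZE;
	return (void *)((uintptr_t)tagged & ~(uintptr_t)MAX_PGTABLE_INDEX_SIZE);
}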
1 parent 7820856 commit 7023467

File tree: 3 files changed, 159 insertions(+), 126 deletions(-)


arch/powerpc/include/asm/nohash/64/pgalloc.h

Lines changed: 45 additions & 12 deletions
@@ -84,6 +84,18 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
+			pgtable_gfp_flags(mm, GFP_KERNEL));
+}
+
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+}
+
+
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
@@ -118,26 +130,47 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 	__free_page(ptepage);
 }
 
-extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
+static inline void pgtable_free(void *table, int shift)
+{
+	if (!shift) {
+		pgtable_page_dtor(table);
+		free_page((unsigned long)table);
+	} else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
+
 #ifdef CONFIG_SMP
-extern void __tlb_remove_table(void *_table);
-#endif
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
-				  unsigned long address)
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
-	tlb_flush_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, page_address(table), 0);
+	unsigned long pgf = (unsigned long)table;
+
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
 }
 
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline void __tlb_remove_table(void *_table)
 {
-	return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-			pgtable_gfp_flags(mm, GFP_KERNEL));
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	pgtable_free(table, shift);
 }
 
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+#else
+static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 {
-	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
+	pgtable_free(table, shift);
+}
+#endif
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
+				  unsigned long address)
+{
+	tlb_flush_pgtable(tlb, address);
+	pgtable_free_tlb(tlb, page_address(table), 0);
 }
 
 #define __pmd_free_tlb(tlb, pmd, addr)		      \
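Continuing the sketch from the commit message above, a round trip with hypothetical values shows why __pte_free_tlb() can pass shift 0 for PTE pages while larger tables carry their cache index (the address and the shift of 9 are made up for illustration):

/* Hypothetical values, only to show the tag round trip. */
void *table = (void *)0xc000000000fe8000UL;	/* low 4 bits clear by alignment */
unsigned int shift;
void *pgf = encode_table(table, 9);		/* == (void *)0xc000000000fe8009UL */

assert(decode_table(pgf, &shift) == table);
assert(shift == 9);	/* nonzero: freed via kmem_cache_free(PGT_CACHE(9), ...) */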

arch/powerpc/mm/pgtable-book3s64.c

Lines changed: 114 additions & 0 deletions
@@ -225,3 +225,117 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 }
 EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+#ifdef CONFIG_PPC_64K_PAGES
+static pte_t *get_pte_from_cache(struct mm_struct *mm)
+{
+	void *pte_frag, *ret;
+
+	spin_lock(&mm->page_table_lock);
+	ret = mm->context.pte_frag;
+	if (ret) {
+		pte_frag = ret + PTE_FRAG_SIZE;
+		/*
+		 * If we have taken up all the fragments mark PTE page NULL
+		 */
+		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
+			pte_frag = NULL;
+		mm->context.pte_frag = pte_frag;
+	}
+	spin_unlock(&mm->page_table_lock);
+	return (pte_t *)ret;
+}
+
+static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
+{
+	void *ret = NULL;
+	struct page *page;
+
+	if (!kernel) {
+		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
+		if (!page)
+			return NULL;
+		if (!pgtable_page_ctor(page)) {
+			__free_page(page);
+			return NULL;
+		}
+	} else {
+		page = alloc_page(PGALLOC_GFP);
+		if (!page)
+			return NULL;
+	}
+
+	ret = page_address(page);
+	spin_lock(&mm->page_table_lock);
+	/*
+	 * If we find pgtable_page set, we return
+	 * the allocated page with single fragement
+	 * count.
+	 */
+	if (likely(!mm->context.pte_frag)) {
+		set_page_count(page, PTE_FRAG_NR);
+		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
+	}
+	spin_unlock(&mm->page_table_lock);
+
+	return (pte_t *)ret;
+}
+
+pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
+{
+	pte_t *pte;
+
+	pte = get_pte_from_cache(mm);
+	if (pte)
+		return pte;
+
+	return __alloc_for_ptecache(mm, kernel);
+}
+
+#endif /* CONFIG_PPC_64K_PAGES */
+
+void pte_fragment_free(unsigned long *table, int kernel)
+{
+	struct page *page = virt_to_page(table);
+
+	if (put_page_testzero(page)) {
+		if (!kernel)
+			pgtable_page_dtor(page);
+		free_unref_page(page);
+	}
+}
+
+#ifdef CONFIG_SMP
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+	unsigned long pgf = (unsigned long)table;
+
+	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= shift;
+	tlb_remove_table(tlb, (void *)pgf);
+}
+
+void __tlb_remove_table(void *_table)
+{
+	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
+	unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+
+	if (!shift)
+		/* PTE page needs special handling */
+		pte_fragment_free(table, 0);
+	else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
+#else
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+{
+	if (!shift) {
+		/* PTE page needs special handling */
+		pte_fragment_free(table, 0);
+	} else {
+		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
+		kmem_cache_free(PGT_CACHE(shift), table);
+	}
+}
+#endif
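The fragment allocator moved here carves one page into PTE_FRAG_NR pieces of PTE_FRAG_SIZE bytes each, hands them out from mm->context.pte_frag under mm->page_table_lock, and relies on the page refcount so the backing page is released only when the last fragment is freed. A toy user-space model of that accounting follows; FRAG_SIZE and FRAG_NR are assumed values (the kernel derives the real ones from the 64K page geometry), and error handling is elided.

/* Toy model of the pte-fragment accounting; not kernel code. */
#include <stdlib.h>

#define FRAG_SIZE 4096	/* stands in for PTE_FRAG_SIZE */
#define FRAG_NR   16	/* stands in for PTE_FRAG_NR */

struct frag_page {
	char *base;	/* backing page */
	char *cursor;	/* next free fragment, NULL once exhausted */
	int refcount;	/* one reference per outstanding fragment */
};

/* Mirrors __alloc_for_ptecache(): fresh page, first fragment goes to the caller. */
static void *frag_page_init(struct frag_page *fp)
{
	fp->base = aligned_alloc(FRAG_SIZE, FRAG_NR * FRAG_SIZE);
	fp->cursor = fp->base + FRAG_SIZE;	/* cache the second fragment */
	fp->refcount = FRAG_NR;			/* like set_page_count(page, PTE_FRAG_NR) */
	return fp->base;
}

/* Mirrors get_pte_from_cache(): hand out the cached fragment, if any. */
static void *frag_alloc(struct frag_page *fp)
{
	void *ret = fp->cursor;

	if (ret) {
		fp->cursor += FRAG_SIZE;
		if (fp->cursor == fp->base + FRAG_NR * FRAG_SIZE)
			fp->cursor = NULL;	/* page fully handed out */
	}
	return ret;
}

/* Mirrors pte_fragment_free(): dropping the last reference frees the page. */
static void frag_free(struct frag_page *fp)
{
	if (--fp->refcount == 0)
		free(fp->base);
}

In the kernel the cursor and the refcount live in mm->context.pte_frag and struct page respectively, which is why pte_fragment_free() only needs virt_to_page() plus put_page_testzero().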

arch/powerpc/mm/pgtable_64.c

Lines changed: 0 additions & 114 deletions
@@ -313,120 +313,6 @@ struct page *pmd_page(pmd_t pmd)
 	return virt_to_page(pmd_page_vaddr(pmd));
 }
 
-#ifdef CONFIG_PPC_64K_PAGES
-static pte_t *get_pte_from_cache(struct mm_struct *mm)
-{
-	void *pte_frag, *ret;
-
-	spin_lock(&mm->page_table_lock);
-	ret = mm->context.pte_frag;
-	if (ret) {
-		pte_frag = ret + PTE_FRAG_SIZE;
-		/*
-		 * If we have taken up all the fragments mark PTE page NULL
-		 */
-		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
-			pte_frag = NULL;
-		mm->context.pte_frag = pte_frag;
-	}
-	spin_unlock(&mm->page_table_lock);
-	return (pte_t *)ret;
-}
-
-static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
-{
-	void *ret = NULL;
-	struct page *page;
-
-	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
-			return NULL;
-		if (!pgtable_page_ctor(page)) {
-			__free_page(page);
-			return NULL;
-		}
-	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
-			return NULL;
-	}
-
-	ret = page_address(page);
-	spin_lock(&mm->page_table_lock);
-	/*
-	 * If we find pgtable_page set, we return
-	 * the allocated page with single fragement
-	 * count.
-	 */
-	if (likely(!mm->context.pte_frag)) {
-		set_page_count(page, PTE_FRAG_NR);
-		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
-	}
-	spin_unlock(&mm->page_table_lock);
-
-	return (pte_t *)ret;
-}
-
-pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
-{
-	pte_t *pte;
-
-	pte = get_pte_from_cache(mm);
-	if (pte)
-		return pte;
-
-	return __alloc_for_ptecache(mm, kernel);
-}
-
-#endif /* CONFIG_PPC_64K_PAGES */
-
-void pte_fragment_free(unsigned long *table, int kernel)
-{
-	struct page *page = virt_to_page(table);
-	if (put_page_testzero(page)) {
-		if (!kernel)
-			pgtable_page_dtor(page);
-		free_unref_page(page);
-	}
-}
-
-#ifdef CONFIG_SMP
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
-	unsigned long pgf = (unsigned long)table;
-
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf |= shift;
-	tlb_remove_table(tlb, (void *)pgf);
-}
-
-void __tlb_remove_table(void *_table)
-{
-	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
-	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
-
-	if (!shift)
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
-}
-#else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
-{
-	if (!shift) {
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	} else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
-}
-#endif
-
 #ifdef CONFIG_STRICT_KERNEL_RWX
 void mark_rodata_ro(void)
 {