
Commit 0c4d268

kvaneesh authored and mpe committed
powerpc/book3s64/mm: Simplify the rcu callback for page table free
Instead of encoding the shift in the table address, use an enumerated index value. This allows us to do different things in the callback for pte and pmd.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
1 parent 1c7ec8a commit 0c4d268

File tree: 3 files changed, +41 −24 lines

arch/powerpc/include/asm/book3s/64/pgalloc.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/mm/pgtable-book3s64.c
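Before the per-file hunks, a minimal self-contained sketch of the pointer-tagging pattern the commit relies on, in plain user-space C. Everything here is illustrative: encode(), callback() and INDEX_MASK are hypothetical stand-ins for pgtable_free_tlb(), __tlb_remove_table() and MAX_PGTABLE_INDEX_SIZE, and the kernel additionally defers the callback through the RCU table-free machinery rather than invoking it synchronously.

/*
 * Sketch: page-table allocations are at least 16-byte aligned, so the
 * low 4 bits of the pointer are zero and can carry a small enum tag.
 * The deferred callback masks the tag back out and dispatches on it.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum pgtable_index { PTE_INDEX = 0, PMD_INDEX, PUD_INDEX, PGD_INDEX };

#define INDEX_MASK 0xfUL	/* stand-in for MAX_PGTABLE_INDEX_SIZE */

/* Encode: fold the index into the low bits of an aligned pointer. */
static void *encode(void *table, enum pgtable_index index)
{
	uintptr_t pgf = (uintptr_t)table;

	assert((pgf & INDEX_MASK) == 0);	/* alignment frees these bits */
	assert(index <= INDEX_MASK);
	return (void *)(pgf | index);
}

/* Decode: recover pointer and index, then do per-level work. */
static void callback(void *_table)
{
	void *table = (void *)((uintptr_t)_table & ~INDEX_MASK);
	unsigned int index = (uintptr_t)_table & INDEX_MASK;

	switch (index) {
	case PTE_INDEX:
		printf("free pte table %p\n", table);
		break;
	case PMD_INDEX:
		printf("free pmd table %p\n", table);
		break;
	default:
		abort();	/* other levels are not freed this way */
	}
	free(table);
}

int main(void)
{
	void *table = aligned_alloc(16, 64);	/* 16-byte aligned block */

	callback(encode(table, PMD_INDEX));
	return 0;
}

Because the low bits double as the tag, the scheme only works while every tag value fits inside the pointer's guaranteed alignment; that is the constraint the new enum in pgtable.h must respect.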

arch/powerpc/include/asm/book3s/64/pgalloc.h

Lines changed: 5 additions & 5 deletions
@@ -124,14 +124,14 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 }
 
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
-                                  unsigned long address)
+				  unsigned long address)
 {
 	/*
 	 * By now all the pud entries should be none entries. So go
 	 * ahead and flush the page walk cache
 	 */
 	flush_tlb_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
+	pgtable_free_tlb(tlb, pud, PUD_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -146,14 +146,14 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 }
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
-                                  unsigned long address)
+				  unsigned long address)
 {
 	/*
 	 * By now all the pud entries should be none entries. So go
 	 * ahead and flush the page walk cache
 	 */
	flush_tlb_pgtable(tlb, address);
-	return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
+	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
 }
 
 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@@ -203,7 +203,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 	 * ahead and flush the page walk cache
 	 */
 	flush_tlb_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, table, 0);
+	pgtable_free_tlb(tlb, table, PTE_INDEX);
 }
 
 #define check_pgt_cache()	do { } while (0)

(The two "unsigned long address)" continuation lines change only in leading whitespace. PTE_INDEX is 0 in the new enum, so replacing the literal 0 at the __pte_free_tlb() call site changes readability and how the callback interprets the tag, not the encoded value.)

arch/powerpc/include/asm/book3s/64/pgtable.h

Lines changed: 10 additions & 0 deletions
@@ -273,6 +273,16 @@ extern unsigned long __pte_frag_size_shift;
 /* Bits to mask out from a PGD to get to the PUD page */
 #define PGD_MASKED_BITS		0xc0000000000000ffUL
 
+/*
+ * Used as an indicator for rcu callback functions
+ */
+enum pgtable_index {
+	PTE_INDEX = 0,
+	PMD_INDEX,
+	PUD_INDEX,
+	PGD_INDEX,
+};
+
 extern unsigned long __vmalloc_start;
 extern unsigned long __vmalloc_end;
 #define VMALLOC_START	__vmalloc_start
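Note on the encoding: pgtable_free_tlb() ORs these enum values into the low bits of the table pointer, so every value must fit under the MAX_PGTABLE_INDEX_SIZE mask, i.e. within the minimum alignment guaranteed for a page-table allocation. A hypothetical compile-time guard (not part of the commit) would look like:

/* Hypothetical, not in the commit: the largest tag must fit in the
 * pointer bits that page-table alignment guarantees are zero. */
_Static_assert(PGD_INDEX <= MAX_PGTABLE_INDEX_SIZE,
	       "pgtable_index values must fit in the pointer tag mask");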

arch/powerpc/mm/pgtable-book3s64.c

Lines changed: 26 additions & 19 deletions
@@ -309,38 +309,45 @@ void pte_fragment_free(unsigned long *table, int kernel)
 	}
 }
 
+static inline void pgtable_free(void *table, int index)
+{
+	switch (index) {
+	case PTE_INDEX:
+		pte_fragment_free(table, 0);
+		break;
+	case PMD_INDEX:
+		kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), table);
+		break;
+	case PUD_INDEX:
+		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
+		break;
+	/* We don't free pgd table via RCU callback */
+	default:
+		BUG();
+	}
+}
+
 #ifdef CONFIG_SMP
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
 	unsigned long pgf = (unsigned long)table;
 
-	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-	pgf |= shift;
+	BUG_ON(index > MAX_PGTABLE_INDEX_SIZE);
+	pgf |= index;
 	tlb_remove_table(tlb, (void *)pgf);
 }
 
 void __tlb_remove_table(void *_table)
 {
 	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
-	unsigned int shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
+	unsigned int index = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;
 
-	if (!shift)
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
+	return pgtable_free(table, index);
 }
 #else
-void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
+void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int index)
 {
-	if (!shift) {
-		/* PTE page needs special handling */
-		pte_fragment_free(table, 0);
-	} else {
-		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
-		kmem_cache_free(PGT_CACHE(shift), table);
-	}
+
+	return pgtable_free(table, index);
 }
 #endif
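Putting the hunks together: the old code could only distinguish "PTE (shift == 0)" from "everything else", and whenever two levels share a kmem cache index a bare shift cannot tell a PMD table from a PUD table; the explicit enum gives the callback that information, which is what the commit message means by doing different things for pte and pmd. A hedged trace of the CONFIG_SMP path after this commit, using only the functions shown in the hunks above:

/*
 * SMP free path, annotated sketch (PMD_INDEX == 1 per the new enum):
 *
 *   __pmd_free_tlb(tlb, pmd, addr)
 *     flush_tlb_pgtable(tlb, addr);           // flush the page-walk cache
 *     pgtable_free_tlb(tlb, pmd, PMD_INDEX);
 *       pgf = (unsigned long)pmd | PMD_INDEX; // tag the low bits
 *       tlb_remove_table(tlb, (void *)pgf);   // defer the actual free
 *
 *   ... later, once no concurrent page-table walker can still hold a
 *   reference (the rcu callback of the commit title), the deferred
 *   callback runs ...
 *
 *   __tlb_remove_table(pgf)
 *     table = pgf & ~MAX_PGTABLE_INDEX_SIZE;  // strip the tag
 *     index = pgf &  MAX_PGTABLE_INDEX_SIZE;  // recover PMD_INDEX
 *     pgtable_free(table, index);
 *       kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), table);
 */

Without CONFIG_SMP there is no deferral: pgtable_free_tlb() now simply calls pgtable_free() directly. In both configurations PGD_INDEX deliberately lands on BUG(), since pgd tables are never freed through this path.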
