Skip to content

Commit 935f583

Browse files
Peter Zijlstra authored and Ingo Molnar committed
x86/mm/cpa: Optimize cpa_flush_array() TLB invalidation
Instead of punting and doing tlb_flush_all(), do the same as flush_tlb_kernel_range() does and use single page invalidations. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@surriel.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tom.StDenis@amd.com Cc: dave.hansen@intel.com Link: http://lkml.kernel.org/r/20181203171043.430001980@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 5fe26b7 commit 935f583

File tree

3 files changed

+29
-19
lines changed

3 files changed

+29
-19
lines changed

arch/x86/mm/mm_internal.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,4 +19,6 @@ extern int after_bootmem;
1919

2020
void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
2121

22+
extern unsigned long tlb_single_page_flush_ceiling;
23+
2224
#endif /* __X86_MM_INTERNAL_H */

arch/x86/mm/pageattr.c

Lines changed: 24 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,8 @@
2626
#include <asm/pat.h>
2727
#include <asm/set_memory.h>
2828

29+
#include "mm_internal.h"
30+
2931
/*
3032
* The current flushing context - we pass it instead of 5 arguments:
3133
*/
@@ -346,16 +348,26 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache)
346348
}
347349
}
348350

349-
static void cpa_flush_array(unsigned long baddr, unsigned long *start,
350-
int numpages, int cache,
351-
int in_flags, struct page **pages)
351+
void __cpa_flush_array(void *data)
352352
{
353-
unsigned int i, level;
353+
struct cpa_data *cpa = data;
354+
unsigned int i;
354355

355-
if (__inv_flush_all(cache))
356+
for (i = 0; i < cpa->numpages; i++)
357+
__flush_tlb_one_kernel(__cpa_addr(cpa, i));
358+
}
359+
360+
static void cpa_flush_array(struct cpa_data *cpa, int cache)
361+
{
362+
unsigned int i;
363+
364+
if (cpa_check_flush_all(cache))
356365
return;
357366

358-
flush_tlb_all();
367+
if (cpa->numpages <= tlb_single_page_flush_ceiling)
368+
on_each_cpu(__cpa_flush_array, cpa, 1);
369+
else
370+
flush_tlb_all();
359371

360372
if (!cache)
361373
return;
@@ -366,15 +378,11 @@ static void cpa_flush_array(unsigned long baddr, unsigned long *start,
366378
* will cause all other CPUs to flush the same
367379
* cachelines:
368380
*/
369-
for (i = 0; i < numpages; i++) {
370-
unsigned long addr;
381+
for (i = 0; i < cpa->numpages; i++) {
382+
unsigned long addr = __cpa_addr(cpa, i);
383+
unsigned int level;
371384
pte_t *pte;
372385

373-
if (in_flags & CPA_PAGES_ARRAY)
374-
addr = (unsigned long)page_address(pages[i]);
375-
else
376-
addr = start[i];
377-
378386
pte = lookup_address(addr, &level);
379387

380388
/*
@@ -1771,12 +1779,10 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
17711779
goto out;
17721780
}
17731781

1774-
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
1775-
cpa_flush_array(baddr, addr, numpages, cache,
1776-
cpa.flags, pages);
1777-
} else {
1782+
if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
1783+
cpa_flush_array(&cpa, cache);
1784+
else
17781785
cpa_flush_range(baddr, numpages, cache);
1779-
}
17801786

17811787
out:
17821788
return ret;

arch/x86/mm/tlb.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,8 @@
1515
#include <asm/apic.h>
1616
#include <asm/uv/uv.h>
1717

18+
#include "mm_internal.h"
19+
1820
/*
1921
* TLB flushing, formerly SMP-only
2022
* c/o Linus Torvalds.
@@ -721,7 +723,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
721723
*
722724
* This is in units of pages.
723725
*/
724-
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
726+
unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
725727

726728
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
727729
unsigned long end, unsigned int stride_shift,

0 commit comments

Comments
 (0)