
Commit fe0937b

Peter Zijlstra authored and Ingo Molnar committed
x86/mm/cpa: Fold cpa_flush_range() and cpa_flush_array() into a single cpa_flush() function
Note that the cache flush loop in cpa_flush_*() is identical when we use __cpa_addr(); further observe that flush_tlb_kernel_range() is a special case of the cpa_flush_array() TLB invalidation code.

This then means the two functions are virtually identical. Fold these two functions into a single cpa_flush() call.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.559855600@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 83b4e39 · commit fe0937b
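The observation is easiest to see outside the kernel. Below is a minimal user-space sketch (all names here — fake_cpa, fake_cpa_addr, fake_flush — are hypothetical stand-ins, not kernel code): once the i-th address comes from an index-to-address helper in the style of __cpa_addr(), the contiguous range that flush_tlb_kernel_range() covers is just the array walk with the trivial mapping start + i * PAGE_SIZE, so one flush loop serves both callers.

/*
 * Illustration only -- simplified stand-ins, not kernel code.
 * The point: once addresses are obtained per index, the contiguous
 * "range" case is merely the array case with a trivial index->address
 * mapping, so a single flush loop can serve both.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct fake_cpa {
	unsigned long vaddr;    /* base address (range case) */
	unsigned long *array;   /* per-page addresses (array case), or NULL */
	unsigned int numpages;
};

/* Hypothetical analogue of __cpa_addr(): index -> virtual address. */
static unsigned long fake_cpa_addr(struct fake_cpa *cpa, unsigned int i)
{
	if (cpa->array)
		return cpa->array[i];
	return cpa->vaddr + i * PAGE_SIZE;   /* the range case degenerates to this */
}

/* One loop covers both cases; in the kernel each step would be a
 * __flush_tlb_one_kernel() or clflush_cache_range() call. */
static void fake_flush(struct fake_cpa *cpa)
{
	for (unsigned int i = 0; i < cpa->numpages; i++)
		printf("flush %#lx\n", fake_cpa_addr(cpa, i));
}

int main(void)
{
	unsigned long pages[] = { 0xffff880000001000UL, 0xffff880000005000UL };
	struct fake_cpa range = { .vaddr = 0xffff880000000000UL, .numpages = 2 };
	struct fake_cpa array = { .array = pages, .numpages = 2 };

	fake_flush(&range);   /* contiguous: base + i * PAGE_SIZE */
	fake_flush(&array);   /* scattered: addresses from the array */
	return 0;
}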


arch/x86/mm/pageattr.c

Lines changed: 18 additions & 74 deletions
@@ -304,51 +304,7 @@ static void cpa_flush_all(unsigned long cache)
 	on_each_cpu(__cpa_flush_all, (void *) cache, 1);
 }
 
-static bool __inv_flush_all(int cache)
-{
-	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
-
-	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
-		cpa_flush_all(cache);
-		return true;
-	}
-
-	return false;
-}
-
-static void cpa_flush_range(unsigned long start, int numpages, int cache)
-{
-	unsigned int i, level;
-	unsigned long addr;
-
-	WARN_ON(PAGE_ALIGN(start) != start);
-
-	if (__inv_flush_all(cache))
-		return;
-
-	flush_tlb_kernel_range(start, start + PAGE_SIZE * numpages);
-
-	if (!cache)
-		return;
-
-	/*
-	 * We only need to flush on one CPU,
-	 * clflush is a MESI-coherent instruction that
-	 * will cause all other CPUs to flush the same
-	 * cachelines:
-	 */
-	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
-		pte_t *pte = lookup_address(addr, &level);
-
-		/*
-		 * Only flush present addresses:
-		 */
-		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
-			clflush_cache_range((void *) addr, PAGE_SIZE);
-	}
-}
-
-void __cpa_flush_array(void *data)
+void __cpa_flush_tlb(void *data)
 {
 	struct cpa_data *cpa = data;
 	unsigned int i;
@@ -357,33 +313,31 @@ void __cpa_flush_array(void *data)
 		__flush_tlb_one_kernel(__cpa_addr(cpa, i));
 }
 
-static void cpa_flush_array(struct cpa_data *cpa, int cache)
+static void cpa_flush(struct cpa_data *data, int cache)
 {
+	struct cpa_data *cpa = data;
 	unsigned int i;
 
-	if (cpa_check_flush_all(cache))
+	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);
+
+	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+		cpa_flush_all(cache);
 		return;
+	}
 
 	if (cpa->numpages <= tlb_single_page_flush_ceiling)
-		on_each_cpu(__cpa_flush_array, cpa, 1);
+		on_each_cpu(__cpa_flush_tlb, cpa, 1);
 	else
 		flush_tlb_all();
 
 	if (!cache)
 		return;
 
-	/*
-	 * We only need to flush on one CPU,
-	 * clflush is a MESI-coherent instruction that
-	 * will cause all other CPUs to flush the same
-	 * cachelines:
-	 */
 	for (i = 0; i < cpa->numpages; i++) {
 		unsigned long addr = __cpa_addr(cpa, i);
 		unsigned int level;
-		pte_t *pte;
 
-		pte = lookup_address(addr, &level);
+		pte_t *pte = lookup_address(addr, &level);
 
 		/*
 		 * Only flush present addresses:
@@ -1698,7 +1652,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
 	struct cpa_data cpa;
 	int ret, cache, checkalias;
-	unsigned long baddr = 0;
 
 	memset(&cpa, 0, sizeof(cpa));
 
@@ -1732,11 +1685,6 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 			 */
 			WARN_ON_ONCE(1);
 		}
-		/*
-		 * Save address for cache flush. *addr is modified in the call
-		 * to __change_page_attr_set_clr() below.
-		 */
-		baddr = make_addr_canonical_again(*addr);
 	}
 
 	/* Must avoid aliasing mappings in the highmem code */
@@ -1784,11 +1732,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 		goto out;
 	}
 
-	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
-		cpa_flush_array(&cpa, cache);
-	else
-		cpa_flush_range(baddr, numpages, cache);
-
+	cpa_flush(&cpa, cache);
 out:
 	return ret;
 }
@@ -2097,18 +2041,18 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
 	 */
-	cpa_flush_range(addr, numpages, 1);
+	cpa_flush(&cpa, 1);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
 	/*
-	 * After changing the encryption attribute, we need to flush TLBs
-	 * again in case any speculative TLB caching occurred (but no need
-	 * to flush caches again). We could just use cpa_flush_all(), but
-	 * in case TLB flushing gets optimized in the cpa_flush_range()
-	 * path use the same logic as above.
+	 * After changing the encryption attribute, we need to flush TLBs again
+	 * in case any speculative TLB caching occurred (but no need to flush
+	 * caches again). We could just use cpa_flush_all(), but in case TLB
+	 * flushing gets optimized in the cpa_flush() path use the same logic
+	 * as above.
 	 */
-	cpa_flush_range(addr, numpages, 0);
+	cpa_flush(&cpa, 0);
 
 	return ret;
 }
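For readability, here is the folded cpa_flush() assembled from the hunks above, roughly as the function should read after this commit. The tail of the clflush loop falls outside the second hunk's context and is filled in from the identical loop deleted from cpa_flush_range(), per the commit message; the short comments are paraphrased from the deleted "MESI-coherent" comment and from the code's structure, and are not in the file itself.

static void cpa_flush(struct cpa_data *data, int cache)
{
	struct cpa_data *cpa = data;
	unsigned int i;

	BUG_ON(irqs_disabled() && !early_boot_irqs_disabled);

	/* Without CLFLUSH a ranged cache flush is not possible: flush everything. */
	if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		cpa_flush_all(cache);
		return;
	}

	/* Per-page TLB invalidation up to the ceiling; a full flush beyond it. */
	if (cpa->numpages <= tlb_single_page_flush_ceiling)
		on_each_cpu(__cpa_flush_tlb, cpa, 1);
	else
		flush_tlb_all();

	if (!cache)
		return;

	/* clflush is MESI-coherent, so flushing on one CPU suffices. */
	for (i = 0; i < cpa->numpages; i++) {
		unsigned long addr = __cpa_addr(cpa, i);
		unsigned int level;

		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}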
