Skip to content

Commit 5fe26b7

Browse files
Peter Zijlstra authored and Ingo Molnar committed
x86/mm/cpa: Simplify the code after making cpa->vaddr invariant
Since cpa->vaddr is invariant, this means we can remove all workarounds that deal with it changing. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@surriel.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tom.StDenis@amd.com Cc: dave.hansen@intel.com Link: http://lkml.kernel.org/r/20181203171043.366619025@infradead.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 98bfc9b commit 5fe26b7

File tree

2 files changed

+6
-14
lines changed

2 files changed

+6
-14
lines changed

arch/x86/mm/pageattr-test.c

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -124,7 +124,6 @@ static int pageattr_test(void)
124124
unsigned int level;
125125
int i, k;
126126
int err;
127-
unsigned long test_addr;
128127

129128
if (print)
130129
printk(KERN_INFO "CPA self-test:\n");
@@ -181,8 +180,7 @@ static int pageattr_test(void)
181180

182181
switch (i % 3) {
183182
case 0:
184-
test_addr = addr[i];
185-
err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
183+
err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
186184
break;
187185

188186
case 1:
@@ -226,8 +224,7 @@ static int pageattr_test(void)
226224
failed++;
227225
continue;
228226
}
229-
test_addr = addr[i];
230-
err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
227+
err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
231228
if (err < 0) {
232229
printk(KERN_ERR "CPA reverting failed: %d\n", err);
233230
failed++;

arch/x86/mm/pageattr.c

Lines changed: 4 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1908,15 +1908,13 @@ EXPORT_SYMBOL_GPL(set_memory_array_wt);
19081908
int _set_memory_wc(unsigned long addr, int numpages)
19091909
{
19101910
int ret;
1911-
unsigned long addr_copy = addr;
19121911

19131912
ret = change_page_attr_set(&addr, numpages,
19141913
cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
19151914
0);
19161915
if (!ret) {
1917-
ret = change_page_attr_set_clr(&addr_copy, numpages,
1918-
cachemode2pgprot(
1919-
_PAGE_CACHE_MODE_WC),
1916+
ret = change_page_attr_set_clr(&addr, numpages,
1917+
cachemode2pgprot(_PAGE_CACHE_MODE_WC),
19201918
__pgprot(_PAGE_CACHE_MASK),
19211919
0, 0, NULL);
19221920
}
@@ -2064,7 +2062,6 @@ int set_memory_global(unsigned long addr, int numpages)
20642062
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
20652063
{
20662064
struct cpa_data cpa;
2067-
unsigned long start;
20682065
int ret;
20692066

20702067
/* Nothing to do if memory encryption is not active */
@@ -2075,8 +2072,6 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
20752072
if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
20762073
addr &= PAGE_MASK;
20772074

2078-
start = addr;
2079-
20802075
memset(&cpa, 0, sizeof(cpa));
20812076
cpa.vaddr = &addr;
20822077
cpa.numpages = numpages;
@@ -2091,7 +2086,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
20912086
/*
20922087
* Before changing the encryption attribute, we need to flush caches.
20932088
*/
2094-
cpa_flush_range(start, numpages, 1);
2089+
cpa_flush_range(addr, numpages, 1);
20952090

20962091
ret = __change_page_attr_set_clr(&cpa, 1);
20972092

@@ -2102,7 +2097,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
21022097
* in case TLB flushing gets optimized in the cpa_flush_range()
21032098
* path use the same logic as above.
21042099
*/
2105-
cpa_flush_range(start, numpages, 0);
2100+
cpa_flush_range(addr, numpages, 0);
21062101

21072102
return ret;
21082103
}

0 commit comments

Comments (0)