Commit 5fe26b7a authored by Peter Zijlstra, committed by Ingo Molnar

x86/mm/cpa: Simplify the code after making cpa->vaddr invariant

Since cpa->vaddr is invariant, we can remove all the workarounds that
deal with it changing.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tom.StDenis@amd.com
Cc: dave.hansen@intel.com
Link: http://lkml.kernel.org/r/20181203171043.366619025@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 98bfc9b0
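
The workaround being removed below is the caller-side copy of the address:
because the change_page_attr_*() helpers take the address by pointer (it ends
up as cpa->vaddr), callers used to pass a throwaway copy so their own variable
or array entry would not be rewritten while the range was walked. With
cpa->vaddr invariant, that defensive copy is unnecessary. The following is a
stand-alone sketch of the pattern in plain C, with simplified, hypothetical
helpers (set_attr_mutating/set_attr_invariant, fixed PAGE_SIZE), not the
kernel's actual prototypes:

/*
 * Illustration only, not kernel code: why callers needed a private copy
 * of the address when the callee rewrote *vaddr, and why the copy goes
 * away once *vaddr is treated as invariant.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Old behaviour: the helper advances *vaddr while walking the range,
 * clobbering the caller's variable. */
static int set_attr_mutating(unsigned long *vaddr, int numpages)
{
	for (int i = 0; i < numpages; i++)
		*vaddr += PAGE_SIZE;
	return 0;
}

/* New behaviour: the helper reads *vaddr once and walks a local copy,
 * leaving the caller's variable untouched (the invariant). */
static int set_attr_invariant(unsigned long *vaddr, int numpages)
{
	unsigned long cur = *vaddr;

	for (int i = 0; i < numpages; i++)
		cur += PAGE_SIZE;
	return 0;
}

int main(void)
{
	unsigned long addr = 0x100000UL;
	unsigned long copy;

	/* With the mutating helper, the caller must protect its value: */
	copy = addr;
	set_attr_mutating(&copy, 4);

	/* With the invariant helper, the defensive copy is not needed: */
	set_attr_invariant(&addr, 4);

	printf("addr is still %#lx\n", addr);
	return 0;
}

This mirrors what the diff does to the callers: the local test_addr, addr_copy
and start variables existed only to shield the caller's address from being
modified, and each of them can now be dropped.
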
...@@ -124,7 +124,6 @@ static int pageattr_test(void) ...@@ -124,7 +124,6 @@ static int pageattr_test(void)
unsigned int level; unsigned int level;
int i, k; int i, k;
int err; int err;
unsigned long test_addr;
if (print) if (print)
printk(KERN_INFO "CPA self-test:\n"); printk(KERN_INFO "CPA self-test:\n");
...@@ -181,8 +180,7 @@ static int pageattr_test(void) ...@@ -181,8 +180,7 @@ static int pageattr_test(void)
switch (i % 3) { switch (i % 3) {
case 0: case 0:
test_addr = addr[i]; err = change_page_attr_set(&addr[i], len[i], PAGE_CPA_TEST, 0);
err = change_page_attr_set(&test_addr, len[i], PAGE_CPA_TEST, 0);
break; break;
case 1: case 1:
...@@ -226,8 +224,7 @@ static int pageattr_test(void) ...@@ -226,8 +224,7 @@ static int pageattr_test(void)
failed++; failed++;
continue; continue;
} }
test_addr = addr[i]; err = change_page_attr_clear(&addr[i], len[i], PAGE_CPA_TEST, 0);
err = change_page_attr_clear(&test_addr, len[i], PAGE_CPA_TEST, 0);
if (err < 0) { if (err < 0) {
printk(KERN_ERR "CPA reverting failed: %d\n", err); printk(KERN_ERR "CPA reverting failed: %d\n", err);
failed++; failed++;
......
@@ -1908,15 +1908,13 @@ EXPORT_SYMBOL_GPL(set_memory_array_wt);
 int _set_memory_wc(unsigned long addr, int numpages)
 {
 	int ret;
-	unsigned long addr_copy = addr;
 
 	ret = change_page_attr_set(&addr, numpages,
 				   cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
 				   0);
 	if (!ret) {
-		ret = change_page_attr_set_clr(&addr_copy, numpages,
-					       cachemode2pgprot(
-						_PAGE_CACHE_MODE_WC),
+		ret = change_page_attr_set_clr(&addr, numpages,
+					       cachemode2pgprot(_PAGE_CACHE_MODE_WC),
 					       __pgprot(_PAGE_CACHE_MASK),
 					       0, 0, NULL);
 	}
@@ -2064,7 +2062,6 @@ int set_memory_global(unsigned long addr, int numpages)
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 {
 	struct cpa_data cpa;
-	unsigned long start;
 	int ret;
 
 	/* Nothing to do if memory encryption is not active */
@@ -2075,8 +2072,6 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
 		addr &= PAGE_MASK;
 
-	start = addr;
-
 	memset(&cpa, 0, sizeof(cpa));
 	cpa.vaddr = &addr;
 	cpa.numpages = numpages;
@@ -2091,7 +2086,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	/*
 	 * Before changing the encryption attribute, we need to flush caches.
 	 */
-	cpa_flush_range(start, numpages, 1);
+	cpa_flush_range(addr, numpages, 1);
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2102,7 +2097,7 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 	 * in case TLB flushing gets optimized in the cpa_flush_range()
 	 * path use the same logic as above.
 	 */
-	cpa_flush_range(start, numpages, 0);
+	cpa_flush_range(addr, numpages, 0);
 
 	return ret;
 }
...