Commit 555b6841 authored by Linus Torvalds

Merge tag 'x86_mm_for_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm updates from Dave Hansen:

 - Add a warning when memory encryption conversions fail. These
   operations require VMM cooperation, even in CoCo environments where
   the VMM is untrusted. While it's _possible_ that memory pressure
   could trigger the new warning, the odds are that a guest would only
   see this from an attacking VMM.

 - Simplify page fault code by re-enabling interrupts unconditionally.

 - Avoid truncation issues when pfns are passed in to pfn_to_kaddr()
   with small (<64-bit) types.

* tag 'x86_mm_for_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm/cpa: Warn for set_memory_XXcrypted() VMM fails
  x86/mm: Get rid of conditional IF flag handling in page fault path
  x86/mm: Ensure input to pfn_to_kaddr() is treated as a 64-bit type
parents 685d9821 82ace185

arch/x86/include/asm/page.h

@@ -66,10 +66,14 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
  * virt_addr_valid(kaddr) returns true.
  */
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 extern bool __virt_addr_valid(unsigned long kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
 
+static __always_inline void *pfn_to_kaddr(unsigned long pfn)
+{
+	return __va(pfn << PAGE_SHIFT);
+}
+
 static __always_inline u64 __canonical_address(u64 vaddr, u8 vaddr_bits)
 {
 	return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits);
...
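The point of converting pfn_to_kaddr() from a macro to an inline function is that the old macro shifted the pfn in whatever type the caller happened to use, so a 32-bit pfn was shifted in 32-bit arithmetic and wrapped before __va() ever saw it. The userspace sketch below is not kernel code; PAGE_SHIFT, pfn_to_paddr() and the fixed-width types are stand-ins chosen only to reproduce the arithmetic, with the uint64_t parameter playing the role of the new function's unsigned long parameter on x86-64.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Mirrors the shape of the new helper: the parameter type forces the
 * caller's pfn to be widened to 64 bits before the shift happens. */
static inline uint64_t pfn_to_paddr(uint64_t pfn)
{
	return pfn << PAGE_SHIFT;
}

int main(void)
{
	/* The pfn of the page at physical address 4 GiB, held in a 32-bit type. */
	uint32_t pfn = 0x100000;

	/* Macro-style: the shift is performed in 32-bit arithmetic and wraps
	 * to 0 before the result is widened to 64 bits. */
	uint64_t truncated = (uint64_t)(pfn << PAGE_SHIFT);

	/* Function-style: pfn is converted to 64 bits first, so the shift
	 * yields the expected 0x100000000. */
	uint64_t correct = pfn_to_paddr(pfn);

	printf("truncated = %#llx\n", (unsigned long long)truncated);
	printf("correct   = %#llx\n", (unsigned long long)correct);
	return 0;
}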
arch/x86/mm/fault.c

@@ -1299,21 +1299,14 @@ void do_user_addr_fault(struct pt_regs *regs,
 		return;
 	}
 
-	/*
-	 * It's safe to allow irq's after cr2 has been saved and the
-	 * vmalloc fault has been handled.
-	 *
-	 * User-mode registers count as a user access even for any
-	 * potential system fault or CPU buglet:
-	 */
-	if (user_mode(regs)) {
-		local_irq_enable();
-		flags |= FAULT_FLAG_USER;
-	} else {
-		if (regs->flags & X86_EFLAGS_IF)
-			local_irq_enable();
+	/* Legacy check - remove this after verifying that it doesn't trigger */
+	if (WARN_ON_ONCE(!(regs->flags & X86_EFLAGS_IF))) {
+		bad_area_nosemaphore(regs, error_code, address);
+		return;
 	}
 
+	local_irq_enable();
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 	/*
@@ -1329,6 +1322,14 @@ void do_user_addr_fault(struct pt_regs *regs,
 	if (error_code & X86_PF_INSTR)
 		flags |= FAULT_FLAG_INSTRUCTION;
 
+	/*
+	 * We set FAULT_FLAG_USER based on the register state, not
+	 * based on X86_PF_USER. User space accesses that cause
+	 * system page faults are still user accesses.
+	 */
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
 #ifdef CONFIG_X86_64
 	/*
 	 * Faults in the vsyscall page might need emulation. The
...
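The new check leans on the fact that WARN_ON_ONCE() evaluates to its condition, so a single statement can both complain (once) and drive the bail-out branch. The userspace stand-in below is only an illustration of that pattern; warn_on_once(), irqs_enabled and the message text are invented for this sketch and are not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the WARN_ON_ONCE() pattern used above: report the first
 * time the condition is true, but always hand the condition back so the
 * caller can branch on it in the same expression. */
static bool warn_on_once(bool cond, const char *what)
{
	static bool warned;

	if (cond && !warned) {
		warned = true;
		fprintf(stderr, "WARNING: %s\n", what);
	}
	return cond;
}

int main(void)
{
	bool irqs_enabled = false;	/* pretend the IF flag was clear */

	if (warn_on_once(!irqs_enabled, "page fault entered with IRQs off")) {
		/* In the kernel path this is where bad_area_nosemaphore()
		 * rejects the fault; here we just bail out. */
		return 1;
	}
	return 0;
}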
arch/x86/mm/pat/set_memory.c

@@ -2157,7 +2157,7 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 
 	/* Notify hypervisor that we are about to set/clr encryption attribute. */
 	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
-		return -EIO;
+		goto vmm_fail;
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -2170,13 +2170,20 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	 */
 	cpa_flush(&cpa, 0);
 
+	if (ret)
+		return ret;
+
 	/* Notify hypervisor that we have successfully set/clr encryption attribute. */
-	if (!ret) {
-		if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
-			ret = -EIO;
-	}
+	if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+		goto vmm_fail;
+
+	return 0;
 
-	return ret;
+vmm_fail:
+	WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
+		  (void *)addr, numpages, enc ? "private" : "shared");
+
+	return -EIO;
 }
 
 static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
...
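The new WARN_ONCE() fires inside the conversion path itself, but callers still see -EIO and must not treat the memory as if the conversion happened. Below is a minimal caller-side sketch for a hypothetical driver that shares one page with the hypervisor; example_alloc_shared_page(), example_free_shared_page() and the leak-on-failure policy are invented for illustration, while set_memory_decrypted()/set_memory_encrypted() and alloc_pages_exact()/free_pages_exact() are existing kernel interfaces.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

/* Hypothetical helper: allocate one page and convert it to shared.
 * If the conversion fails, the page's encryption state is unknown, so
 * it is deliberately leaked rather than returned to the page allocator. */
static void *example_alloc_shared_page(void)
{
	void *buf = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);

	if (!buf)
		return NULL;

	if (set_memory_decrypted((unsigned long)buf, 1)) {
		/* Conversion failed; the CPA code above has already warned.
		 * Do not free memory whose encryption state is unclear. */
		return NULL;
	}
	return buf;
}

static void example_free_shared_page(void *buf)
{
	/* Convert back to private before giving the page back; on failure,
	 * leak it for the same reason as above. */
	if (!set_memory_encrypted((unsigned long)buf, 1))
		free_pages_exact(buf, PAGE_SIZE);
}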