Commit 5dfe7a7e authored by Linus Torvalds

Merge tag 'x86_tdx_for_6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 tdx updates from Dave Hansen:

 - Fix a race window where load_unaligned_zeropad() could cause a fatal
   shutdown during TDX private<=>shared conversion (see the sketch after
   this list).

   The race has never been observed in practice, but it could allow
   load_unaligned_zeropad() to catch a TDX page in the middle of its
   conversion process, which would lead to a fatal and unrecoverable
   guest shutdown.

 - Annotate sites where VM "exit reasons" are reused as hypercall
   numbers.
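
   As background, load_unaligned_zeropad() is the x86 word-at-a-time
   helper that may deliberately read past the end of the caller's buffer
   into the next page, relying on an exception fixup to zero-pad the
   result if that access faults. A minimal sketch of the hazard
   (hypothetical names, not code from this series):

	/* 'buf' ends two bytes before a page boundary. */
	char *buf = page_base + PAGE_SIZE - 2;

	/*
	 * The 8-byte load also touches the first six bytes of the
	 * *next* page.  If that adjacent page is a TDX page caught in
	 * the middle of a private<=>shared conversion, the guest
	 * mapping and the TDX page state can momentarily disagree, and
	 * a private mapping of a shared page shuts the guest down.
	 */
	unsigned long v = load_unaligned_zeropad(buf);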

* tag 'x86_tdx_for_6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix enc_status_change_finish_noop()
  x86/tdx: Fix race between set_memory_encrypted() and load_unaligned_zeropad()
  x86/mm: Allow guest.enc_status_change_prepare() to fail
  x86/tdx: Wrap exit reason with hcall_func()
parents 36db3144 94142c9d
@@ -20,7 +20,7 @@ static inline unsigned int tdx_io_in(int size, u16 port)
 {
 	struct tdx_hypercall_args args = {
 		.r10 = TDX_HYPERCALL_STANDARD,
-		.r11 = EXIT_REASON_IO_INSTRUCTION,
+		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
 		.r12 = size,
 		.r13 = 0,
 		.r14 = port,
@@ -36,7 +36,7 @@ static inline void tdx_io_out(int size, u16 port, u32 value)
 {
 	struct tdx_hypercall_args args = {
 		.r10 = TDX_HYPERCALL_STANDARD,
-		.r11 = EXIT_REASON_IO_INSTRUCTION,
+		.r11 = hcall_func(EXIT_REASON_IO_INSTRUCTION),
 		.r12 = size,
 		.r13 = 1,
 		.r14 = port,
...
@@ -44,17 +44,6 @@ noinstr void __tdx_hypercall_failed(void)
 	panic("TDVMCALL failed. TDX module bug?");
 }
 
-/*
- * The TDG.VP.VMCALL-Instruction-execution sub-functions are defined
- * independently from but are currently matched 1:1 with VMX EXIT_REASONs.
- * Reusing the KVM EXIT_REASON macros makes it easier to connect the host and
- * guest sides of these calls.
- */
-static __always_inline u64 hcall_func(u64 exit_reason)
-{
-	return exit_reason;
-}
-
 #ifdef CONFIG_KVM_GUEST
 long tdx_kvm_hypercall(unsigned int nr, unsigned long p1, unsigned long p2,
 		       unsigned long p3, unsigned long p4)
@@ -744,6 +733,30 @@ static bool tdx_enc_status_changed(unsigned long vaddr, int numpages, bool enc)
 	return true;
 }
 
+static bool tdx_enc_status_change_prepare(unsigned long vaddr, int numpages,
+					  bool enc)
+{
+	/*
+	 * Only handle shared->private conversion here.
+	 * See the comment in tdx_early_init().
+	 */
+	if (enc)
+		return tdx_enc_status_changed(vaddr, numpages, enc);
+	return true;
+}
+
+static bool tdx_enc_status_change_finish(unsigned long vaddr, int numpages,
+					 bool enc)
+{
+	/*
+	 * Only handle private->shared conversion here.
+	 * See the comment in tdx_early_init().
+	 */
+	if (!enc)
+		return tdx_enc_status_changed(vaddr, numpages, enc);
+	return true;
+}
+
 void __init tdx_early_init(void)
 {
 	u64 cc_mask;
@@ -771,9 +784,30 @@ void __init tdx_early_init(void)
 	 */
 	physical_mask &= cc_mask - 1;
 
-	x86_platform.guest.enc_cache_flush_required = tdx_cache_flush_required;
-	x86_platform.guest.enc_tlb_flush_required = tdx_tlb_flush_required;
-	x86_platform.guest.enc_status_change_finish = tdx_enc_status_changed;
+	/*
+	 * The kernel mapping should match the TDX metadata for the page.
+	 * load_unaligned_zeropad() can touch memory *adjacent* to that which is
+	 * owned by the caller and can catch even _momentary_ mismatches.  Bad
+	 * things happen on mismatch:
+	 *
+	 *   - Private mapping => Shared Page  == Guest shutdown
+	 *   - Shared mapping  => Private Page == Recoverable #VE
+	 *
+	 * guest.enc_status_change_prepare() converts the page from
+	 * shared=>private before the mapping becomes private.
+	 *
+	 * guest.enc_status_change_finish() converts the page from
+	 * private=>shared after the mapping becomes shared.
+	 *
+	 * In both cases there is a temporary shared mapping to a private page,
+	 * which can result in a #VE.  But, there is never a private mapping to
+	 * a shared page.
+	 */
+	x86_platform.guest.enc_status_change_prepare = tdx_enc_status_change_prepare;
+	x86_platform.guest.enc_status_change_finish  = tdx_enc_status_change_finish;
+
+	x86_platform.guest.enc_cache_flush_required  = tdx_cache_flush_required;
+	x86_platform.guest.enc_tlb_flush_required    = tdx_tlb_flush_required;
 
 	/*
 	 * TDX intercepts the RDMSR to read the X2APIC ID in the parallel
...
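
The prepare/finish split above produces the following ordering in
__set_memory_enc_pgtable() (a pseudocode timeline, not verbatim kernel
code):

	/* shared => private (enc == true) */
	tdx_enc_status_change_prepare();	/* page becomes private first */
	/* ... kernel mapping is switched to private ... */
	tdx_enc_status_change_finish();		/* no-op when enc == true */

	/* private => shared (enc == false) */
	tdx_enc_status_change_prepare();	/* no-op when enc == false */
	/* ... kernel mapping is switched to shared ... */
	tdx_enc_status_change_finish();		/* page becomes shared last */

Either way, the only transient mismatch is a shared mapping of a
private page, which at worst raises a recoverable #VE rather than a
fatal shutdown.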
@@ -93,5 +93,16 @@ u64 __tdx_module_call(u64 fn, u64 rcx, u64 rdx, u64 r8, u64 r9,
 
 bool tdx_accept_memory(phys_addr_t start, phys_addr_t end);
 
+/*
+ * The TDG.VP.VMCALL-Instruction-execution sub-functions are defined
+ * independently from but are currently matched 1:1 with VMX EXIT_REASONs.
+ * Reusing the KVM EXIT_REASON macros makes it easier to connect the host and
+ * guest sides of these calls.
+ */
+static __always_inline u64 hcall_func(u64 exit_reason)
+{
+	return exit_reason;
+}
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_SHARED_TDX_H */
...
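
For reference, the same hcall_func() wrapping as used by the guest's
HLT path (a sketch modeled on the kernel's __halt() helper, not the
complete function):

	struct tdx_hypercall_args args = {
		.r10 = TDX_HYPERCALL_STANDARD,
		.r11 = hcall_func(EXIT_REASON_HLT),	/* hypercall number, not a raw exit reason */
		.r12 = irq_disabled,
	};

	__tdx_hypercall(&args);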
@@ -150,7 +150,7 @@ struct x86_init_acpi {
  * @enc_cache_flush_required	Returns true if a cache flush is needed before changing page encryption status
  */
 struct x86_guest {
-	void (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
+	bool (*enc_status_change_prepare)(unsigned long vaddr, int npages, bool enc);
 	bool (*enc_status_change_finish)(unsigned long vaddr, int npages, bool enc);
 	bool (*enc_tlb_flush_required)(bool enc);
 	bool (*enc_cache_flush_required)(void);
...
@@ -131,8 +131,8 @@ struct x86_cpuinit_ops x86_cpuinit = {
 static void default_nmi_init(void) { };
 
-static void enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { }
-static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return false; }
+static bool enc_status_change_prepare_noop(unsigned long vaddr, int npages, bool enc) { return true; }
+static bool enc_status_change_finish_noop(unsigned long vaddr, int npages, bool enc) { return true; }
 static bool enc_tlb_flush_required_noop(bool enc) { return false; }
 static bool enc_cache_flush_required_noop(void) { return false; }
 static bool is_private_mmio_noop(u64 addr) {return false; }
...
@@ -319,7 +319,7 @@ static void enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
 #endif
 }
 
-static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
+static bool amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool enc)
 {
 	/*
 	 * To maintain the security guarantees of SEV-SNP guests, make sure
@@ -327,6 +327,8 @@ static void amd_enc_status_change_prepare(unsigned long vaddr, int npages, bool
 	 */
 	if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP) && !enc)
 		snp_set_memory_shared(vaddr, npages);
+
+	return true;
 }
 
 /* Return true unconditionally: return value doesn't matter for the SEV side */
...
@@ -2152,7 +2152,8 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
 	cpa_flush(&cpa, x86_platform.guest.enc_cache_flush_required());
 
 	/* Notify hypervisor that we are about to set/clr encryption attribute. */
-	x86_platform.guest.enc_status_change_prepare(addr, numpages, enc);
+	if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
+		return -EIO;
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
...
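
Because enc_status_change_prepare() can now fail, conversion errors
reach set_memory_encrypted()/set_memory_decrypted() callers as -EIO. A
hypothetical caller would check the return value rather than assume the
conversion succeeded:

	/* Hypothetical caller: conversion failures now surface as -EIO. */
	if (set_memory_decrypted((unsigned long)vaddr, npages))
		goto err_free;	/* unwind instead of touching the pages */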