Commit 55e6f8f2 authored by Paolo Bonzini

Merge tag 'kvm-x86-svm-6.12' of https://github.com/kvm-x86/linux into HEAD

KVM SVM changes for 6.12:

 - Don't stuff the RSB after VM-Exit when RETPOLINE=y and AutoIBRS is enabled,
   i.e. when the CPU has already flushed the RSB.

 - Trace the per-CPU host save area as a VMCB pointer to improve readability
   and clean up the retrieval of the SEV-ES host save area (a layout sketch
   follows the commit metadata below).

 - Remove unnecessary accounting of temporary nested VMCB related allocations.
parents 43d97b2e 4440337a
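
Background for the host-save-area change: the VMCB control area is 0x400 (1024) bytes, so overlaying the SEV-ES host save area in a union with the regular save area puts it exactly where the old code reached by hand with page_address(sd->save_area) + 0x400. The sketch below is illustrative only and is not part of this merge; the helper name is hypothetical, and it assumes the struct vmcb definition added by this merge in arch/x86/include/asm/svm.h.

/*
 * Illustrative sketch only (not from this merge).  Shows the layout
 * invariant that lets &sd->save_area->host_sev_es_save replace the old
 * page_address(sd->save_area) + 0x400 arithmetic.
 */
#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <asm/svm.h>			/* struct vmcb, as defined after this merge */

static inline void vmcb_layout_sketch(void)	/* hypothetical helper name */
{
	/* Both union members start right after the 0x400-byte control area. */
	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != 0x400);
	BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
	BUILD_BUG_ON(offsetof(struct vmcb, host_sev_es_save) !=
		     offsetof(struct vmcb, save));
}

The BUILD_BUG_ON(offsetof(struct vmcb, save) != EXPECTED_VMCB_CONTROL_AREA_SIZE) added in the diff below enforces the same invariant in-tree.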
@@ -516,6 +516,20 @@ struct ghcb {
 	u32 ghcb_usage;
 } __packed;
 
+struct vmcb {
+	struct vmcb_control_area control;
+	union {
+		struct vmcb_save_area save;
+
+		/*
+		 * For SEV-ES VMs, the save area in the VMCB is used only to
+		 * save/load host state.  Guest state resides in a separate
+		 * page, the aptly named VM Save Area (VMSA), that is encrypted
+		 * with the guest's private key.
+		 */
+		struct sev_es_save_area host_sev_es_save;
+	};
+} __packed;
 
 #define EXPECTED_VMCB_SAVE_AREA_SIZE		744
 #define EXPECTED_GHCB_SAVE_AREA_SIZE		1032
@@ -532,6 +546,7 @@ static inline void __unused_size_checks(void)
 	BUILD_BUG_ON(sizeof(struct ghcb_save_area)	!= EXPECTED_GHCB_SAVE_AREA_SIZE);
 	BUILD_BUG_ON(sizeof(struct sev_es_save_area)	!= EXPECTED_SEV_ES_SAVE_AREA_SIZE);
 	BUILD_BUG_ON(sizeof(struct vmcb_control_area)	!= EXPECTED_VMCB_CONTROL_AREA_SIZE);
+	BUILD_BUG_ON(offsetof(struct vmcb, save)	!= EXPECTED_VMCB_CONTROL_AREA_SIZE);
 	BUILD_BUG_ON(sizeof(struct ghcb)		!= EXPECTED_GHCB_SIZE);
 
 	/* Check offsets of reserved fields */
@@ -568,11 +583,6 @@ static inline void __unused_size_checks(void)
 	BUILD_BUG_RESERVED_OFFSET(ghcb, 0xff0);
 }
 
-struct vmcb {
-	struct vmcb_control_area control;
-	struct vmcb_save_area save;
-} __packed;
-
 #define SVM_CPUID_FUNC			0x8000000a
 
 #define SVM_SELECTOR_S_SHIFT	4
...
@@ -1693,8 +1693,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 
 	ret  = -ENOMEM;
-	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL_ACCOUNT);
-	save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
+	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
+	save = kzalloc(sizeof(*save), GFP_KERNEL);
 	if (!ctl || !save)
 		goto out_free;
 
...
@@ -573,7 +573,7 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
 
 static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
 {
-	return page_address(sd->save_area) + 0x400;
+	return &sd->save_area->host_sev_es_save;
 }
 
 static inline void kvm_cpu_svm_disable(void)
@@ -696,7 +696,7 @@ static void svm_cpu_uninit(int cpu)
 		return;
 
 	kfree(sd->sev_vmcbs);
-	__free_page(sd->save_area);
+	__free_page(__sme_pa_to_page(sd->save_area_pa));
 	sd->save_area_pa = 0;
 	sd->save_area = NULL;
 }
@@ -704,23 +704,24 @@ static void svm_cpu_uninit(int cpu)
 static int svm_cpu_init(int cpu)
 {
 	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
+	struct page *save_area_page;
 	int ret = -ENOMEM;
 
 	memset(sd, 0, sizeof(struct svm_cpu_data));
-	sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
-	if (!sd->save_area)
+	save_area_page = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
+	if (!save_area_page)
 		return ret;
 
 	ret = sev_cpu_init(sd);
 	if (ret)
 		goto free_save_area;
 
-	sd->save_area_pa = __sme_page_pa(sd->save_area);
+	sd->save_area = page_address(save_area_page);
+	sd->save_area_pa = __sme_page_pa(save_area_page);
 	return 0;
 
 free_save_area:
-	__free_page(sd->save_area);
-	sd->save_area = NULL;
+	__free_page(save_area_page);
 	return ret;
 }
@@ -1124,8 +1125,7 @@ static void svm_hardware_unsetup(void)
 	for_each_possible_cpu(cpu)
 		svm_cpu_uninit(cpu);
 
-	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
-		     get_order(IOPM_SIZE));
+	__free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE));
 	iopm_base = 0;
 }
@@ -1301,7 +1301,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 	if (!kvm_hlt_in_guest(vcpu->kvm))
 		svm_set_intercept(svm, INTERCEPT_HLT);
 
-	control->iopm_base_pa = __sme_set(iopm_base);
+	control->iopm_base_pa = iopm_base;
 	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
 	control->int_ctl = V_INTR_MASKING_MASK;
@@ -1503,7 +1503,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 	sev_free_vcpu(vcpu);
 
-	__free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
+	__free_page(__sme_pa_to_page(svm->vmcb01.pa));
 	__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
 }
@@ -5281,7 +5281,7 @@ static __init int svm_hardware_setup(void)
 	iopm_va = page_address(iopm_pages);
 	memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
-	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+	iopm_base = __sme_page_pa(iopm_pages);
 
 	init_msrpm_offsets();
...
@@ -25,7 +25,21 @@
 #include "cpuid.h"
 #include "kvm_cache_regs.h"
 
-#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+/*
+ * Helpers to convert to/from physical addresses for pages whose address is
+ * consumed directly by hardware.  Even though it's a physical address, SVM
+ * often restricts the address to the natural width, hence 'unsigned long'
+ * instead of 'hpa_t'.
+ */
+static inline unsigned long __sme_page_pa(struct page *page)
+{
+	return __sme_set(page_to_pfn(page) << PAGE_SHIFT);
+}
+
+static inline struct page *__sme_pa_to_page(unsigned long pa)
+{
+	return pfn_to_page(__sme_clr(pa) >> PAGE_SHIFT);
+}
 
 #define	IOPM_SIZE PAGE_SIZE * 3
 #define	MSRPM_SIZE PAGE_SIZE * 2
@@ -321,7 +335,7 @@ struct svm_cpu_data {
 	u32 next_asid;
 	u32 min_asid;
 
-	struct page *save_area;
+	struct vmcb *save_area;
 	unsigned long save_area_pa;
 
 	struct vmcb *current_vmcb;
...
@@ -209,10 +209,8 @@ SYM_FUNC_START(__svm_vcpu_run)
 7:	vmload %_ASM_AX
 8:
 
-#ifdef CONFIG_MITIGATION_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
 
 	/* Clobbers RAX, RCX, RDX. */
 	RESTORE_HOST_SPEC_CTRL
@@ -348,10 +346,8 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
 
 2:	cli
 
-#ifdef CONFIG_MITIGATION_RETPOLINE
 	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
-	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
-#endif
+	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
 
 	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
 	RESTORE_HOST_SPEC_CTRL
...