Commit 1229cbef authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvm-x86-svm-6.11' of https://github.com/kvm-x86/linux into HEAD

KVM SVM changes for 6.11

 - Make per-CPU save_area allocations NUMA-aware.

 - Force sev_es_host_save_area() to be inlined to avoid calling into an
   instrumentable function from noinstr code.
parents dbfd50cb 704ec48f
...@@ -1181,7 +1181,7 @@ int svm_allocate_nested(struct vcpu_svm *svm) ...@@ -1181,7 +1181,7 @@ int svm_allocate_nested(struct vcpu_svm *svm)
if (svm->nested.initialized) if (svm->nested.initialized)
return 0; return 0;
vmcb02_page = snp_safe_alloc_page(&svm->vcpu); vmcb02_page = snp_safe_alloc_page();
if (!vmcb02_page) if (!vmcb02_page)
return -ENOMEM; return -ENOMEM;
svm->nested.vmcb02.ptr = page_address(vmcb02_page); svm->nested.vmcb02.ptr = page_address(vmcb02_page);
......
...@@ -4459,13 +4459,13 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) ...@@ -4459,13 +4459,13 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
} }
} }
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
{ {
unsigned long pfn; unsigned long pfn;
struct page *p; struct page *p;
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP)) if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
/* /*
* Allocate an SNP-safe page to workaround the SNP erratum where * Allocate an SNP-safe page to workaround the SNP erratum where
...@@ -4476,7 +4476,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) ...@@ -4476,7 +4476,7 @@ struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu)
* Allocate one extra page, choose a page which is not * Allocate one extra page, choose a page which is not
* 2MB-aligned, and free the other. * 2MB-aligned, and free the other.
*/ */
p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1); p = alloc_pages_node(node, gfp | __GFP_ZERO, 1);
if (!p) if (!p)
return NULL; return NULL;
......
...@@ -571,6 +571,11 @@ static void __svm_write_tsc_multiplier(u64 multiplier) ...@@ -571,6 +571,11 @@ static void __svm_write_tsc_multiplier(u64 multiplier)
__this_cpu_write(current_tsc_ratio, multiplier); __this_cpu_write(current_tsc_ratio, multiplier);
} }
static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{
	/*
	 * The SEV-ES host save area lives at a fixed 0x400-byte offset into
	 * the per-CPU save_area page.  __always_inline so that noinstr code
	 * never calls out into an instrumentable function.
	 */
	void *va = page_address(sd->save_area);

	return (struct sev_es_save_area *)(va + 0x400);
}
static inline void kvm_cpu_svm_disable(void) static inline void kvm_cpu_svm_disable(void)
{ {
uint64_t efer; uint64_t efer;
...@@ -675,12 +680,9 @@ static int svm_hardware_enable(void) ...@@ -675,12 +680,9 @@ static int svm_hardware_enable(void)
* TSC_AUX field now to avoid a RDMSR on every vCPU run. * TSC_AUX field now to avoid a RDMSR on every vCPU run.
*/ */
if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) { if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
struct sev_es_save_area *hostsa;
u32 __maybe_unused msr_hi; u32 __maybe_unused msr_hi;
hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400); rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
rdmsr(MSR_TSC_AUX, hostsa->tsc_aux, msr_hi);
} }
return 0; return 0;
...@@ -705,7 +707,7 @@ static int svm_cpu_init(int cpu) ...@@ -705,7 +707,7 @@ static int svm_cpu_init(int cpu)
int ret = -ENOMEM; int ret = -ENOMEM;
memset(sd, 0, sizeof(struct svm_cpu_data)); memset(sd, 0, sizeof(struct svm_cpu_data));
sd->save_area = snp_safe_alloc_page(NULL); sd->save_area = snp_safe_alloc_page_node(cpu_to_node(cpu), GFP_KERNEL);
if (!sd->save_area) if (!sd->save_area)
return ret; return ret;
...@@ -1431,7 +1433,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) ...@@ -1431,7 +1433,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
svm = to_svm(vcpu); svm = to_svm(vcpu);
err = -ENOMEM; err = -ENOMEM;
vmcb01_page = snp_safe_alloc_page(vcpu); vmcb01_page = snp_safe_alloc_page();
if (!vmcb01_page) if (!vmcb01_page)
goto out; goto out;
...@@ -1440,7 +1442,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) ...@@ -1440,7 +1442,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu)
* SEV-ES guests require a separate VMSA page used to contain * SEV-ES guests require a separate VMSA page used to contain
* the encrypted register state of the guest. * the encrypted register state of the guest.
*/ */
vmsa_page = snp_safe_alloc_page(vcpu); vmsa_page = snp_safe_alloc_page();
if (!vmsa_page) if (!vmsa_page)
goto error_free_vmcb_page; goto error_free_vmcb_page;
} }
...@@ -1505,11 +1507,6 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu) ...@@ -1505,11 +1507,6 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
__free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
} }
/*
 * Return the SEV-ES host save area embedded in this CPU's save_area page.
 * The host state starts 0x400 bytes into the page; the implicit conversion
 * from void * gives the typed pointer.
 */
static struct sev_es_save_area *sev_es_host_save_area(struct svm_cpu_data *sd)
{
return page_address(sd->save_area) + 0x400;
}
static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{ {
struct vcpu_svm *svm = to_svm(vcpu); struct vcpu_svm *svm = to_svm(vcpu);
...@@ -4968,7 +4965,7 @@ static int svm_vm_init(struct kvm *kvm) ...@@ -4968,7 +4965,7 @@ static int svm_vm_init(struct kvm *kvm)
static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu) static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
{ {
struct page *page = snp_safe_alloc_page(vcpu); struct page *page = snp_safe_alloc_page();
if (!page) if (!page)
return NULL; return NULL;
......
...@@ -726,7 +726,13 @@ void sev_guest_memory_reclaimed(struct kvm *kvm); ...@@ -726,7 +726,13 @@ void sev_guest_memory_reclaimed(struct kvm *kvm);
int sev_handle_vmgexit(struct kvm_vcpu *vcpu); int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
/* These symbols are used in common code and are stubbed below. */ /* These symbols are used in common code and are stubbed below. */
struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu);
struct page *snp_safe_alloc_page_node(int node, gfp_t gfp);
/*
 * Convenience wrapper around snp_safe_alloc_page_node(): allocate an
 * SNP-safe page with the default policy — account the allocation to the
 * caller (GFP_KERNEL_ACCOUNT) and prefer the current NUMA node.
 */
static inline struct page *snp_safe_alloc_page(void)
{
	int nid = numa_node_id();

	return snp_safe_alloc_page_node(nid, GFP_KERNEL_ACCOUNT);
}
void sev_free_vcpu(struct kvm_vcpu *vcpu); void sev_free_vcpu(struct kvm_vcpu *vcpu);
void sev_vm_destroy(struct kvm *kvm); void sev_vm_destroy(struct kvm *kvm);
void __init sev_set_cpu_caps(void); void __init sev_set_cpu_caps(void);
...@@ -741,8 +747,14 @@ int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); ...@@ -741,8 +747,14 @@ int sev_gmem_prepare(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order);
void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end); void sev_gmem_invalidate(kvm_pfn_t start, kvm_pfn_t end);
int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn); int sev_private_max_mapping_level(struct kvm *kvm, kvm_pfn_t pfn);
#else #else
static inline struct page *snp_safe_alloc_page(struct kvm_vcpu *vcpu) { static inline struct page *snp_safe_alloc_page_node(int node, gfp_t gfp)
return alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); {
return alloc_pages_node(node, gfp | __GFP_ZERO, 0);
}
static inline struct page *snp_safe_alloc_page(void)
{
return snp_safe_alloc_page_node(numa_node_id(), GFP_KERNEL_ACCOUNT);
} }
static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {} static inline void sev_free_vcpu(struct kvm_vcpu *vcpu) {}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment