Commit c482f2ce authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Move APIC access page helper to common x86 code

Move the APIC access page allocation helper function to common x86 code;
the allocation routine is virtually identical between APICv (VMX) and
AVIC (SVM).  Keep APICv's gfn_to_page() + put_page() sequence, which
verifies that a backing page can be allocated, i.e. that the system isn't
under heavy memory pressure.  Forcing the backing page to be populated
isn't strictly necessary, but skipping the effective prefetch only delays
the inevitable.
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20230106011306.85230-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1459f5c6
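
The "effective prefetch" the message describes boils down to resolving the
gfn to a backing page once at setup time and immediately dropping the
reference. A minimal sketch of that idiom follows, using the existing
gfn_to_page()/is_error_page()/put_page() helpers; prefetch_apic_backing_page()
is a hypothetical name for illustration, not part of this patch:

/*
 * Hypothetical sketch: force the backing page to be populated so that
 * allocation failure (e.g. under heavy memory pressure) surfaces at setup
 * time, then release the reference so the page stays migratable.
 */
static int prefetch_apic_backing_page(struct kvm *kvm)
{
	struct page *page;

	/* Populates the backing page; fails if none can be allocated. */
	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
	if (is_error_page(page))
		return -EFAULT;

	/* Drop the reference so memory hot-unplug can migrate the page. */
	put_page(page);
	return 0;
}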
arch/x86/kvm/lapic.c

@@ -2435,6 +2435,41 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
 	apic->highest_isr_cache = -1;
 }
 
+int kvm_alloc_apic_access_page(struct kvm *kvm)
+{
+	struct page *page;
+	void __user *hva;
+	int ret = 0;
+
+	mutex_lock(&kvm->slots_lock);
+	if (kvm->arch.apic_access_memslot_enabled)
+		goto out;
+
+	hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
+				      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
+	if (IS_ERR(hva)) {
+		ret = PTR_ERR(hva);
+		goto out;
+	}
+
+	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+	if (is_error_page(page)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/*
+	 * Do not pin the page in memory, so that memory hot-unplug
+	 * is able to migrate it.
+	 */
+	put_page(page);
+	kvm->arch.apic_access_memslot_enabled = true;
+out:
+	mutex_unlock(&kvm->slots_lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
+
 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
...
arch/x86/kvm/lapic.h

@@ -112,6 +112,7 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
 		     struct dest_map *dest_map);
 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu);
+int kvm_alloc_apic_access_page(struct kvm *kvm);
 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 				   struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);
...
arch/x86/kvm/svm/avic.c

@@ -256,39 +256,6 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
 	return &avic_physical_id_table[index];
 }
 
-/*
- * Note:
- * AVIC hardware walks the nested page table to check permissions,
- * but does not use the SPA address specified in the leaf page
- * table entry since it uses address in the AVIC_BACKING_PAGE pointer
- * field of the VMCB. Therefore, we set up the
- * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
- */
-static int avic_alloc_access_page(struct kvm *kvm)
-{
-	void __user *ret;
-	int r = 0;
-
-	mutex_lock(&kvm->slots_lock);
-	if (kvm->arch.apic_access_memslot_enabled)
-		goto out;
-
-	ret = __x86_set_memory_region(kvm,
-				      APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
-				      APIC_DEFAULT_PHYS_BASE,
-				      PAGE_SIZE);
-	if (IS_ERR(ret)) {
-		r = PTR_ERR(ret);
-		goto out;
-	}
-
-	kvm->arch.apic_access_memslot_enabled = true;
-out:
-	mutex_unlock(&kvm->slots_lock);
-	return r;
-}
-
 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 {
 	u64 *entry, new_entry;
@@ -305,7 +272,13 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	if (kvm_apicv_activated(vcpu->kvm)) {
 		int ret;
 
-		ret = avic_alloc_access_page(vcpu->kvm);
+		/*
+		 * Note, AVIC hardware walks the nested page table to check
+		 * permissions, but does not use the SPA address specified in
+		 * the leaf SPTE since it uses address in the AVIC_BACKING_PAGE
+		 * pointer field of the VMCB.
+		 */
+		ret = kvm_alloc_apic_access_page(vcpu->kvm);
 		if (ret)
 			return ret;
 	}
...
arch/x86/kvm/vmx/vmx.c

@@ -3808,39 +3808,6 @@ static void seg_setup(int seg)
 	vmcs_write32(sf->ar_bytes, ar);
 }
 
-static int alloc_apic_access_page(struct kvm *kvm)
-{
-	struct page *page;
-	void __user *hva;
-	int ret = 0;
-
-	mutex_lock(&kvm->slots_lock);
-	if (kvm->arch.apic_access_memslot_enabled)
-		goto out;
-
-	hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
-				      APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
-	if (IS_ERR(hva)) {
-		ret = PTR_ERR(hva);
-		goto out;
-	}
-
-	page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
-	if (is_error_page(page)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	/*
-	 * Do not pin the page in memory, so that memory hot-unplug
-	 * is able to migrate it.
-	 */
-	put_page(page);
-	kvm->arch.apic_access_memslot_enabled = true;
-out:
-	mutex_unlock(&kvm->slots_lock);
-	return ret;
-}
-
 int allocate_vpid(void)
 {
 	int vpid;
@@ -7394,7 +7361,7 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs = &vmx->vmcs01;
 
 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
-		err = alloc_apic_access_page(vcpu->kvm);
+		err = kvm_alloc_apic_access_page(vcpu->kvm);
 		if (err)
 			goto free_vmcs;
 	}
...