Commit b5d4e232 authored by Bibo Mao's avatar Bibo Mao Committed by Huacai Chen

LoongArch: KVM: Delay secondary mmu tlb flush until guest entry

With hardware-assisted virtualization, there are two levels of HW MMU: one
is the GVA to GPA mapping, the other is the GPA to HPA mapping, which is
generically called the secondary MMU. If there is a page fault in the
secondary MMU, a TLB flush operation is needed, indexed by the faulting GPA
address and the VMID. The VMID is stored in register CSR.GSTAT and will be
reloaded or recalculated before guest entry.

Currently CSR.GSTAT is not saved and restored during VCPU context
switch; instead it is recalculated during guest entry. So CSR.GSTAT is
valid only while a VCPU runs in guest mode, and it may be stale once the
VCPU exits to host mode. Since register CSR.GSTAT may be stale, it may
record the VMID of the last scheduled-out VCPU rather than that of the
current VCPU.

Function kvm_flush_tlb_gpa() must be called with the VCPU's real VMID in
effect, so the call is moved to the guest-entry path. Also an arch-specific
request id KVM_REQ_TLB_FLUSH_GPA is added to flush the TLB for the secondary
mmu; as an optimization, the flush can be skipped when the VMID has just
been updated, since updating the VMID already invalidates all guest TLB
entries.
Signed-off-by: default avatarBibo Mao <maobibo@loongson.cn>
Signed-off-by: default avatarHuacai Chen <chenhuacai@loongson.cn>
parent e306e514
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#define KVM_PRIVATE_MEM_SLOTS 0 #define KVM_PRIVATE_MEM_SLOTS 0
#define KVM_HALT_POLL_NS_DEFAULT 500000 #define KVM_HALT_POLL_NS_DEFAULT 500000
#define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0)
#define KVM_GUESTDBG_SW_BP_MASK \ #define KVM_GUESTDBG_SW_BP_MASK \
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)
...@@ -190,6 +191,7 @@ struct kvm_vcpu_arch { ...@@ -190,6 +191,7 @@ struct kvm_vcpu_arch {
/* vcpu's vpid */ /* vcpu's vpid */
u64 vpid; u64 vpid;
gpa_t flush_gpa;
/* Frequency of stable timer in Hz */ /* Frequency of stable timer in Hz */
u64 timer_mhz; u64 timer_mhz;
......
...@@ -242,6 +242,7 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu) ...@@ -242,6 +242,7 @@ void kvm_check_vpid(struct kvm_vcpu *vcpu)
kvm_update_vpid(vcpu, cpu); kvm_update_vpid(vcpu, cpu);
trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); trace_kvm_vpid_change(vcpu, vcpu->arch.vpid);
vcpu->cpu = cpu; vcpu->cpu = cpu;
kvm_clear_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
} }
/* Restore GSTAT(0x50).vpid */ /* Restore GSTAT(0x50).vpid */
......
...@@ -908,7 +908,8 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) ...@@ -908,7 +908,8 @@ int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
return ret; return ret;
/* Invalidate this entry in the TLB */ /* Invalidate this entry in the TLB */
kvm_flush_tlb_gpa(vcpu, gpa); vcpu->arch.flush_gpa = gpa;
kvm_make_request(KVM_REQ_TLB_FLUSH_GPA, vcpu);
return 0; return 0;
} }
......
...@@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void) ...@@ -23,10 +23,7 @@ void kvm_flush_tlb_all(void)
void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa)
{ {
unsigned long flags; lockdep_assert_irqs_disabled();
local_irq_save(flags);
gpa &= (PAGE_MASK << 1); gpa &= (PAGE_MASK << 1);
invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa); invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa);
local_irq_restore(flags);
} }
...@@ -51,6 +51,16 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu) ...@@ -51,6 +51,16 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
return RESUME_GUEST; return RESUME_GUEST;
} }
/*
 * Handle requests that must be processed with interrupts disabled,
 * immediately before entering the guest (after kvm_check_vpid() has
 * made CSR.GSTAT hold this VCPU's guest ID).
 */
static void kvm_late_check_requests(struct kvm_vcpu *vcpu)
{
	lockdep_assert_irqs_disabled();
	if (kvm_check_request(KVM_REQ_TLB_FLUSH_GPA, vcpu))
		if (vcpu->arch.flush_gpa != INVALID_GPA) {
			/* Flush the pending GPA and mark the slot empty again */
			kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa);
			vcpu->arch.flush_gpa = INVALID_GPA;
		}
}
/* /*
* Check and handle pending signal and vCPU requests etc * Check and handle pending signal and vCPU requests etc
* Run with irq enabled and preempt enabled * Run with irq enabled and preempt enabled
...@@ -101,6 +111,13 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) ...@@ -101,6 +111,13 @@ static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu)
/* Make sure the vcpu mode has been written */ /* Make sure the vcpu mode has been written */
smp_store_mb(vcpu->mode, IN_GUEST_MODE); smp_store_mb(vcpu->mode, IN_GUEST_MODE);
kvm_check_vpid(vcpu); kvm_check_vpid(vcpu);
/*
* Called after function kvm_check_vpid()
* Since it updates CSR.GSTAT used by kvm_flush_tlb_gpa(),
* and it may also clear KVM_REQ_TLB_FLUSH_GPA pending bit
*/
kvm_late_check_requests(vcpu);
vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY);
/* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */ /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */
vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST;
...@@ -1005,6 +1022,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) ...@@ -1005,6 +1022,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
struct loongarch_csrs *csr; struct loongarch_csrs *csr;
vcpu->arch.vpid = 0; vcpu->arch.vpid = 0;
vcpu->arch.flush_gpa = INVALID_GPA;
hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
vcpu->arch.swtimer.function = kvm_swtimer_wakeup; vcpu->arch.swtimer.function = kvm_swtimer_wakeup;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment