Commit 72c13445 authored by James Hogan, committed by Greg Kroah-Hartman

KVM: MIPS: Drop other CPU ASIDs on guest MMU changes

commit 91e4f1b6 upstream.

When a guest TLB entry is replaced by TLBWI or TLBWR, we only invalidate
TLB entries on the local CPU. This doesn't work correctly on an SMP host
when the guest is migrated to a different physical CPU, as it could pick
up stale TLB mappings from the last time the vCPU ran on that physical
CPU.

Therefore invalidate both user and kernel host ASIDs on other CPUs,
which will cause new ASIDs to be generated when the vCPU next runs on
those CPUs.

We're careful only to do this if the TLB entry was already valid, and
only for the kernel ASID where the virtual address it mapped is outside
of the guest user address range.
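
As a rough illustration (not part of this patch), the sketch below
models the ASID scheme in plain userspace C: each physical CPU hands
out ASIDs from a counter whose upper bits act as a generation
("version"), and the vCPU caches the ASID it was last assigned on each
CPU. The constants and the asid_cache/guest_kernel_asid/vcpu_load names
are simplified stand-ins, not the kernel's definitions. A zeroed cached
value can never match the CPU's current version, so the next time the
vCPU runs there it is pushed through the new-ASID path and any stale
TLB entries tagged with the old ASID become unreachable.

/* Minimal model, not kernel code; constants and names are illustrative. */
#include <stdio.h>

#define ASID_MASK          0xffUL      /* low bits: the ASID itself  */
#define ASID_VERSION_MASK  (~0xffUL)   /* high bits: per-CPU version */
#define ASID_FIRST_VERSION 0x100UL

static unsigned long asid_cache[2] = {
	ASID_FIRST_VERSION, ASID_FIRST_VERSION
};
static unsigned long guest_kernel_asid[2];  /* vCPU's cached ASID per CPU */

/* Hand out the next ASID on @cpu; overflow into the version bits marks
 * a new generation (the kernel additionally flushes the TLB on rollover). */
static unsigned long get_new_asid(int cpu)
{
	unsigned long asid = asid_cache[cpu] + 1;

	if (!(asid & ASID_MASK))	/* ASIDs exhausted on this version */
		asid += 1;		/* skip ASID 0 of the new version */
	return asid_cache[cpu] = asid;
}

/* Roughly what happens when the vCPU is scheduled onto @cpu. */
static void vcpu_load(int cpu)
{
	/* A zeroed cache entry never matches the CPU's current version,
	 * so a fresh ASID is allocated and stale TLB entries tagged with
	 * the old ASID can no longer be hit. */
	if ((guest_kernel_asid[cpu] ^ asid_cache[cpu]) & ASID_VERSION_MASK)
		guest_kernel_asid[cpu] = get_new_asid(cpu);

	printf("cpu%d: guest kernel ASID %#lx\n",
	       cpu, guest_kernel_asid[cpu] & ASID_MASK);
}

int main(void)
{
	vcpu_load(0);               /* first run on CPU0: ASID 0x1          */
	guest_kernel_asid[0] = 0;   /* guest changed its TLB on another CPU */
	vcpu_load(0);               /* back on CPU0: forced onto ASID 0x2   */
	return 0;
}
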
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Cc: <stable@vger.kernel.org> # 3.17.x-
[james.hogan@imgtec.com: Backport to 3.17..4.4]
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c57deabd
@@ -807,6 +807,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
 	return EMULATE_FAIL;
 }
 
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu: VCPU with changed mappings.
+ * @tlb: TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+					  struct kvm_mips_tlb *tlb)
+{
+	int cpu, i;
+	bool user;
+
+	/* No need to flush for entries which are already invalid */
+	if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
+		return;
+	/* User address space doesn't need flushing for KSeg2/3 changes */
+	user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+	preempt_disable();
+
+	/*
+	 * Probe the shadow host TLB for the entry being overwritten, if one
+	 * matches, invalidate it
+	 */
+	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	/* Invalidate the whole ASID on other CPUs */
+	cpu = smp_processor_id();
+	for_each_possible_cpu(i) {
+		if (i == cpu)
+			continue;
+		if (user)
+			vcpu->arch.guest_user_asid[i] = 0;
+		vcpu->arch.guest_kernel_asid[i] = 0;
+	}
+
+	preempt_enable();
+}
+
 /* Write Guest TLB Entry @ Index */
 enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 {
@@ -826,11 +867,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
 	}
 
 	tlb = &vcpu->arch.guest_tlb[index];
-	/*
-	 * Probe the shadow host TLB for the entry being overwritten, if one
-	 * matches, invalidate it
-	 */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -859,11 +897,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
 
 	tlb = &vcpu->arch.guest_tlb[index];
 
-	/*
-	 * Probe the shadow host TLB for the entry being overwritten, if one
-	 * matches, invalidate it
-	 */
-	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+	kvm_mips_invalidate_guest_tlb(vcpu, tlb);
 
 	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
 	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -982,6 +1016,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 	int32_t rt, rd, copz, sel, co_bit, op;
 	uint32_t pc = vcpu->arch.pc;
 	unsigned long curr_pc;
+	int cpu, i;
 
 	/*
 	 * Update PC and hold onto current PC in case there is
@@ -1089,8 +1124,16 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
 						vcpu->arch.gprs[rt]
 						& ASID_MASK);
 
+					preempt_disable();
 					/* Blow away the shadow host TLBs */
 					kvm_mips_flush_host_tlb(1);
+					cpu = smp_processor_id();
+					for_each_possible_cpu(i)
+						if (i != cpu) {
+							vcpu->arch.guest_user_asid[i] = 0;
+							vcpu->arch.guest_kernel_asid[i] = 0;
+						}
+					preempt_enable();
 				}
 				kvm_write_c0_guest_entryhi(cop0,
 						vcpu->arch.gprs[rt]);