Commit 7c5b06ca authored by Paul Mackerras

KVM: PPC: Book3S HV: Adapt TLB invalidations to work on POWER9

POWER9 adds new capabilities to the tlbie (TLB invalidate entry)
and tlbiel (local tlbie) instructions.  Both instructions get a
set of new parameters (RIC, PRS and R) which appear as bits in the
instruction word.  The tlbiel instruction now has a second register
operand, which contains a PID and/or LPID value if needed, and
should otherwise contain 0.

This adapts KVM-HV's usage of tlbie and tlbiel to work on POWER9
as well as older processors.  Since we only handle HPT guests so
far, we need RIC=0 PRS=0 R=0, which ends up with the same instruction
word as on previous processors, so we don't need to conditionally
execute different instructions depending on the processor.
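
For illustration, a minimal sketch of how the two forms are emitted with
RIC=0 PRS=0 R=0, using the kernel's PPC_TLBIE_5 and PPC_TLBIEL macros from
asm/ppc-opcode.h exactly as the hunks below do.  The helper names are
hypothetical and exist only for this example:

	/* Sketch only: mirrors the inline asm added to do_tlbies() below. */
	static inline void sketch_tlbie_hpt(unsigned long rb, unsigned long lpid)
	{
		/* global: tlbie rb, rs(=LPID), ric=0, prs=0, r=0 */
		asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
			     "r" (rb), "r" (lpid));
	}

	static inline void sketch_tlbiel_hpt(unsigned long rb)
	{
		/* local: tlbiel rb, rs(=0), ric=0, prs=0, r=0.
		 * POWER7/POWER8 ignore the RS field, so passing 0 here is
		 * compatible with the older single-operand tlbiel. */
		asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
			     "r" (rb), "r" (0UL));
	}

With RIC, PRS and R all zero, the tlbie encoding matches the pre-POWER9
one, and the extra RS operand to tlbiel is ignored by POWER7/POWER8, as
the comment added in do_tlbies() notes.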

The local flush on first entry to a guest in book3s_hv_rmhandlers.S
is a loop which depends on the number of TLB sets.  Rather than
using feature sections to set the number of iterations based on
which CPU we're on, we now work out this number at VM creation time
and store it in the kvm_arch struct.  That will make it possible to
get the number from the device tree in future, which will help with
compatibility with future processors.
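
As a rough C-level sketch of what that loop does with the stored count
(hypothetical helper; the real implementation stays in assembly, and the
exact RB layout used here -- IS field at bit 10, set index from bit 12 --
is an assumption based on the existing loop rather than something shown
in this diff):

	/* Hypothetical C rendering of the per-set local flush driven by
	 * kvm->arch.tlb_sets; the actual code is the ctr-based loop in
	 * book3s_hv_rmhandlers.S. */
	static void sketch_flush_tlb_local(struct kvm *kvm)
	{
		unsigned long rb;
		unsigned int set;

		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* IS = 0b10, set index in the next field up (assumed) */
			rb = (0x2UL << 10) | ((unsigned long)set << 12);
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rb), "r" (0UL));
		}
		asm volatile("ptesync" : : : "memory");
	}

Driving the loop from kvm->arch.tlb_sets rather than a feature section is
what makes it possible to take the count from the device tree later
without touching the assembly.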

Since mmu_partition_table_set_entry() does a global flush of the
whole LPID, we don't need to do the TLB flush on first entry to the
guest on each processor.  Therefore we don't set all bits in the
tlb_need_flush bitmap on VM startup on POWER9.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
parent e9cf1e08
@@ -244,6 +244,7 @@ struct kvm_arch_memory_slot {
 struct kvm_arch {
 	unsigned int lpid;
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	unsigned int tlb_sets;
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	atomic64_t mmio_update;
@@ -487,6 +487,7 @@ int main(void)
 	/* book3s */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	DEFINE(KVM_TLB_SETS, offsetof(struct kvm, arch.tlb_sets));
 	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
 	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
 	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -3265,8 +3265,11 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	 * Since we don't flush the TLB when tearing down a VM,
 	 * and this lpid might have previously been used,
 	 * make sure we flush on each core before running the new VM.
+	 * On POWER9, the tlbie in mmu_partition_table_set_entry()
+	 * does this flush for us.
 	 */
-	cpumask_setall(&kvm->arch.need_tlb_flush);
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		cpumask_setall(&kvm->arch.need_tlb_flush);
 
 	/* Start out with the default set of hcalls enabled */
 	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
@@ -3291,6 +3294,17 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 		lpcr &= ~LPCR_VPM0;
 	kvm->arch.lpcr = lpcr;
 
+	/*
+	 * Work out how many sets the TLB has, for the use of
+	 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;	/* 256 */
+	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		kvm->arch.tlb_sets = POWER8_TLB_SETS;		/* 512 */
+	else
+		kvm->arch.tlb_sets = POWER7_TLB_SETS;		/* 128 */
+
 	/*
 	 * Track that we now have a HV mode VM active. This blocks secondary
 	 * CPU threads from coming online.
@@ -3733,3 +3747,4 @@ module_exit(kvmppc_book3s_exit_hv);
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(KVM_MINOR);
 MODULE_ALIAS("devname:kvm");
@@ -425,13 +425,18 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 {
 	long i;
 
+	/*
+	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
+	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
+	 * the RS field, this is backwards-compatible with P7 and P8.
+	 */
 	if (global) {
 		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
 			cpu_relax();
 		if (need_sync)
 			asm volatile("ptesync" : : : "memory");
 		for (i = 0; i < npages; ++i)
-			asm volatile(PPC_TLBIE(%1,%0) : :
+			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
 		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 		kvm->arch.tlbie_lock = 0;
@@ -439,7 +444,8 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 		if (need_sync)
 			asm volatile("ptesync" : : : "memory");
 		for (i = 0; i < npages; ++i)
-			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
+			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
+				     "r" (rbvalues[i]), "r" (0));
 		asm volatile("ptesync" : : : "memory");
 	}
 }
@@ -613,12 +613,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
 	stdcx.	r7,0,r6
 	bne	23b
 	/* Flush the TLB of any entries for this LPID */
-	/* use arch 2.07S as a proxy for POWER8 */
-BEGIN_FTR_SECTION
-	li	r6,512		/* POWER8 has 512 sets */
-FTR_SECTION_ELSE
-	li	r6,128		/* POWER7 has 128 sets */
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
+	lwz	r6,KVM_TLB_SETS(r9)
+	li	r0,0		/* RS for P9 version of tlbiel */
 	mtctr	r6
 	li	r7,0x800	/* IS field = 0b10 */
 	ptesync