Commit fae5c9f3 authored by Nicholas Piggin, committed by Michael Ellerman

KVM: PPC: Book3S HV: remove ISA v3.0 and v3.1 support from P7/8 path

POWER9 and later processors always go via the P9 guest entry path now.
Remove the remaining support from the P7/8 path.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-33-npiggin@gmail.com
parent 0bf7e1b2
@@ -130,9 +130,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
 	return kvm->arch.nested_enable && kvm_is_radix(kvm);
 }
 
-/* If set, the threads on each CPU core have to be in the same MMU mode */
-static bool no_mixing_hpt_and_radix __read_mostly;
-
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 /*
@@ -3133,9 +3130,6 @@ static void prepare_threads(struct kvmppc_vcore *vc)
 	for_each_runnable_thread(i, vcpu, vc) {
 		if (signal_pending(vcpu->arch.run_task))
 			vcpu->arch.ret = -EINTR;
-		else if (no_mixing_hpt_and_radix &&
-			 kvm_is_radix(vc->kvm) != radix_enabled())
-			vcpu->arch.ret = -EINVAL;
 		else if (vcpu->arch.vpa.update_pending ||
 			 vcpu->arch.slb_shadow.update_pending ||
 			 vcpu->arch.dtl.update_pending)
@@ -3342,6 +3336,9 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	int trap;
 	bool is_power8;
 
+	if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300)))
+		return;
+
 	/*
 	 * Remove from the list any threads that have a signal pending
 	 * or need a VPA update done
@@ -3369,9 +3366,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	 * Make sure we are running on primary threads, and that secondary
 	 * threads are offline. Also check if the number of threads in this
 	 * guest are greater than the current system threads per guest.
-	 * On POWER9, we need to be not in independent-threads mode if
-	 * this is a HPT guest on a radix host machine where the
-	 * CPU threads may not be in different MMU modes.
 	 */
 	if ((controlled_threads > 1) &&
 	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
@@ -3395,18 +3389,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	if (vc->num_threads < target_threads)
 		collect_piggybacks(&core_info, target_threads);
 
-	/*
-	 * On radix, arrange for TLB flushing if necessary.
-	 * This has to be done before disabling interrupts since
-	 * it uses smp_call_function().
-	 */
-	pcpu = smp_processor_id();
-	if (kvm_is_radix(vc->kvm)) {
-		for (sub = 0; sub < core_info.n_subcores; ++sub)
-			for_each_runnable_thread(i, vcpu, core_info.vc[sub])
-				kvmppc_prepare_radix_vcpu(vcpu, pcpu);
-	}
-
 	/*
 	 * Hard-disable interrupts, and check resched flag and signals.
 	 * If we need to reschedule or deliver a signal, clean up
@@ -3439,8 +3421,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 	cmd_bit = stat_bit = 0;
 	split = core_info.n_subcores;
 	sip = NULL;
-	is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S)
-		&& !cpu_has_feature(CPU_FTR_ARCH_300);
+	is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S);
 
 	if (split > 1) {
 		sip = &split_info;
@@ -3738,8 +3719,7 @@ static inline bool hcall_is_xics(unsigned long req)
 }
 
 /*
- * Virtual-mode guest entry for POWER9 and later when the host and
- * guest are both using the radix MMU. The LPIDR has already been set.
+ * Guest entry for POWER9 and later CPUs.
  */
 static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
			 unsigned long lpcr)
@@ -5762,11 +5742,25 @@ static int kvmhv_enable_dawr1(struct kvm *kvm)
 static bool kvmppc_hash_v3_possible(void)
 {
-	if (radix_enabled() && no_mixing_hpt_and_radix)
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		return false;
+
+	if (!cpu_has_feature(CPU_FTR_HVMODE))
 		return false;
 
-	return cpu_has_feature(CPU_FTR_ARCH_300) &&
-		cpu_has_feature(CPU_FTR_HVMODE);
+	/*
+	 * POWER9 chips before version 2.02 can't have some threads in
+	 * HPT mode and some in radix mode on the same core.
+	 */
+	if (radix_enabled()) {
+		unsigned int pvr = mfspr(SPRN_PVR);
+
+		if ((pvr >> 16) == PVR_POWER9 &&
+		    (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
+		     ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
+			return false;
+	}
+
+	return true;
 }
 
 static struct kvmppc_ops kvm_ops_hv = {
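For readers decoding the new kvmppc_hash_v3_possible() check, here is a stand-alone, user-space C illustration of the same PVR test; it is a sketch, not kernel code. The helper name p9_mixing_broken() and the test values in main() are made up, and 0x004e is assumed for PVR_POWER9 (the diff only names the macro). The masks and thresholds are copied from the check above: the top 16 bits select the processor family, the 0xe000 field selects the chip variant, and the low 12 bits hold the revision compared against 2.02 or 1.01.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PVR_POWER9	0x004e	/* assumed value; the diff only names the macro */

/* Mirrors the radix_enabled() branch added above: true means this POWER9
 * revision cannot mix HPT and radix threads on one core. */
static bool p9_mixing_broken(uint32_t pvr)
{
	if ((pvr >> 16) != PVR_POWER9)
		return false;
	if ((pvr & 0xe000) == 0x0000)		/* first variant in the check */
		return (pvr & 0xfff) < 0x202;	/* earlier than revision 2.02 */
	if ((pvr & 0xe000) == 0x2000)		/* second variant in the check */
		return (pvr & 0xfff) < 0x101;	/* earlier than revision 1.01 */
	return false;
}

int main(void)
{
	printf("%d\n", p9_mixing_broken(0x004e0201));	/* 1: revision below 2.02 */
	printf("%d\n", p9_mixing_broken(0x004e0202));	/* 0: revision 2.02 */
	return 0;
}

The hunk keeps the behaviour of the old no_mixing_hpt_and_radix flag but evaluates it where it is needed: hash-v3 guests are only refused when the host runs radix and the chip is one of the affected revisions.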
@@ -5910,18 +5904,6 @@ static int kvmppc_book3s_init_hv(void)
 	if (kvmppc_radix_possible())
 		r = kvmppc_radix_init();
 
-	/*
-	 * POWER9 chips before version 2.02 can't have some threads in
-	 * HPT mode and some in radix mode on the same core.
-	 */
-	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-		unsigned int pvr = mfspr(SPRN_PVR);
-		if ((pvr >> 16) == PVR_POWER9 &&
-		    (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) ||
-		     ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101)))
-			no_mixing_hpt_and_radix = true;
-	}
-
 	r = kvmppc_uvmem_init();
 	if (r < 0)
 		pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r);
@@ -58,7 +58,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	/*
 	 * Put whatever is in the decrementer into the
 	 * hypervisor decrementer.
-	 * Because of a hardware deviation in P8 and P9,
+	 * Because of a hardware deviation in P8,
 	 * we need to set LPCR[HDICE] before writing HDEC.
 	 */
 	ld	r5, HSTATE_KVM_VCORE(r13)
@@ -67,15 +67,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	ori	r8, r9, LPCR_HDICE
 	mtspr	SPRN_LPCR, r8
 	isync
-	andis.	r0, r9, LPCR_LD@h
 	mfspr	r8,SPRN_DEC
 	mftb	r7
-BEGIN_FTR_SECTION
-	/* On POWER9, don't sign-extend if host LPCR[LD] bit is set */
-	bne	32f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
 	extsw	r8,r8
-32:	mtspr	SPRN_HDEC,r8
+	mtspr	SPRN_HDEC,r8
 	add	r8,r8,r7
 	std	r8,HSTATE_DECEXP(r13)
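The assembly hunk above drops the POWER9 special case around the HDEC setup: on the now P7/P8-only path the 32-bit DEC value is always sign-extended (extsw) before being written to HDEC and added to the timebase. The removed branch existed because a POWER9 host with LPCR[LD] (large decrementer) set has a decrementer wider than 32 bits, where sign extension would mangle the value. A minimal stand-alone C illustration of what the unconditional extsw does (the values are made up):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t dec = 0xfffffff0;		/* a DEC value close to expiry (small negative) */
	int64_t hdec = (int64_t)(int32_t)dec;	/* what "extsw r8,r8" does */

	/* Prints 0xfffffffffffffff0: still a small negative value when seen
	 * as 64 bits, which is what HDEC expects on P7/P8. */
	printf("0x%016" PRIx64 "\n", (uint64_t)hdec);
	return 0;
}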
@@ -604,7 +604,7 @@ struct p9_sprs {
 	u64 uamor;
 };
 
-static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power9_idle_stop(unsigned long psscr)
 {
 	int cpu = raw_smp_processor_id();
 	int first = cpu_first_thread_sibling(cpu);
@@ -620,8 +620,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
 	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
 		/* EC=ESL=0 case */
 
-		BUG_ON(!mmu_on);
-
 		/*
 		 * Wake synchronously. SRESET via xscom may still cause
 		 * a 0x100 powersave wakeup with SRR1 reason!
@@ -803,7 +801,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
 	__slb_restore_bolted_realmode();
 
 out:
-	if (mmu_on)
-		mtmsr(MSR_KERNEL);
+	mtmsr(MSR_KERNEL);
 
 	return srr1;
@@ -895,7 +892,7 @@ struct p10_sprs {
 	 */
 };
 
-static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
+static unsigned long power10_idle_stop(unsigned long psscr)
 {
 	int cpu = raw_smp_processor_id();
 	int first = cpu_first_thread_sibling(cpu);
@@ -909,8 +906,6 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
 	if (!(psscr & (PSSCR_EC|PSSCR_ESL))) {
 		/* EC=ESL=0 case */
 
-		BUG_ON(!mmu_on);
-
 		/*
 		 * Wake synchronously. SRESET via xscom may still cause
 		 * a 0x100 powersave wakeup with SRR1 reason!
@@ -991,7 +986,6 @@ static unsigned long power10_idle_stop(unsigned long psscr, bool mmu_on)
 	__slb_restore_bolted_realmode();
 
 out:
-	if (mmu_on)
-		mtmsr(MSR_KERNEL);
+	mtmsr(MSR_KERNEL);
 
 	return srr1;
@@ -1002,40 +996,10 @@ static unsigned long arch300_offline_stop(unsigned long psscr)
 {
 	unsigned long srr1;
 
-#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	__ppc64_runlatch_off();
 	if (cpu_has_feature(CPU_FTR_ARCH_31))
-		srr1 = power10_idle_stop(psscr, true);
+		srr1 = power10_idle_stop(psscr);
 	else
-		srr1 = power9_idle_stop(psscr, true);
+		srr1 = power9_idle_stop(psscr);
-	__ppc64_runlatch_on();
-#else
-	/*
-	 * Tell KVM we're entering idle.
-	 * This does not have to be done in real mode because the P9 MMU
-	 * is independent per-thread. Some steppings share radix/hash mode
-	 * between threads, but in that case KVM has a barrier sync in real
-	 * mode before and after switching between radix and hash.
-	 *
-	 * kvm_start_guest must still be called in real mode though, hence
-	 * the false argument.
-	 */
-	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_IDLE;
-	__ppc64_runlatch_off();
-	if (cpu_has_feature(CPU_FTR_ARCH_31))
-		srr1 = power10_idle_stop(psscr, false);
-	else
-		srr1 = power9_idle_stop(psscr, false);
-	__ppc64_runlatch_on();
-	local_paca->kvm_hstate.hwthread_state = KVM_HWTHREAD_IN_KERNEL;
-	/* Order setting hwthread_state vs. testing hwthread_req */
-	smp_mb();
-	if (local_paca->kvm_hstate.hwthread_req)
-		srr1 = idle_kvm_start_guest(srr1);
-	mtmsr(MSR_KERNEL);
-#endif
 
 	return srr1;
 }
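The removed #else branch above was the idle-side half of the handshake that let KVM claim an offlined secondary thread on the P7/8 path: the thread records its state in hwthread_state, issues a full barrier, and only then samples hwthread_req. Since ISA v3.0 and later hosts now always use the P9 guest entry path, which does not pull idle host threads into the guest this way, the whole block can go. Below is a rough, self-contained sketch of that ordering pattern using C11 atomics; the flag names and enum values stand in for the paca fields and KVM_HWTHREAD_* constants and are not the kernel's actual code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { HWTHREAD_IN_KERNEL, HWTHREAD_IN_IDLE };	/* stand-ins for KVM_HWTHREAD_* */

static atomic_int hwthread_state;	/* written by the idling thread */
static atomic_bool hwthread_req;	/* set by KVM when it wants this thread */

static void idle_thread(void)
{
	atomic_store(&hwthread_state, HWTHREAD_IN_IDLE);
	/* ... power9_idle_stop()/power10_idle_stop() would run here ... */
	atomic_store(&hwthread_state, HWTHREAD_IN_KERNEL);

	/* Full barrier, mirroring the smp_mb() in the removed code: the state
	 * store must be visible before the request flag is sampled. */
	atomic_thread_fence(memory_order_seq_cst);

	if (atomic_load(&hwthread_req))
		puts("would call idle_kvm_start_guest()");
}

int main(void)
{
	atomic_store(&hwthread_req, true);	/* pretend KVM has requested the thread */
	idle_thread();
	return 0;
}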
@@ -1055,9 +1019,9 @@ void arch300_idle_type(unsigned long stop_psscr_val,
 	__ppc64_runlatch_off();
 	if (cpu_has_feature(CPU_FTR_ARCH_31))
-		srr1 = power10_idle_stop(psscr, true);
+		srr1 = power10_idle_stop(psscr);
 	else
-		srr1 = power9_idle_stop(psscr, true);
+		srr1 = power9_idle_stop(psscr);
 	__ppc64_runlatch_on();
 
 	fini_irq_for_idle_irqsoff();