Commit 34c0d5a6 authored by Marc Zyngier

Merge branch kvm-arm64/host_data into kvmarm-master/next

* kvm-arm64/host_data:
  : .
  : Rationalise the host-specific data to live as part of the per-CPU state.
  :
  : From the cover letter:
  :
  : "It appears that over the years, we have accumulated a lot of cruft in
  : the kvm_vcpu_arch structure. Part of the gunk is data that is strictly
  : host CPU specific, and this results in two main problems:
  :
  : - the structure itself is stupidly large, over 8kB. With the
  :   arch-agnostic kvm_vcpu, we're above 10kB, which is insane. This has
  :   some ripple effects, as we need physically contiguous allocation to
  :   be able to map it at EL2 for !VHE. There is more to it though, as
  :   some data structures, although per-vcpu, could be allocated
  :   separately.
  :
  : - We lose track of the life-cycle of this data, because we're
  :   guaranteed that it will be around forever and we start relying on
  :   wrong assumptions. This is becoming a maintenance burden.
  :
  : This series rectifies some of these things, starting with the two main
  : offenders: debug and FP, a lot of which gets pushed out to the per-CPU
  : host structure. Indeed, their lifetime really isn't that of the vcpu,
  : but tied to the physical CPU the vcpu runs on.
  :
  : This results in a small reduction of the vcpu size, but mainly a much
  : clearer understanding of the life-cycle of these structures."
  : .
  KVM: arm64: Move management of __hyp_running_vcpu to load/put on VHE
  KVM: arm64: Exclude FP ownership from kvm_vcpu_arch
  KVM: arm64: Exclude host_fpsimd_state pointer from kvm_vcpu_arch
  KVM: arm64: Exclude mdcr_el2_host from kvm_vcpu_arch
  KVM: arm64: Exclude host_debug_data from vcpu_arch
  KVM: arm64: Add accessor for per-CPU state
Signed-off-by: Marc Zyngier <maz@kernel.org>
parents fec50db7 9a393599
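
The shape of the change is easiest to see outside the diff. The sketch below is a minimal, self-contained model of the resulting pattern, not the kernel code: the kvm_host_data array, the current_cpu variable and the this_cpu_ptr()/host_data_ptr() macros here are simplified stand-ins for the kernel's real per-CPU machinery.

/* Toy model: per-physical-CPU host state, reached through an accessor. */
#include <stdio.h>

#define NR_CPUS 4

struct kvm_host_data {
	/* Ownership of the FP regs, formerly vcpu->arch.fp_state */
	enum { FP_STATE_FREE, FP_STATE_HOST_OWNED, FP_STATE_GUEST_OWNED } fp_owner;
	/* Host trap configuration saved at guest entry, formerly mdcr_el2_host */
	unsigned long mdcr_el2;
};

static struct kvm_host_data kvm_host_data[NR_CPUS];
static int current_cpu;	/* stand-in for "the CPU we are running on" */

#define this_cpu_ptr()   (&kvm_host_data[current_cpu])
#define host_data_ptr(f) (&this_cpu_ptr()->f)

int main(void)
{
	current_cpu = 1;

	/* vcpu_load on CPU 1: the host owns the FP registers... */
	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;

	/* ...until the guest's first FP access hands them over. */
	*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;

	printf("CPU%d fp_owner=%d\n", current_cpu, *host_data_ptr(fp_owner));
	return 0;
}

Whichever vCPU runs next on this physical CPU sees the same per-CPU slot, which is the point of the series: this state never had a per-vCPU lifetime to begin with.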
@@ -588,7 +588,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
 		if (!vcpu_has_sve(vcpu) ||
-		    (vcpu->arch.fp_state != FP_STATE_GUEST_OWNED))
+		    (*host_data_ptr(fp_owner) != FP_STATE_GUEST_OWNED))
 			val |= CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN;
 		if (cpus_have_final_cap(ARM64_SME))
 			val |= CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN;
@@ -596,7 +596,7 @@ static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
 		val = CPTR_NVHE_EL2_RES1;
 		if (vcpu_has_sve(vcpu) &&
-		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
+		    (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED))
 			val |= CPTR_EL2_TZ;
 		if (cpus_have_final_cap(ARM64_SME))
 			val &= ~CPTR_EL2_TSM;
...
@@ -530,8 +530,42 @@ struct kvm_cpu_context {
 	u64 *vncr_array;
 };
 
+/*
+ * This structure is instantiated on a per-CPU basis, and contains
+ * data that is:
+ *
+ * - tied to a single physical CPU, and
+ * - either have a lifetime that does not extend past vcpu_put()
+ * - or is an invariant for the lifetime of the system
+ *
+ * Use host_data_ptr(field) as a way to access a pointer to such a
+ * field.
+ */
 struct kvm_host_data {
 	struct kvm_cpu_context host_ctxt;
+	struct user_fpsimd_state *fpsimd_state;	/* hyp VA */
+
+	/* Ownership of the FP regs */
+	enum {
+		FP_STATE_FREE,
+		FP_STATE_HOST_OWNED,
+		FP_STATE_GUEST_OWNED,
+	} fp_owner;
+
+	/*
+	 * host_debug_state contains the host registers which are
+	 * saved and restored during world switches.
+	 */
+	struct {
+		/* {Break,watch}point registers */
+		struct kvm_guest_debug_arch regs;
+		/* Statistical profiling extension */
+		u64 pmscr_el1;
+		/* Self-hosted trace */
+		u64 trfcr_el1;
+		/* Values of trap registers for the host before guest entry. */
+		u64 mdcr_el2;
+	} host_debug_state;
 };
 
 struct kvm_host_psci_config {
@@ -592,19 +626,9 @@ struct kvm_vcpu_arch {
 	u64 mdcr_el2;
 	u64 cptr_el2;
 
-	/* Values of trap registers for the host before guest entry. */
-	u64 mdcr_el2_host;
-
 	/* Exception Information */
 	struct kvm_vcpu_fault_info fault;
 
-	/* Ownership of the FP regs */
-	enum {
-		FP_STATE_FREE,
-		FP_STATE_HOST_OWNED,
-		FP_STATE_GUEST_OWNED,
-	} fp_state;
-
 	/* Configuration flags, set once and for all before the vcpu can run */
 	u8 cflags;
 
@@ -627,11 +651,10 @@ struct kvm_vcpu_arch {
 	 * We maintain more than a single set of debug registers to support
 	 * debugging the guest from the host and to maintain separate host and
 	 * guest state during world switches. vcpu_debug_state are the debug
-	 * registers of the vcpu as the guest sees them. host_debug_state are
-	 * the host registers which are saved and restored during
-	 * world switches. external_debug_state contains the debug
-	 * values we want to debug the guest. This is set via the
-	 * KVM_SET_GUEST_DEBUG ioctl.
+	 * registers of the vcpu as the guest sees them.
+	 *
+	 * external_debug_state contains the debug values we want to debug the
+	 * guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
 	 *
 	 * debug_ptr points to the set of debug registers that should be loaded
 	 * onto the hardware when running the guest.
@@ -640,18 +663,8 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch vcpu_debug_state;
 	struct kvm_guest_debug_arch external_debug_state;
 
-	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
 	struct task_struct *parent_task;
 
-	struct {
-		/* {Break,watch}point registers */
-		struct kvm_guest_debug_arch regs;
-		/* Statistical profiling extension */
-		u64 pmscr_el1;
-		/* Self-hosted trace */
-		u64 trfcr_el1;
-	} host_debug_state;
-
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;
 	struct arch_timer_cpu timer_cpu;
@@ -1168,6 +1181,32 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
 DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
 
+/*
+ * How we access per-CPU host data depends on the where we access it from,
+ * and the mode we're in:
+ *
+ * - VHE and nVHE hypervisor bits use their locally defined instance
+ *
+ * - the rest of the kernel use either the VHE or nVHE one, depending on
+ *   the mode we're running in.
+ *
+ *   Unless we're in protected mode, fully deprivileged, and the nVHE
+ *   per-CPU stuff is exclusively accessible to the protected EL2 code.
+ *   In this case, the EL1 code uses the *VHE* data as its private state
+ *   (which makes sense in a way as there shouldn't be any shared state
+ *   between the host and the hypervisor).
+ *
+ * Yes, this is all totally trivial. Shoot me now.
+ */
+#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
+#define host_data_ptr(f)	(&this_cpu_ptr(&kvm_host_data)->f)
+#else
+#define host_data_ptr(f)						\
+	(static_branch_unlikely(&kvm_protected_mode_initialized) ?	\
+	 &this_cpu_ptr(&kvm_host_data)->f :				\
+	 &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
+#endif
+
 static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
...
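
The accessor's mode-dependent lookup deserves a second look. Below is a hedged model of the EL1 side of the macro above: a plain bool stands in for the kvm_protected_mode_initialized static key, and two globals stand in for the two per-CPU instances.

#include <stdbool.h>
#include <stdio.h>

struct kvm_host_data { int fp_owner; };

static struct kvm_host_data vhe_copy;	/* the kernel's own instance */
static struct kvm_host_data hyp_copy;	/* the nVHE hypervisor's instance */
static bool protected_mode;		/* kvm_protected_mode_initialized */

/*
 * EL1's view: normally share the hypervisor's copy; in protected mode
 * the nVHE data is private to EL2, so use the VHE copy instead.
 */
#define host_data_ptr(f) \
	(protected_mode ? &vhe_copy.f : &hyp_copy.f)

int main(void)
{
	protected_mode = false;
	*host_data_ptr(fp_owner) = 1;	/* lands in the shared nVHE copy */

	protected_mode = true;
	*host_data_ptr(fp_owner) = 2;	/* private EL1 state only */

	printf("hyp=%d vhe=%d\n", hyp_copy.fp_owner, vhe_copy.fp_owner);
	return 0;
}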
@@ -378,12 +378,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
 
-	/*
-	 * Default value for the FP state, will be overloaded at load
-	 * time if we support FP (pretty likely)
-	 */
-	vcpu->arch.fp_state = FP_STATE_FREE;
-
 	/* Set up the timer */
 	kvm_timer_vcpu_init(vcpu);
@@ -1971,7 +1965,7 @@ static void cpu_set_hyp_vector(void)
 static void cpu_hyp_init_context(void)
 {
-	kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
+	kvm_init_host_cpu_context(host_data_ptr(host_ctxt));
 
 	if (!is_kernel_in_hyp_mode())
 		cpu_init_hyp_mode();
...
@@ -49,8 +49,6 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
 	if (ret)
 		return ret;
 
-	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
-
 	/*
 	 * We need to keep current's task_struct pinned until its data has been
 	 * unshared with the hypervisor to make sure it is not re-used by the
@@ -86,7 +84,8 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 	 * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
 	 * FP_STATE_FREE if the flag set.
 	 */
-	vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
+	*host_data_ptr(fp_owner) = FP_STATE_HOST_OWNED;
+	*host_data_ptr(fpsimd_state) = kern_hyp_va(&current->thread.uw.fpsimd_state);
 
 	vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
 	if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
@@ -110,7 +109,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 	 * been saved, this is very unlikely to happen.
 	 */
 	if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
-		vcpu->arch.fp_state = FP_STATE_FREE;
+		*host_data_ptr(fp_owner) = FP_STATE_FREE;
 		fpsimd_save_and_flush_cpu_state();
 	}
 }
@@ -126,7 +125,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
 {
 	if (test_thread_flag(TIF_FOREIGN_FPSTATE))
-		vcpu->arch.fp_state = FP_STATE_FREE;
+		*host_data_ptr(fp_owner) = FP_STATE_FREE;
 }
 
 /*
@@ -142,7 +141,7 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
 	WARN_ON_ONCE(!irqs_disabled());
 
-	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+	if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) {
 		/*
 		 * Currently we do not support SME guests so SVCR is
@@ -196,7 +195,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
 		isb();
 	}
 
-	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED) {
+	if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED) {
 		if (vcpu_has_sve(vcpu)) {
 			__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
...
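
Taken together, the hunks above amount to a small ownership state machine that now lives in per-CPU data rather than in the vcpu. A toy walk-through of the transitions, using the function names from the diff (the sequence shown is one illustrative path, not exhaustive):

#include <stdio.h>

enum fp_owner { FP_STATE_FREE, FP_STATE_HOST_OWNED, FP_STATE_GUEST_OWNED };

/* Per physical CPU in the real code; a single CPU is modelled here. */
static enum fp_owner fp_owner = FP_STATE_FREE;

int main(void)
{
	/* kvm_arch_vcpu_load_fp(): the host's FP state is live in the regs. */
	fp_owner = FP_STATE_HOST_OWNED;

	/* kvm_arch_vcpu_ctxflush_fp(): TIF_FOREIGN_FPSTATE, nothing to save. */
	fp_owner = FP_STATE_FREE;

	/* kvm_hyp_handle_fpsimd(): the guest's first FP access takes the regs. */
	fp_owner = FP_STATE_GUEST_OWNED;

	/* kvm_arch_vcpu_ctxsync_fp()/put_fp() then save the guest state back. */
	printf("fp_owner=%d\n", fp_owner);
	return 0;
}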
@@ -135,9 +135,9 @@ static inline void __debug_switch_to_guest_common(struct kvm_vcpu *vcpu)
 	if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
 		return;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 	guest_ctxt = &vcpu->arch.ctxt;
-	host_dbg = &vcpu->arch.host_debug_state.regs;
+	host_dbg = host_data_ptr(host_debug_state.regs);
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
 
 	__debug_save_state(host_dbg, host_ctxt);
@@ -154,9 +154,9 @@ static inline void __debug_switch_to_host_common(struct kvm_vcpu *vcpu)
 	if (!vcpu_get_flag(vcpu, DEBUG_DIRTY))
 		return;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 	guest_ctxt = &vcpu->arch.ctxt;
-	host_dbg = &vcpu->arch.host_debug_state.regs;
+	host_dbg = host_data_ptr(host_debug_state.regs);
 	guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
 
 	__debug_save_state(guest_dbg, guest_ctxt);
...
@@ -42,7 +42,7 @@ extern struct kvm_exception_table_entry __stop___kvm_ex_table;
 /* Check whether the FP regs are owned by the guest */
 static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
 {
-	return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
+	return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
 }
 
 /* Save the 32-bit only FPSIMD system register state */
@@ -155,7 +155,7 @@ static inline bool cpu_has_amu(void)
 static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
 	CHECK_FGT_MASKS(HFGRTR_EL2);
@@ -191,7 +191,7 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
 static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
 	if (!cpus_have_final_cap(ARM64_HAS_FGT))
@@ -226,13 +226,13 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 		write_sysreg(0, pmselr_el0);
 
-		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+		hctxt = host_data_ptr(host_ctxt);
 		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
 		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
 		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
 	}
 
-	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
+	*host_data_ptr(host_debug_state.mdcr_el2) = read_sysreg(mdcr_el2);
 	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 
 	if (cpus_have_final_cap(ARM64_HAS_HCX)) {
@@ -254,13 +254,13 @@ static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
 static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
 {
-	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);
+	write_sysreg(*host_data_ptr(host_debug_state.mdcr_el2), mdcr_el2);
 
 	write_sysreg(0, hstr_el2);
 	if (kvm_arm_support_pmu_v3()) {
 		struct kvm_cpu_context *hctxt;
 
-		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+		hctxt = host_data_ptr(host_ctxt);
 		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
 		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
 	}
@@ -376,8 +376,8 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	isb();
 
 	/* Write out the host state if it's in the registers */
-	if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
-		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
+	if (*host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED)
+		__fpsimd_save_state(*host_data_ptr(fpsimd_state));
 
 	/* Restore the guest state */
 	if (sve_guest)
@@ -389,7 +389,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
 	if (!(read_sysreg(hcr_el2) & HCR_RW))
 		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);
 
-	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;
+	*host_data_ptr(fp_owner) = FP_STATE_GUEST_OWNED;
 
 	return true;
 }
...
@@ -83,10 +83,10 @@ void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
 	/* Disable and flush SPE data generation */
 	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
-		__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
+		__debug_save_spe(host_data_ptr(host_debug_state.pmscr_el1));
 	/* Disable and flush Self-Hosted Trace generation */
 	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
-		__debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
+		__debug_save_trace(host_data_ptr(host_debug_state.trfcr_el1));
 }
 
 void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
@@ -97,9 +97,9 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
 void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
 {
 	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
-		__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
+		__debug_restore_spe(*host_data_ptr(host_debug_state.pmscr_el1));
 	if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
-		__debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
+		__debug_restore_trace(*host_data_ptr(host_debug_state.trfcr_el1));
 }
 
 void __debug_switch_to_host(struct kvm_vcpu *vcpu)
...
@@ -39,10 +39,8 @@ static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
 
 	hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
-	hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
 
 	hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
-	hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
 
 	hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
 
@@ -64,7 +62,6 @@ static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
 	host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
 
 	host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
-	host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
 
 	host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
 	for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
...
@@ -205,7 +205,7 @@ asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
 	struct psci_boot_args *boot_args;
 	struct kvm_cpu_context *host_ctxt;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 
 	if (is_cpu_on)
 		boot_args = this_cpu_ptr(&cpu_on_args);
...
@@ -257,8 +257,7 @@ static int fix_hyp_pgtable_refcnt(void)
 void __noreturn __pkvm_init_finalise(void)
 {
-	struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
-	struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
+	struct kvm_cpu_context *host_ctxt = host_data_ptr(host_ctxt);
 	unsigned long nr_pages, reserved_pages, pfn;
 	int ret;
...
@@ -264,7 +264,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		pmr_sync();
 	}
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 	host_ctxt->__hyp_running_vcpu = vcpu;
 	guest_ctxt = &vcpu->arch.ctxt;
@@ -337,7 +337,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__sysreg_restore_state_nvhe(host_ctxt);
 
-	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+	if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED)
 		__fpsimd_save_fpexc32(vcpu);
 
 	__debug_switch_to_host(vcpu);
@@ -367,7 +367,7 @@ asmlinkage void __noreturn hyp_panic(void)
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 	vcpu = host_ctxt->__hyp_running_vcpu;
 
 	if (vcpu) {
...
@@ -162,6 +162,8 @@ static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
 
 void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
 {
+	host_data_ptr(host_ctxt)->__hyp_running_vcpu = vcpu;
+
 	__vcpu_load_switch_sysregs(vcpu);
 	__vcpu_load_activate_traps(vcpu);
 	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
@@ -171,6 +173,8 @@ void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
 {
 	__vcpu_put_deactivate_traps(vcpu);
 	__vcpu_put_switch_sysregs(vcpu);
+
+	host_data_ptr(host_ctxt)->__hyp_running_vcpu = NULL;
 }
 
 static const exit_handler_fn hyp_exit_handlers[] = {
@@ -221,8 +225,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt;
 	u64 exit_code;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
-	host_ctxt->__hyp_running_vcpu = vcpu;
+	host_ctxt = host_data_ptr(host_ctxt);
 	guest_ctxt = &vcpu->arch.ctxt;
 
 	sysreg_save_host_state_vhe(host_ctxt);
@@ -258,7 +261,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_restore_host_state_vhe(host_ctxt);
 
-	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
+	if (*host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED)
 		__fpsimd_save_fpexc32(vcpu);
 
 	__debug_switch_to_host(vcpu);
@@ -306,7 +309,7 @@ static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
 	struct kvm_cpu_context *host_ctxt;
 	struct kvm_vcpu *vcpu;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 	vcpu = host_ctxt->__hyp_running_vcpu;
 
 	__deactivate_traps(vcpu);
...
@@ -67,7 +67,7 @@ void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 	__sysreg_save_user_state(host_ctxt);
 
 	/*
@@ -110,7 +110,7 @@ void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
 
-	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	host_ctxt = host_data_ptr(host_ctxt);
 	__sysreg_save_el1_state(guest_ctxt);
 	__sysreg_save_user_state(guest_ctxt);
...
@@ -232,7 +232,7 @@ bool kvm_set_pmuserenr(u64 val)
 	if (!vcpu || !vcpu_get_flag(vcpu, PMUSERENR_ON_CPU))
 		return false;
 
-	hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
+	hctxt = host_data_ptr(host_ctxt);
 	ctxt_sys_reg(hctxt, PMUSERENR_EL0) = val;
 	return true;
 }
...