Commit 0b7aa583 authored by Tianjia Zhang, committed by Paolo Bonzini

KVM: MIPS: clean up redundant kvm_run parameters in assembly

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.
Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Reviewed-by: Huacai Chen <chenhc@lemote.com>
Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Message-Id: <20200623131418.31473-6-tianjia.zhang@linux.alibaba.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 95b28ac9
...@@ -341,7 +341,7 @@ struct kvm_mips_tlb { ...@@ -341,7 +341,7 @@ struct kvm_mips_tlb {
#define KVM_MIPS_GUEST_TLB_SIZE 64 #define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch { struct kvm_vcpu_arch {
void *guest_ebase; void *guest_ebase;
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu); int (*vcpu_run)(struct kvm_vcpu *vcpu);
/* Host registers preserved across guest mode execution */ /* Host registers preserved across guest mode execution */
unsigned long host_stack; unsigned long host_stack;
...@@ -852,7 +852,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); ...@@ -852,7 +852,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
/* Debug: dump vcpu state */ /* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu); extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
/* Building of entry/exception code */ /* Building of entry/exception code */
int kvm_mips_entry_setup(void); int kvm_mips_entry_setup(void);
......
...@@ -205,7 +205,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int reg) ...@@ -205,7 +205,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int reg)
* Assemble the start of the vcpu_run function to run a guest VCPU. The function * Assemble the start of the vcpu_run function to run a guest VCPU. The function
* conforms to the following prototype: * conforms to the following prototype:
* *
* int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); * int vcpu_run(struct kvm_vcpu *vcpu);
* *
* The exit from the guest and return to the caller is handled by the code * The exit from the guest and return to the caller is handled by the code
* generated by kvm_mips_build_ret_to_host(). * generated by kvm_mips_build_ret_to_host().
...@@ -218,8 +218,7 @@ void *kvm_mips_build_vcpu_run(void *addr) ...@@ -218,8 +218,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
unsigned int i; unsigned int i;
/* /*
* A0: run * A0: vcpu
* A1: vcpu
*/ */
/* k0/k1 not being used in host kernel context */ /* k0/k1 not being used in host kernel context */
...@@ -238,10 +237,10 @@ void *kvm_mips_build_vcpu_run(void *addr) ...@@ -238,10 +237,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
kvm_mips_build_save_scratch(&p, V1, K1); kvm_mips_build_save_scratch(&p, V1, K1);
/* VCPU scratch register has pointer to vcpu */ /* VCPU scratch register has pointer to vcpu */
UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]); UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
/* Offset into vcpu->arch */ /* Offset into vcpu->arch */
UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch)); UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
/* /*
* Save the host stack to VCPU, used for exception processing * Save the host stack to VCPU, used for exception processing
...@@ -645,10 +644,7 @@ void *kvm_mips_build_exit(void *addr) ...@@ -645,10 +644,7 @@ void *kvm_mips_build_exit(void *addr)
/* Now that context has been saved, we can use other registers */ /* Now that context has been saved, we can use other registers */
/* Restore vcpu */ /* Restore vcpu */
UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]); UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
/* Restore run (vcpu->run) */
UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
/* /*
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
...@@ -810,7 +806,6 @@ void *kvm_mips_build_exit(void *addr) ...@@ -810,7 +806,6 @@ void *kvm_mips_build_exit(void *addr)
* with this in the kernel * with this in the kernel
*/ */
uasm_i_move(&p, A0, S0); uasm_i_move(&p, A0, S0);
uasm_i_move(&p, A1, S1);
UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit); UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
uasm_i_jalr(&p, RA, T9); uasm_i_jalr(&p, RA, T9);
UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ); UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
...@@ -852,7 +847,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr) ...@@ -852,7 +847,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
* guest, reload k1 * guest, reload k1
*/ */
uasm_i_move(&p, K1, S1); uasm_i_move(&p, K1, S0);
UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
/* /*
...@@ -886,8 +881,8 @@ static void *kvm_mips_build_ret_to_guest(void *addr) ...@@ -886,8 +881,8 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
{ {
u32 *p = addr; u32 *p = addr;
/* Put the saved pointer to vcpu (s1) back into the scratch register */ /* Put the saved pointer to vcpu (s0) back into the scratch register */
UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]); UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
/* Load up the Guest EBASE to minimize the window where BEV is set */ /* Load up the Guest EBASE to minimize the window where BEV is set */
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
......
...@@ -1199,8 +1199,9 @@ static void kvm_mips_set_c0_status(void) ...@@ -1199,8 +1199,9 @@ static void kvm_mips_set_c0_status(void)
/* /*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/ */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause; u32 cause = vcpu->arch.host_cp0_cause;
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
u32 __user *opc = (u32 __user *) vcpu->arch.pc; u32 __user *opc = (u32 __user *) vcpu->arch.pc;
......
...@@ -1241,7 +1241,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -1241,7 +1241,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
*/ */
kvm_mips_suspend_mm(cpu); kvm_mips_suspend_mm(cpu);
r = vcpu->arch.vcpu_run(vcpu->run, vcpu); r = vcpu->arch.vcpu_run(vcpu);
/* We may have migrated while handling guest exits */ /* We may have migrated while handling guest exits */
cpu = smp_processor_id(); cpu = smp_processor_id();
......
...@@ -3266,7 +3266,7 @@ static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu) ...@@ -3266,7 +3266,7 @@ static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
kvm_vz_vcpu_load_tlb(vcpu, cpu); kvm_vz_vcpu_load_tlb(vcpu, cpu);
kvm_vz_vcpu_load_wired(vcpu); kvm_vz_vcpu_load_wired(vcpu);
r = vcpu->arch.vcpu_run(vcpu->run, vcpu); r = vcpu->arch.vcpu_run(vcpu);
kvm_vz_vcpu_save_wired(vcpu); kvm_vz_vcpu_save_wired(vcpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment