Commit c34b26b9 authored by Tianjia Zhang, committed by Paolo Bonzini

KVM: MIPS: clean up redundant 'kvm_run' parameters

In the current kvm version, 'kvm_run' is already included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related functions still take
both 'kvm_run' and 'kvm_vcpu' as parameters. This patch does a unified
cleanup of these remaining redundant parameters.
Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Reviewed-by: Huacai Chen <chenhc@lemote.com>
Message-Id: <20200623131418.31473-5-tianjia.zhang@linux.alibaba.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 74cc7e0c
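
The whole patch applies one mechanical transformation: every function that used to take the pair (struct kvm_run *run, struct kvm_vcpu *vcpu) drops the 'run' argument, and any body that still needs the run area fetches it through vcpu->run. A minimal, self-contained sketch of the pattern follows; the struct layouts and the handle_exit_*() names are hypothetical stand-ins for illustration, not the actual kernel declarations.

/*
 * Sketch of the cleanup pattern (hypothetical minimal types; the real
 * structures live in the headers touched by this commit).
 */
#include <stdio.h>

struct kvm_run {
    int exit_reason;
};

struct kvm_vcpu {
    struct kvm_run *run;    /* kvm_run is already reachable from the vcpu */
};

/* Old style: 'run' and 'vcpu' passed side by side, redundantly. */
static int handle_exit_old(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    run->exit_reason = 17;  /* stand-in for KVM_EXIT_INTERNAL_ERROR */
    return 0;
}

/* New style: only 'vcpu' is passed; 'run' is derived where needed. */
static int handle_exit_new(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;

    run->exit_reason = 17;
    return 0;
}

int main(void)
{
    struct kvm_run run = { 0 };
    struct kvm_vcpu vcpu = { &run };

    handle_exit_old(&run, &vcpu);   /* before the cleanup */
    handle_exit_new(&vcpu);         /* after the cleanup */
    printf("exit_reason = %d\n", run.exit_reason);
    return 0;
}

The signatures in the diff below change the same way: for example, the vcpu_run() and vcpu_reenter() callbacks in kvm_mips_callbacks lose their kvm_run argument, and kvm_arch_vcpu_ioctl_run() reads vcpu->run->immediate_exit directly.
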
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -843,8 +843,8 @@ struct kvm_mips_callbacks {
                            const struct kvm_one_reg *reg, s64 v);
         int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
         int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
-        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-        void (*vcpu_reenter)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+        int (*vcpu_run)(struct kvm_vcpu *vcpu);
+        void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
 };
 extern struct kvm_mips_callbacks *kvm_mips_callbacks;
 int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
@@ -899,7 +899,6 @@ extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
 extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                                                      u32 *opc,
-                                                     struct kvm_run *run,
                                                      struct kvm_vcpu *vcpu,
                                                      bool write_fault);
@@ -1010,83 +1009,67 @@ static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
 extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
                                                    u32 *opc,
-                                                   struct kvm_run *run,
                                                    struct kvm_vcpu *vcpu);
 
 long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
                                                       u32 *opc,
-                                                      struct kvm_run *run,
                                                       struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
                                                          u32 *opc,
-                                                         struct kvm_run *run,
                                                          struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
                                                         u32 *opc,
-                                                        struct kvm_run *run,
                                                         struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
                                                          u32 *opc,
-                                                         struct kvm_run *run,
                                                          struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
                                                         u32 *opc,
-                                                        struct kvm_run *run,
                                                         struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
                                                      u32 *opc,
-                                                     struct kvm_run *run,
                                                      struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
                                                       u32 *opc,
-                                                      struct kvm_run *run,
                                                       struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_handle_ri(u32 cause,
                                                 u32 *opc,
-                                                struct kvm_run *run,
                                                 struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
                                                      u32 *opc,
-                                                     struct kvm_run *run,
                                                      struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
                                                      u32 *opc,
-                                                     struct kvm_run *run,
                                                      struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
                                                        u32 *opc,
-                                                       struct kvm_run *run,
                                                        struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
                                                          u32 *opc,
-                                                         struct kvm_run *run,
                                                          struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
                                                       u32 *opc,
-                                                      struct kvm_run *run,
                                                       struct kvm_vcpu *vcpu);
 
 extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
                                                          u32 *opc,
-                                                         struct kvm_run *run,
                                                          struct kvm_vcpu *vcpu);
 
-extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
-                                                         struct kvm_run *run);
+extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);
 
 u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
 void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
@@ -1115,26 +1098,21 @@ static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
 enum emulation_result kvm_mips_check_privilege(u32 cause,
                                                u32 *opc,
-                                               struct kvm_run *run,
                                                struct kvm_vcpu *vcpu);
 
 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                                              u32 *opc,
                                              u32 cause,
-                                             struct kvm_run *run,
                                              struct kvm_vcpu *vcpu);
 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                            u32 *opc,
                                            u32 cause,
-                                           struct kvm_run *run,
                                            struct kvm_vcpu *vcpu);
 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                                              u32 cause,
-                                             struct kvm_run *run,
                                              struct kvm_vcpu *vcpu);
 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
                                             u32 cause,
-                                            struct kvm_run *run,
                                             struct kvm_vcpu *vcpu);
 
 /* COP0 */
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1262,7 +1262,6 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
 
 enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
                                            u32 *opc, u32 cause,
-                                           struct kvm_run *run,
                                            struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -1597,12 +1596,12 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 
 enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
                                              u32 cause,
-                                             struct kvm_run *run,
                                              struct kvm_vcpu *vcpu)
 {
         int r;
         enum emulation_result er;
         u32 rt;
+        struct kvm_run *run = vcpu->run;
         void *data = run->mmio.data;
         unsigned int imme;
         unsigned long curr_pc;
@@ -1894,9 +1893,9 @@ enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
 }
 
 enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
-                                            u32 cause, struct kvm_run *run,
-                                            struct kvm_vcpu *vcpu)
+                                            u32 cause, struct kvm_vcpu *vcpu)
 {
+        struct kvm_run *run = vcpu->run;
         int r;
         enum emulation_result er;
         unsigned long curr_pc;
@@ -2136,7 +2135,6 @@ enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
 static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
                                                      unsigned long curr_pc,
                                                      unsigned long addr,
-                                                     struct kvm_run *run,
                                                      struct kvm_vcpu *vcpu,
                                                      u32 cause)
 {
@@ -2164,13 +2162,13 @@ static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
                 /* no matching guest TLB */
                 vcpu->arch.host_cp0_badvaddr = addr;
                 vcpu->arch.pc = curr_pc;
-                kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, vcpu);
+                kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu);
                 return EMULATE_EXCEPT;
         case KVM_MIPS_TLBINV:
                 /* invalid matching guest TLB */
                 vcpu->arch.host_cp0_badvaddr = addr;
                 vcpu->arch.pc = curr_pc;
-                kvm_mips_emulate_tlbinv_ld(cause, NULL, run, vcpu);
+                kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu);
                 return EMULATE_EXCEPT;
         default:
                 break;
@@ -2180,7 +2178,6 @@ static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long),
 
 enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                                              u32 *opc, u32 cause,
-                                             struct kvm_run *run,
                                              struct kvm_vcpu *vcpu)
 {
         enum emulation_result er = EMULATE_DONE;
@@ -2270,7 +2267,7 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
                  * guest's behalf.
                  */
                 er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
-                                             curr_pc, va, run, vcpu, cause);
+                                             curr_pc, va, vcpu, cause);
                 if (er != EMULATE_DONE)
                         goto done;
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -2283,11 +2280,11 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
         } else if (op_inst == Hit_Invalidate_I) {
                 /* Perform the icache synchronisation on the guest's behalf */
                 er = kvm_mips_guest_cache_op(protected_writeback_dcache_line,
-                                             curr_pc, va, run, vcpu, cause);
+                                             curr_pc, va, vcpu, cause);
                 if (er != EMULATE_DONE)
                         goto done;
                 er = kvm_mips_guest_cache_op(protected_flush_icache_line,
-                                             curr_pc, va, run, vcpu, cause);
+                                             curr_pc, va, vcpu, cause);
                 if (er != EMULATE_DONE)
                         goto done;
@@ -2313,7 +2310,6 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
 }
 
 enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
-                                            struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
 {
         union mips_instruction inst;
@@ -2329,14 +2325,14 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
         switch (inst.r_format.opcode) {
         case cop0_op:
-                er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+                er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu);
                 break;
 #ifndef CONFIG_CPU_MIPSR6
         case cache_op:
                 ++vcpu->stat.cache_exits;
                 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-                er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+                er = kvm_mips_emulate_cache(inst, opc, cause, vcpu);
                 break;
 #else
         case spec3_op:
@@ -2344,7 +2340,7 @@ enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
         case cache6_op:
                 ++vcpu->stat.cache_exits;
                 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-                er = kvm_mips_emulate_cache(inst, opc, cause, run,
+                er = kvm_mips_emulate_cache(inst, opc, cause,
                                             vcpu);
                 break;
         default:
@@ -2384,7 +2380,6 @@ long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu)
 
 enum emulation_result kvm_mips_emulate_syscall(u32 cause,
                                                u32 *opc,
-                                               struct kvm_run *run,
                                                struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2419,7 +2414,6 @@ enum emulation_result kvm_mips_emulate_syscall(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
                                                   u32 *opc,
-                                                  struct kvm_run *run,
                                                   struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2463,7 +2457,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
                                                  u32 *opc,
-                                                 struct kvm_run *run,
                                                  struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2505,7 +2498,6 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
                                                   u32 *opc,
-                                                  struct kvm_run *run,
                                                   struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2547,7 +2539,6 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
                                                  u32 *opc,
-                                                 struct kvm_run *run,
                                                  struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2588,7 +2579,6 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
 
 enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
                                               u32 *opc,
-                                              struct kvm_run *run,
                                               struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2628,7 +2618,6 @@ enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
 
 enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
                                                u32 *opc,
-                                               struct kvm_run *run,
                                                struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2657,7 +2646,6 @@ enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
                                               u32 *opc,
-                                              struct kvm_run *run,
                                               struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2692,7 +2680,6 @@ enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
                                               u32 *opc,
-                                              struct kvm_run *run,
                                               struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2727,7 +2714,6 @@ enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
                                                 u32 *opc,
-                                                struct kvm_run *run,
                                                 struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2762,7 +2748,6 @@ enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
                                                   u32 *opc,
-                                                  struct kvm_run *run,
                                                   struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2797,7 +2782,6 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
                                                u32 *opc,
-                                               struct kvm_run *run,
                                                struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2832,7 +2816,6 @@ enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
 
 enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
                                                   u32 *opc,
-                                                  struct kvm_run *run,
                                                   struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2866,7 +2849,6 @@ enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
 }
 
 enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
-                                         struct kvm_run *run,
                                          struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -2955,12 +2937,12 @@ enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
          * branch target), and pass the RI exception to the guest OS.
          */
         vcpu->arch.pc = curr_pc;
-        return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+        return kvm_mips_emulate_ri_exc(cause, opc, vcpu);
 }
 
-enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
-                                                  struct kvm_run *run)
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu)
 {
+        struct kvm_run *run = vcpu->run;
         unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
         enum emulation_result er = EMULATE_DONE;
@@ -3103,7 +3085,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 
 static enum emulation_result kvm_mips_emulate_exc(u32 cause,
                                                   u32 *opc,
-                                                  struct kvm_run *run,
                                                   struct kvm_vcpu *vcpu)
 {
         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
@@ -3141,7 +3122,6 @@ static enum emulation_result kvm_mips_emulate_exc(u32 cause,
 
 enum emulation_result kvm_mips_check_privilege(u32 cause,
                                                u32 *opc,
-                                               struct kvm_run *run,
                                                struct kvm_vcpu *vcpu)
 {
         enum emulation_result er = EMULATE_DONE;
@@ -3223,7 +3203,7 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
         }
 
         if (er == EMULATE_PRIV_FAIL)
-                kvm_mips_emulate_exc(cause, opc, run, vcpu);
+                kvm_mips_emulate_exc(cause, opc, vcpu);
 
         return er;
 }
@@ -3237,7 +3217,6 @@ enum emulation_result kvm_mips_check_privilege(u32 cause,
  */
 enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                                               u32 *opc,
-                                              struct kvm_run *run,
                                               struct kvm_vcpu *vcpu,
                                               bool write_fault)
 {
@@ -3261,9 +3240,9 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                         KVM_ENTRYHI_ASID));
         if (index < 0) {
                 if (exccode == EXCCODE_TLBL) {
-                        er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+                        er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu);
                 } else if (exccode == EXCCODE_TLBS) {
-                        er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+                        er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu);
                 } else {
                         kvm_err("%s: invalid exc code: %d\n", __func__,
                                 exccode);
@@ -3278,10 +3257,10 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
                  */
                 if (!TLB_IS_VALID(*tlb, va)) {
                         if (exccode == EXCCODE_TLBL) {
-                                er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
-                                                                vcpu);
+                                er = kvm_mips_emulate_tlbinv_ld(cause, opc,
+                                                                vcpu);
                         } else if (exccode == EXCCODE_TLBS) {
-                                er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
-                                                                vcpu);
+                                er = kvm_mips_emulate_tlbinv_st(cause, opc,
+                                                                vcpu);
                         } else {
                                 kvm_err("%s: invalid exc code: %d\n", __func__,
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -450,7 +450,6 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         int r = -EINTR;
 
         vcpu_load(vcpu);
@@ -459,11 +458,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
         if (vcpu->mmio_needed) {
                 if (!vcpu->mmio_is_write)
-                        kvm_mips_complete_mmio_load(vcpu, run);
+                        kvm_mips_complete_mmio_load(vcpu);
                 vcpu->mmio_needed = 0;
         }
 
-        if (run->immediate_exit)
+        if (vcpu->run->immediate_exit)
                 goto out;
 
         lose_fpu(1);
@@ -480,7 +479,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
          */
         smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-        r = kvm_mips_callbacks->vcpu_run(run, vcpu);
+        r = kvm_mips_callbacks->vcpu_run(vcpu);
 
         trace_kvm_out(vcpu);
         guest_exit_irqoff();
@@ -1236,7 +1235,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
                  * end up causing an exception to be delivered to the Guest
                  * Kernel
                  */
-                er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+                er = kvm_mips_check_privilege(cause, opc, vcpu);
                 if (er == EMULATE_PRIV_FAIL) {
                         goto skip_emul;
                 } else if (er == EMULATE_FAIL) {
@@ -1385,7 +1384,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
          */
         smp_store_mb(vcpu->mode, IN_GUEST_MODE);
 
-        kvm_mips_callbacks->vcpu_reenter(run, vcpu);
+        kvm_mips_callbacks->vcpu_reenter(vcpu);
 
         /*
          * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -67,7 +67,6 @@ static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
 static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
@@ -81,14 +80,14 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
                          * Unusable/no FPU in guest:
                          * deliver guest COP1 Unusable Exception
                          */
-                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+                        er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu);
                 } else {
                         /* Restore FPU state */
                         kvm_own_fpu(vcpu);
                         er = EMULATE_DONE;
                 }
         } else {
-                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+                er = kvm_mips_emulate_inst(cause, opc, vcpu);
         }
 
         switch (er) {
@@ -97,12 +96,12 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
                 break;
 
         case EMULATE_FAIL:
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
                 break;
 
         case EMULATE_WAIT:
-                run->exit_reason = KVM_EXIT_INTR;
+                vcpu->run->exit_reason = KVM_EXIT_INTR;
                 ret = RESUME_HOST;
                 break;
@@ -116,8 +115,7 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
         return ret;
 }
 
-static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
-                             struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
         enum emulation_result er;
         union mips_instruction inst;
@@ -125,7 +123,7 @@ static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
 
         /* A code fetch fault doesn't count as an MMIO */
         if (kvm_is_ifetch_fault(&vcpu->arch)) {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 return RESUME_HOST;
         }
@@ -134,23 +132,22 @@ static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
                 opc += 1;
         err = kvm_get_badinstr(opc, vcpu, &inst.word);
         if (err) {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 return RESUME_HOST;
         }
 
         /* Emulate the load */
-        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+        er = kvm_mips_emulate_load(inst, cause, vcpu);
         if (er == EMULATE_FAIL) {
                 kvm_err("Emulate load from MMIO space failed\n");
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
         } else {
-                run->exit_reason = KVM_EXIT_MMIO;
+                vcpu->run->exit_reason = KVM_EXIT_MMIO;
         }
         return RESUME_HOST;
 }
 
-static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
-                             struct kvm_vcpu *vcpu)
+static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu)
 {
         enum emulation_result er;
         union mips_instruction inst;
@@ -161,34 +158,33 @@ static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
                 opc += 1;
         err = kvm_get_badinstr(opc, vcpu, &inst.word);
         if (err) {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 return RESUME_HOST;
         }
 
         /* Emulate the store */
-        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+        er = kvm_mips_emulate_store(inst, cause, vcpu);
         if (er == EMULATE_FAIL) {
                 kvm_err("Emulate store to MMIO space failed\n");
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
         } else {
-                run->exit_reason = KVM_EXIT_MMIO;
+                vcpu->run->exit_reason = KVM_EXIT_MMIO;
         }
         return RESUME_HOST;
 }
 
-static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
+static int kvm_mips_bad_access(u32 cause, u32 *opc,
                                struct kvm_vcpu *vcpu, bool store)
 {
         if (store)
-                return kvm_mips_bad_store(cause, opc, run, vcpu);
+                return kvm_mips_bad_store(cause, opc, vcpu);
         else
-                return kvm_mips_bad_load(cause, opc, run, vcpu);
+                return kvm_mips_bad_load(cause, opc, vcpu);
 }
 
 static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
         u32 cause = vcpu->arch.host_cp0_cause;
@@ -212,12 +208,12 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
          * They would indicate stale host TLB entries.
          */
         if (unlikely(index < 0)) {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 return RESUME_HOST;
         }
 
         tlb = vcpu->arch.guest_tlb + index;
         if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 return RESUME_HOST;
         }
@@ -226,23 +222,23 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
                  * exception. Relay that on to the guest so it can handle it.
                  */
                 if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
-                        kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+                        kvm_mips_emulate_tlbmod(cause, opc, vcpu);
                         return RESUME_GUEST;
                 }
 
                 if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
                                                          true))
                         /* Not writable, needs handling as MMIO */
-                        return kvm_mips_bad_store(cause, opc, run, vcpu);
+                        return kvm_mips_bad_store(cause, opc, vcpu);
                 return RESUME_GUEST;
         } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                 if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
                         /* Not writable, needs handling as MMIO */
-                        return kvm_mips_bad_store(cause, opc, run, vcpu);
+                        return kvm_mips_bad_store(cause, opc, vcpu);
                 return RESUME_GUEST;
         } else {
                 /* host kernel addresses are all handled as MMIO */
-                return kvm_mips_bad_store(cause, opc, run, vcpu);
+                return kvm_mips_bad_store(cause, opc, vcpu);
         }
 }
@@ -276,7 +272,7 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
                  * into the shadow host TLB
                  */
 
-                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
+                er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store);
                 if (er == EMULATE_DONE)
                         ret = RESUME_GUEST;
                 else {
@@ -289,14 +285,14 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
                  * not expect to ever get them
                  */
                 if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
-                        ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+                        ret = kvm_mips_bad_access(cause, opc, vcpu, store);
         } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                 /*
                  * With EVA we may get a TLB exception instead of an address
                  * error when the guest performs MMIO to KSeg1 addresses.
                  */
-                ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
+                ret = kvm_mips_bad_access(cause, opc, vcpu, store);
         } else {
                 kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
                         store ? "ST" : "LD", cause, opc, badvaddr);
@@ -320,7 +316,6 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
         u32 cause = vcpu->arch.host_cp0_cause;
@@ -328,11 +323,11 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
         if (KVM_GUEST_KERNEL_MODE(vcpu)
             && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-                ret = kvm_mips_bad_store(cause, opc, run, vcpu);
+                ret = kvm_mips_bad_store(cause, opc, vcpu);
         } else {
                 kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                         cause, opc, badvaddr);
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -340,18 +335,17 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
         u32 cause = vcpu->arch.host_cp0_cause;
         int ret = RESUME_GUEST;
 
         if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-                ret = kvm_mips_bad_load(cause, opc, run, vcpu);
+                ret = kvm_mips_bad_load(cause, opc, vcpu);
         } else {
                 kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                         cause, opc, badvaddr);
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -359,17 +353,16 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
         int ret = RESUME_GUEST;
 
-        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+        er = kvm_mips_emulate_syscall(cause, opc, vcpu);
         if (er == EMULATE_DONE)
                 ret = RESUME_GUEST;
         else {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -377,17 +370,16 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
         int ret = RESUME_GUEST;
 
-        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+        er = kvm_mips_handle_ri(cause, opc, vcpu);
         if (er == EMULATE_DONE)
                 ret = RESUME_GUEST;
         else {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -395,17 +387,16 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
         int ret = RESUME_GUEST;
 
-        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+        er = kvm_mips_emulate_bp_exc(cause, opc, vcpu);
         if (er == EMULATE_DONE)
                 ret = RESUME_GUEST;
         else {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -413,17 +404,16 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *)vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
         int ret = RESUME_GUEST;
 
-        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
+        er = kvm_mips_emulate_trap_exc(cause, opc, vcpu);
         if (er == EMULATE_DONE) {
                 ret = RESUME_GUEST;
         } else {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -431,17 +421,16 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *)vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
         int ret = RESUME_GUEST;
 
-        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
+        er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu);
         if (er == EMULATE_DONE) {
                 ret = RESUME_GUEST;
         } else {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -449,17 +438,16 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
 
 static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *)vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
         int ret = RESUME_GUEST;
 
-        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
+        er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu);
         if (er == EMULATE_DONE) {
                 ret = RESUME_GUEST;
         } else {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
         }
 
         return ret;
@@ -474,7 +462,6 @@ static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
 static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
-        struct kvm_run *run = vcpu->run;
         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_DONE;
@@ -486,10 +473,10 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
                  * No MSA in guest, or FPU enabled and not in FR=1 mode,
                  * guest reserved instruction exception
                  */
-                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+                er = kvm_mips_emulate_ri_exc(cause, opc, vcpu);
         } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                 /* MSA disabled by guest, guest MSA disabled exception */
-                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
+                er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu);
         } else {
                 /* Restore MSA/FPU state */
                 kvm_own_msa(vcpu);
@@ -502,7 +489,7 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
                 break;
 
         case EMULATE_FAIL:
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
                 break;
@@ -1184,8 +1171,7 @@ void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
         local_irq_enable();
 }
 
-static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
-                                       struct kvm_vcpu *vcpu)
+static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
         struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
         struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
@@ -1228,7 +1214,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
         check_mmu_context(mm);
 }
 
-static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
 {
         int cpu = smp_processor_id();
         int r;
@@ -1237,7 +1223,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
         kvm_mips_deliver_interrupts(vcpu,
                                     kvm_read_c0_guest_cause(vcpu->arch.cop0));
 
-        kvm_trap_emul_vcpu_reenter(run, vcpu);
+        kvm_trap_emul_vcpu_reenter(vcpu);
 
         /*
          * We use user accessors to access guest memory, but we don't want to
@@ -1255,7 +1241,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
          */
         kvm_mips_suspend_mm(cpu);
 
-        r = vcpu->arch.vcpu_run(run, vcpu);
+        r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
 
         /* We may have migrated while handling guest exits */
         cpu = smp_processor_id();
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -874,7 +874,6 @@ static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
 
 static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
                                               u32 *opc, u32 cause,
-                                              struct kvm_run *run,
                                               struct kvm_vcpu *vcpu)
 {
         struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -1074,7 +1073,6 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
 
 static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
                                                u32 *opc, u32 cause,
-                                               struct kvm_run *run,
                                                struct kvm_vcpu *vcpu)
 {
         enum emulation_result er = EMULATE_DONE;
@@ -1217,7 +1215,6 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
 {
         enum emulation_result er = EMULATE_DONE;
         struct kvm_vcpu_arch *arch = &vcpu->arch;
-        struct kvm_run *run = vcpu->run;
         union mips_instruction inst;
         int rd, rt, sel;
         int err;
@@ -1233,12 +1230,12 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
         switch (inst.r_format.opcode) {
         case cop0_op:
-                er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
+                er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu);
                 break;
 #ifndef CONFIG_CPU_MIPSR6
         case cache_op:
                 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-                er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+                er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
                 break;
 #endif
 #ifdef CONFIG_CPU_LOONGSON64
@@ -1251,7 +1248,7 @@ static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
 #ifdef CONFIG_CPU_MIPSR6
         case cache6_op:
                 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
-                er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
+                er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu);
                 break;
 #endif
         case rdhwr_op:
@@ -1553,7 +1550,6 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
  */
 static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
         u32 cause = vcpu->arch.host_cp0_cause;
         enum emulation_result er = EMULATE_FAIL;
         int ret = RESUME_GUEST;
@@ -1581,7 +1577,7 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
                 break;
 
         case EMULATE_FAIL:
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 ret = RESUME_HOST;
                 break;
@@ -1600,8 +1596,6 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
  */
 static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
 {
-        struct kvm_run *run = vcpu->run;
-
         /*
          * If MSA not present or not exposed to guest or FR=0, the MSA operation
          * should have been treated as a reserved instruction!
@@ -1612,7 +1606,7 @@ static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
             (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
             !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
             vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
-                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                 return RESUME_HOST;
         }
@@ -1648,7 +1642,7 @@ static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
         }
 
         /* Treat as MMIO */
-        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+        er = kvm_mips_emulate_load(inst, cause, vcpu);
         if (er == EMULATE_FAIL) {
                 kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
                         opc, badvaddr);
@@ -1695,7 +1689,7 @@ static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
         }
 
         /* Treat as MMIO */
-        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+        er = kvm_mips_emulate_store(inst, cause, vcpu);
         if (er == EMULATE_FAIL) {
                 kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
                         opc, badvaddr);
@@ -3242,7 +3236,7 @@ static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
         kvm_vz_flush_shadow_all(kvm);
 }
 
-static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu)
 {
         int cpu = smp_processor_id();
         int preserve_guest_tlb;
@@ -3258,7 +3252,7 @@ static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
                 kvm_vz_vcpu_load_wired(vcpu);
 }
 
-static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
+static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
 {
         int cpu = smp_processor_id();
         int r;
@@ -3271,7 +3265,7 @@ static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
         kvm_vz_vcpu_load_tlb(vcpu, cpu);
         kvm_vz_vcpu_load_wired(vcpu);
 
-        r = vcpu->arch.vcpu_run(run, vcpu);
+        r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
 
         kvm_vz_vcpu_save_wired(vcpu);