Commit 2f0a83be authored by Tianjia Zhang, committed by Paolo Bonzini

KVM: s390: clean up redundant 'kvm_run' parameters

In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
structure. For historical reasons, many kvm-related function parameters
retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
patch does a unified cleanup of these remaining redundant parameters.
Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20200623131418.31473-2-tianjia.zhang@linux.alibaba.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 6627a72c
...@@ -4173,8 +4173,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) ...@@ -4173,8 +4173,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc; return rc;
} }
static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void sync_regs_fmt2(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *kvm_run = vcpu->run;
struct runtime_instr_cb *riccb; struct runtime_instr_cb *riccb;
struct gs_cb *gscb; struct gs_cb *gscb;
...@@ -4240,8 +4241,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -4240,8 +4241,10 @@ static void sync_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* SIE will load etoken directly from SDNX and therefore kvm_run */ /* SIE will load etoken directly from SDNX and therefore kvm_run */
} }
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void sync_regs(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *kvm_run = vcpu->run;
if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix); kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) { if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
...@@ -4270,7 +4273,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -4270,7 +4273,7 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* Sync fmt2 only data */ /* Sync fmt2 only data */
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) { if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) {
sync_regs_fmt2(vcpu, kvm_run); sync_regs_fmt2(vcpu);
} else { } else {
/* /*
* In several places we have to modify our internal view to * In several places we have to modify our internal view to
...@@ -4289,8 +4292,10 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -4289,8 +4292,10 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->kvm_dirty_regs = 0; kvm_run->kvm_dirty_regs = 0;
} }
static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void store_regs_fmt2(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *kvm_run = vcpu->run;
kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr; kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
kvm_run->s.regs.pp = vcpu->arch.sie_block->pp; kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea; kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
...@@ -4310,8 +4315,10 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -4310,8 +4315,10 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
/* SIE will save etoken directly into SDNX and therefore kvm_run */ /* SIE will save etoken directly into SDNX and therefore kvm_run */
} }
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) static void store_regs(struct kvm_vcpu *vcpu)
{ {
struct kvm_run *kvm_run = vcpu->run;
kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask; kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr; kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu); kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
...@@ -4330,7 +4337,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) ...@@ -4330,7 +4337,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
if (likely(!kvm_s390_pv_cpu_is_protected(vcpu))) if (likely(!kvm_s390_pv_cpu_is_protected(vcpu)))
store_regs_fmt2(vcpu, kvm_run); store_regs_fmt2(vcpu);
} }
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
...@@ -4368,7 +4375,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -4368,7 +4375,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
goto out; goto out;
} }
sync_regs(vcpu, kvm_run); sync_regs(vcpu);
enable_cpu_timer_accounting(vcpu); enable_cpu_timer_accounting(vcpu);
might_fault(); might_fault();
...@@ -4390,7 +4397,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -4390,7 +4397,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
} }
disable_cpu_timer_accounting(vcpu); disable_cpu_timer_accounting(vcpu);
store_regs(vcpu, kvm_run); store_regs(vcpu);
kvm_sigset_deactivate(vcpu); kvm_sigset_deactivate(vcpu);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment