Commit 57f252f2 authored by Jan Kiszka, committed by Gleb Natapov

KVM: x86: Drop unused return code from VCPU reset callback

Neither vmx nor svm nor the common part may generate an error on
kvm_vcpu_reset. So drop the return code.
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
parent 03ba32ca
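
The change follows a common cleanup pattern: when a callback can never fail, narrowing its return type from int to void lets every caller drop dead error-handling paths. The snippet below is a minimal, self-contained sketch of that pattern using hypothetical names (struct ops, backend_reset, vcpu_reset); it only illustrates the idea and is not the kernel code touched by this commit.

/*
 * Minimal sketch, hypothetical names only: a reset hook that cannot fail
 * is declared void instead of int, so its caller loses the "check and
 * propagate an error that never happens" boilerplate.
 */
#include <stdio.h>

struct vcpu;

struct ops {
	void (*reset)(struct vcpu *v);	/* was: int (*reset)(struct vcpu *v); */
};

struct vcpu {
	int pending_events;
	const struct ops *ops;
};

static void backend_reset(struct vcpu *v)
{
	v->pending_events = 0;		/* nothing in here can fail */
}

static const struct ops backend_ops = { .reset = backend_reset };

static void vcpu_reset(struct vcpu *v)
{
	v->ops->reset(v);		/* was: return v->ops->reset(v); */
}

int main(void)
{
	struct vcpu v = { .pending_events = 3, .ops = &backend_ops };

	vcpu_reset(&v);
	printf("pending_events after reset: %d\n", v.pending_events);
	return 0;
}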
arch/x86/include/asm/kvm_host.h
@@ -643,7 +643,7 @@ struct kvm_x86_ops {
 	/* Create, but do not attach this VCPU */
 	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
 	void (*vcpu_free)(struct kvm_vcpu *vcpu);
-	int (*vcpu_reset)(struct kvm_vcpu *vcpu);
+	void (*vcpu_reset)(struct kvm_vcpu *vcpu);
 
 	void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
 	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
arch/x86/kvm/svm.c
@@ -1191,7 +1191,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	enable_gif(svm);
 }
 
-static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
+static void svm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 	u32 dummy;
@@ -1207,8 +1207,6 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
 	kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
-
-	return 0;
 }
 
 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
arch/x86/kvm/vmx.c
@@ -4100,11 +4100,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	return 0;
 }
 
-static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
+static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u64 msr;
-	int ret;
 
 	vmx->rmode.vm86_active = 0;
@@ -4195,10 +4194,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	update_exception_bitmap(&vmx->vcpu);
 
 	vpid_sync_context(vmx);
-
-	ret = 0;
-
-	return ret;
 }
 
 /*
arch/x86/kvm/x86.c
@@ -162,7 +162,7 @@ u64 __read_mostly host_xcr0;
 
 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
+static void kvm_vcpu_reset(struct kvm_vcpu *vcpu);
 
 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
@@ -5858,9 +5858,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
 			pr_debug("vcpu %d received sipi with vector # %x\n",
 				 vcpu->vcpu_id, vcpu->arch.sipi_vector);
 			kvm_lapic_reset(vcpu);
-			r = kvm_vcpu_reset(vcpu);
-			if (r)
-				return r;
+			kvm_vcpu_reset(vcpu);
 			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 		}
@@ -6486,9 +6484,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 	r = vcpu_load(vcpu);
 	if (r)
 		return r;
-	r = kvm_vcpu_reset(vcpu);
-	if (r == 0)
-		r = kvm_mmu_setup(vcpu);
+	kvm_vcpu_reset(vcpu);
+	r = kvm_mmu_setup(vcpu);
 	vcpu_put(vcpu);
 
 	return r;
@@ -6525,7 +6522,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->vcpu_free(vcpu);
 }
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
 	atomic_set(&vcpu->arch.nmi_queued, 0);
 	vcpu->arch.nmi_pending = 0;
@@ -6552,7 +6549,7 @@ static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs_avail = ~0;
 	vcpu->arch.regs_dirty = ~0;
 
-	return kvm_x86_ops->vcpu_reset(vcpu);
+	kvm_x86_ops->vcpu_reset(vcpu);
 }
 
 int kvm_arch_hardware_enable(void *garbage)