Commit 005467e0 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: Drop obsolete kvm_arch_vcpu_block_finish()

Drop kvm_arch_vcpu_block_finish() now that all arch implementations are
nops.

No functional change intended.

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20211009021236.4122790-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 1460179d
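
For context, here is a minimal sketch of the pattern this patch deletes. It is illustrative only, assuming nothing beyond what the diff below shows: types and the actual halt-wait logic are elided, and the bodies are stand-ins, not the real kernel source. Every architecture had to define kvm_arch_vcpu_block_finish() (s390's was an out-of-line empty function, the rest empty static inlines), and generic code invoked the hook at the end of kvm_vcpu_block():

/*
 * Illustrative sketch of the removed arch hook; not the actual
 * kernel code, which lives in per-arch kvm_host.h headers and
 * virt/kvm/kvm_main.c.
 */
struct kvm_vcpu;

/* Per-arch hook; by this commit, a nop on every architecture. */
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

/* Generic halt path, heavily simplified: the hook ran after wakeup. */
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
        /* ... wait until the vCPU becomes runnable ... */
        kvm_arch_vcpu_block_finish(vcpu);       /* removed by this patch */
}

With every implementation a nop, both the hook and its lone call site can go, which is exactly what each hunk below does.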
arch/arm64/include/asm/kvm_host.h
@@ -717,7 +717,6 @@ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 void kvm_arm_init_debug(void);
 void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
arch/mips/include/asm/kvm_host.h
@@ -897,7 +897,6 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 int kvm_arch_flush_remote_tlb(struct kvm *kvm);
arch/powerpc/include/asm/kvm_host.h
@@ -865,6 +865,5 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 static inline void kvm_arch_exit(void) {}
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #endif /* __POWERPC_KVM_HOST_H__ */
arch/riscv/include/asm/kvm_host.h
@@ -208,7 +208,6 @@ struct kvm_vcpu_arch {
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
 #define KVM_ARCH_WANT_MMU_NOTIFIER
arch/s390/include/asm/kvm_host.h
@@ -1010,6 +1010,4 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);
-
 #endif
arch/s390/kvm/kvm-s390.c
@@ -5047,11 +5047,6 @@ static inline unsigned long nonhyp_mask(int i)
        return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
 }
 
-void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
-{
-}
-
 static int __init kvm_s390_init(void)
 {
        int i;
arch/x86/include/asm/kvm_host.h
@@ -1931,8 +1931,6 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
        static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
 }
 
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
-
 static inline int kvm_cpu_get_apicid(int mps_cpu)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
virt/kvm/kvm_main.c
@@ -3376,7 +3376,6 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        }
 
        trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
-       kvm_arch_vcpu_block_finish(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_block);