Commit c3e8abf0 authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Remove defunct pre_block/post_block kvm_x86_ops hooks

Drop kvm_x86_ops' pre/post_block() now that all implementations are nops.

No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20211208015236.1616697-10-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b6d42bad
@@ -99,8 +99,6 @@ KVM_X86_OP(handle_exit_irqoff)
 KVM_X86_OP_NULL(request_immediate_exit)
 KVM_X86_OP(sched_in)
 KVM_X86_OP_NULL(update_cpu_dirty_logging)
-KVM_X86_OP_NULL(pre_block)
-KVM_X86_OP_NULL(post_block)
 KVM_X86_OP_NULL(vcpu_blocking)
 KVM_X86_OP_NULL(vcpu_unblocking)
 KVM_X86_OP_NULL(update_pi_irte)
...
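The hooks being removed were declared with KVM_X86_OP_NULL(), i.e. a vendor module may leave them NULL, which is why the x86.c call sites further down guard each invocation before calling. Below is a minimal user-space sketch of that optional-hook pattern, using plain function pointers rather than the kernel's static_call machinery; all names here (struct x86_ops, call_pre_block, etc.) are made up for illustration and are not kernel APIs.

#include <stdio.h>

struct vcpu { int id; };                    /* illustrative stand-in for struct kvm_vcpu */

struct x86_ops {
	int  (*pre_block)(struct vcpu *v);  /* optional: may be left NULL by a vendor module */
	void (*post_block)(struct vcpu *v); /* optional: may be left NULL by a vendor module */
};

/* Caller-side guard: treat a missing pre_block as "return 0, keep blocking". */
static int call_pre_block(const struct x86_ops *ops, struct vcpu *v)
{
	return ops->pre_block ? ops->pre_block(v) : 0;
}

/* Caller-side guard: silently skip a missing post_block. */
static void call_post_block(const struct x86_ops *ops, struct vcpu *v)
{
	if (ops->post_block)
		ops->post_block(v);
}

int main(void)
{
	struct x86_ops ops = { 0 };         /* both hooks left NULL, as after this commit */
	struct vcpu v = { .id = 0 };

	printf("pre_block -> %d\n", call_pre_block(&ops, &v)); /* prints 0 */
	call_post_block(&ops, &v);                              /* no-op */
	return 0;
}

Once every implementation is a nop, the guards themselves become dead weight, which is what this commit removes.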
@@ -1454,18 +1454,6 @@ struct kvm_x86_ops {
 	const struct kvm_pmu_ops *pmu_ops;
 	const struct kvm_x86_nested_ops *nested_ops;
 
-	/*
-	 * Architecture specific hooks for vCPU blocking due to
-	 * HLT instruction.
-	 * Returns for .pre_block():
-	 *    - 0 means continue to block the vCPU.
-	 *    - 1 means we cannot block the vCPU since some event
-	 *        happens during this period, such as, 'ON' bit in
-	 *        posted-interrupts descriptor is set.
-	 */
-	int (*pre_block)(struct kvm_vcpu *vcpu);
-	void (*post_block)(struct kvm_vcpu *vcpu);
-
 	void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
 	void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
...
@@ -7566,16 +7566,6 @@ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
 		secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
 }
 
-static int vmx_pre_block(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-static void vmx_post_block(struct kvm_vcpu *vcpu)
-{
-}
-
 static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.mcg_cap & MCG_LMCE_P)
@@ -7777,9 +7767,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.cpu_dirty_log_size = PML_ENTITY_NUM,
 	.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 
-	.pre_block = vmx_pre_block,
-	.post_block = vmx_post_block,
-
 	.pmu_ops = &intel_pmu_ops,
 	.nested_ops = &vmx_nested_ops,
...
@@ -10148,8 +10148,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 {
 	bool hv_timer;
 
-	if (!kvm_arch_vcpu_runnable(vcpu) &&
-	    (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
+	if (!kvm_arch_vcpu_runnable(vcpu)) {
 		/*
 		 * Switch to the software timer before halt-polling/blocking as
 		 * the guest's timer may be a break event for the vCPU, and the
@@ -10171,9 +10170,6 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 		if (hv_timer)
 			kvm_lapic_switch_to_hv_timer(vcpu);
 
-		if (kvm_x86_ops.post_block)
-			static_call(kvm_x86_post_block)(vcpu);
-
 		if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
 			return 1;
 	}
...
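Condensed view of what the two hunks above do to the vcpu_block() call site; this is a paraphrase of this diff only, not the complete function, with elided lines marked "...":

	/* Before: block only if the vCPU is not runnable and the optional
	 * pre_block() hook, when implemented, agrees by returning 0. */
	if (!kvm_arch_vcpu_runnable(vcpu) &&
	    (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
		...
		if (kvm_x86_ops.post_block)
			static_call(kvm_x86_post_block)(vcpu);
		...
	}

	/* After: both hooks are gone, so the guard reduces to the runnable
	 * check and the post_block call disappears entirely. */
	if (!kvm_arch_vcpu_runnable(vcpu)) {
		...
	}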