Commit e0121fa2 authored by Vitaly Kuznetsov, committed by Paolo Bonzini

KVM: x86: hyper-v: Rename vcpu_to_synic()/synic_to_vcpu()

vcpu_to_synic()'s argument is almost always 'vcpu' so there's no need to
have an additional prefix. Also, as this is used outside of hyper-v
emulation code, add '_hv_' part to make it clear what this is. This makes
the naming more consistent with to_hv_vcpu().

Rename synic_to_vcpu() to hv_synic_to_vcpu() for consistency.

No functional change intended.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-6-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ef3f3980
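
For quick reference, the rename amounts to the following signature changes in the inline helpers (condensed from the hyperv.h hunk below; the function bodies are untouched):

	/* Before */
	static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu);
	static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic);

	/* After */
	static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu);
	static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic);

Every caller in hyperv.c, lapic.c and x86.c is updated mechanically to match.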
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -129,7 +129,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
 	synic_update_vector(synic, vector);
 
 	/* Load SynIC vectors into EOI exit bitmap */
-	kvm_make_request(KVM_REQ_SCAN_IOAPIC, synic_to_vcpu(synic));
+	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
 
 	return 0;
 }
@@ -158,14 +158,14 @@ static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
 	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
 	if (!vcpu)
 		return NULL;
-	synic = vcpu_to_synic(vcpu);
+	synic = to_hv_synic(vcpu);
 	return (synic->active) ? synic : NULL;
 }
 
 static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 {
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 	struct kvm_vcpu_hv_stimer *stimer;
 	int gsi, idx;
@@ -190,7 +190,7 @@ static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
 
 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 {
-	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
 	struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
 
 	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
@@ -205,7 +205,7 @@ static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 			 u32 msr, u64 data, bool host)
 {
-	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
 	int ret;
 
 	if (!synic->active && !host)
@@ -422,7 +422,7 @@ static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
 
 static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
 {
-	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
 	struct kvm_lapic_irq irq;
 	int ret, vector;
 
@@ -458,7 +458,7 @@ int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
 
 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
 {
-	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 	int i;
 
 	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
@@ -635,7 +635,7 @@ static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
 	union hv_stimer_config new_config = {.as_uint64 = config},
 		old_config = {.as_uint64 = stimer->config.as_uint64};
 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
-	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
 	if (!synic->active && !host)
 		return 1;
@@ -659,7 +659,7 @@ static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
 			    bool host)
 {
 	struct kvm_vcpu *vcpu = stimer_to_vcpu(stimer);
-	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
 	if (!synic->active && !host)
 		return 1;
@@ -695,7 +695,7 @@ static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
 static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
 			     struct hv_message *src_msg, bool no_retry)
 {
-	struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
+	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
 	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
 	gfn_t msg_page_gfn;
 	struct hv_message_header hv_hdr;
@@ -764,7 +764,7 @@ static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
 	payload->expiration_time = stimer->exp_time;
 	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
 
-	return synic_deliver_msg(vcpu_to_synic(vcpu),
+	return synic_deliver_msg(to_hv_synic(vcpu),
 				 stimer->config.sintx, msg,
 				 no_retry);
 }
@@ -902,7 +902,7 @@ void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
 {
-	struct kvm_vcpu_hv_synic *synic = vcpu_to_synic(vcpu);
+	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
 
 	/*
 	 * Hyper-V SynIC auto EOI SINT's are
@@ -1309,7 +1309,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	case HV_X64_MSR_SIMP:
 	case HV_X64_MSR_EOM:
 	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
-		return synic_set_msr(vcpu_to_synic(vcpu), msr, data, host);
+		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
 	case HV_X64_MSR_STIMER0_CONFIG:
 	case HV_X64_MSR_STIMER1_CONFIG:
 	case HV_X64_MSR_STIMER2_CONFIG:
@@ -1421,7 +1421,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 	case HV_X64_MSR_SIMP:
 	case HV_X64_MSR_EOM:
 	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
-		return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
+		return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
 	case HV_X64_MSR_STIMER0_CONFIG:
 	case HV_X64_MSR_STIMER1_CONFIG:
 	case HV_X64_MSR_STIMER2_CONFIG:
@@ -1811,7 +1811,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 		fallthrough;	/* maybe userspace knows this conn_id */
 	case HVCALL_POST_MESSAGE:
 		/* don't bother userspace if it has no way to handle it */
-		if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) {
+		if (unlikely(rep || !to_hv_synic(vcpu)->active)) {
 			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
 			break;
 		}
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -63,12 +63,12 @@ static inline struct kvm_vcpu *hv_vcpu_to_vcpu(struct kvm_vcpu_hv *hv_vcpu)
 	return container_of(arch, struct kvm_vcpu, arch);
 }
 
-static inline struct kvm_vcpu_hv_synic *vcpu_to_synic(struct kvm_vcpu *vcpu)
+static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
 {
 	return &vcpu->arch.hyperv.synic;
 }
 
-static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
+static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
 {
 	return hv_vcpu_to_vcpu(container_of(synic, struct kvm_vcpu_hv, synic));
 }
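
hv_synic_to_vcpu() relies on the kernel's container_of() idiom (twice, via hv_vcpu_to_vcpu()) to walk from the embedded SynIC back out to the enclosing kvm_vcpu. Below is a minimal, self-contained userspace sketch of that round trip; the struct names are hypothetical stand-ins that only mirror the nesting kvm_vcpu -> arch -> hyperv -> synic, not the real kernel types:

	#include <assert.h>
	#include <stddef.h>

	/* Userspace stand-in for the kernel's container_of() macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	/* Hypothetical types mirroring the nesting used by the patch. */
	struct synic { int active; };
	struct hv    { struct synic synic; };
	struct arch  { struct hv hyperv; };
	struct vcpu  { struct arch arch; };

	/* Inward direction: just follow the embedded members. */
	static struct synic *to_hv_synic(struct vcpu *vcpu)
	{
		return &vcpu->arch.hyperv.synic;
	}

	/* Outward direction: subtract member offsets, one level at a time. */
	static struct vcpu *hv_synic_to_vcpu(struct synic *synic)
	{
		struct hv *hv = container_of(synic, struct hv, synic);
		struct arch *arch = container_of(hv, struct arch, hyperv);
		return container_of(arch, struct vcpu, arch);
	}

	int main(void)
	{
		struct vcpu v = { 0 };
		/* The two helpers are exact inverses. */
		assert(hv_synic_to_vcpu(to_hv_synic(&v)) == &v);
		return 0;
	}

Both directions are compile-time pointer arithmetic, so the rename carries no runtime cost, in line with the "No functional change intended" note above.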
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1245,7 +1245,7 @@ static int apic_set_eoi(struct kvm_lapic *apic)
 	apic_clear_isr(vector, apic);
 	apic_update_ppr(apic);
 
-	if (test_bit(vector, vcpu_to_synic(apic->vcpu)->vec_bitmap))
+	if (test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
 		kvm_hv_synic_send_eoi(apic->vcpu, vector);
 
 	kvm_ioapic_send_eoi(apic, vector);
@@ -2512,7 +2512,7 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 	 */
 	apic_clear_irr(vector, apic);
 
-	if (test_bit(vector, vcpu_to_synic(vcpu)->auto_eoi_bitmap)) {
+	if (test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
 		/*
 		 * For auto-EOI interrupts, there might be another pending
 		 * interrupt above PPR, so check whether to raise another
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8804,7 +8804,7 @@ static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
 		return;
 
 	bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors,
-		  vcpu_to_synic(vcpu)->vec_bitmap, 256);
+		  to_hv_synic(vcpu)->vec_bitmap, 256);
 	static_call(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
 }