Commit f3c5e63c authored by Michael Kelley, committed by Wei Liu

Drivers: hv: Redo Hyper-V synthetic MSR get/set functions

Current code defines a separate get and set macro for each Hyper-V
synthetic MSR used by the VMbus driver. Furthermore, the get macro
can't be converted to a standard function because the second argument
is modified in place, which is somewhat bad form.

Redo this by providing a single get and a single set function that
take a parameter specifying the MSR to be operated on. Fix up usage
of the get function. Calling locations are no more complex than before,
but the code under arch/x86 and the upcoming code under arch/arm64
are significantly simplified.
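
For illustration, the new x86 accessors are thin wrappers around the MSR
read/write instructions, and a former call site changes as shown here
(this mirrors the mshyperv.h and hv_init.c hunks in the diff below):

	static inline void hv_set_register(unsigned int reg, u64 value)
	{
		wrmsrl(reg, value);
	}

	static inline u64 hv_get_register(unsigned int reg)
	{
		u64 value;

		rdmsrl(reg, value);
		return value;
	}

so that, for example,

	hv_get_vp_index(msr_vp_index);

becomes

	msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);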

Also standardize the names of Hyper-V synthetic MSRs that are
architecture neutral. But keep the old x86-specific names as aliases
that can be removed later when all references (particularly in KVM
code) have been cleaned up in a separate patch series.

No functional change.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://lore.kernel.org/r/1614721102-2241-4-git-send-email-mikelley@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
parent 5e4e6ddf
@@ -75,7 +75,7 @@ static int hv_cpu_init(unsigned int cpu)
 		*output_arg = page_address(pg + 1);
 	}
-	hv_get_vp_index(msr_vp_index);
+	msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
 	hv_vp_index[smp_processor_id()] = msr_vp_index;
@@ -156,7 +156,7 @@ enum hv_isolation_type {
 #define HV_X64_MSR_HYPERCALL 0x40000001
 /* MSR used to provide vcpu index */
-#define HV_X64_MSR_VP_INDEX 0x40000002
+#define HV_REGISTER_VP_INDEX 0x40000002
 /* MSR used to reset the guest OS. */
 #define HV_X64_MSR_RESET 0x40000003
@@ -165,10 +165,10 @@ enum hv_isolation_type {
 #define HV_X64_MSR_VP_RUNTIME 0x40000010
 /* MSR used to read the per-partition time reference counter */
-#define HV_X64_MSR_TIME_REF_COUNT 0x40000020
+#define HV_REGISTER_TIME_REF_COUNT 0x40000020
 /* A partition's reference time stamp counter (TSC) page */
-#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+#define HV_REGISTER_REFERENCE_TSC 0x40000021
 /* MSR used to retrieve the TSC frequency */
 #define HV_X64_MSR_TSC_FREQUENCY 0x40000022
@@ -183,50 +183,50 @@ enum hv_isolation_type {
 #define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
 /* Define synthetic interrupt controller model specific registers. */
-#define HV_X64_MSR_SCONTROL 0x40000080
-#define HV_X64_MSR_SVERSION 0x40000081
-#define HV_X64_MSR_SIEFP 0x40000082
-#define HV_X64_MSR_SIMP 0x40000083
-#define HV_X64_MSR_EOM 0x40000084
-#define HV_X64_MSR_SINT0 0x40000090
-#define HV_X64_MSR_SINT1 0x40000091
-#define HV_X64_MSR_SINT2 0x40000092
-#define HV_X64_MSR_SINT3 0x40000093
-#define HV_X64_MSR_SINT4 0x40000094
-#define HV_X64_MSR_SINT5 0x40000095
-#define HV_X64_MSR_SINT6 0x40000096
-#define HV_X64_MSR_SINT7 0x40000097
-#define HV_X64_MSR_SINT8 0x40000098
-#define HV_X64_MSR_SINT9 0x40000099
-#define HV_X64_MSR_SINT10 0x4000009A
-#define HV_X64_MSR_SINT11 0x4000009B
-#define HV_X64_MSR_SINT12 0x4000009C
-#define HV_X64_MSR_SINT13 0x4000009D
-#define HV_X64_MSR_SINT14 0x4000009E
-#define HV_X64_MSR_SINT15 0x4000009F
+#define HV_REGISTER_SCONTROL 0x40000080
+#define HV_REGISTER_SVERSION 0x40000081
+#define HV_REGISTER_SIEFP 0x40000082
+#define HV_REGISTER_SIMP 0x40000083
+#define HV_REGISTER_EOM 0x40000084
+#define HV_REGISTER_SINT0 0x40000090
+#define HV_REGISTER_SINT1 0x40000091
+#define HV_REGISTER_SINT2 0x40000092
+#define HV_REGISTER_SINT3 0x40000093
+#define HV_REGISTER_SINT4 0x40000094
+#define HV_REGISTER_SINT5 0x40000095
+#define HV_REGISTER_SINT6 0x40000096
+#define HV_REGISTER_SINT7 0x40000097
+#define HV_REGISTER_SINT8 0x40000098
+#define HV_REGISTER_SINT9 0x40000099
+#define HV_REGISTER_SINT10 0x4000009A
+#define HV_REGISTER_SINT11 0x4000009B
+#define HV_REGISTER_SINT12 0x4000009C
+#define HV_REGISTER_SINT13 0x4000009D
+#define HV_REGISTER_SINT14 0x4000009E
+#define HV_REGISTER_SINT15 0x4000009F
 /*
  * Synthetic Timer MSRs. Four timers per vcpu.
  */
-#define HV_X64_MSR_STIMER0_CONFIG 0x400000B0
-#define HV_X64_MSR_STIMER0_COUNT 0x400000B1
-#define HV_X64_MSR_STIMER1_CONFIG 0x400000B2
-#define HV_X64_MSR_STIMER1_COUNT 0x400000B3
-#define HV_X64_MSR_STIMER2_CONFIG 0x400000B4
-#define HV_X64_MSR_STIMER2_COUNT 0x400000B5
-#define HV_X64_MSR_STIMER3_CONFIG 0x400000B6
-#define HV_X64_MSR_STIMER3_COUNT 0x400000B7
+#define HV_REGISTER_STIMER0_CONFIG 0x400000B0
+#define HV_REGISTER_STIMER0_COUNT 0x400000B1
+#define HV_REGISTER_STIMER1_CONFIG 0x400000B2
+#define HV_REGISTER_STIMER1_COUNT 0x400000B3
+#define HV_REGISTER_STIMER2_CONFIG 0x400000B4
+#define HV_REGISTER_STIMER2_COUNT 0x400000B5
+#define HV_REGISTER_STIMER3_CONFIG 0x400000B6
+#define HV_REGISTER_STIMER3_COUNT 0x400000B7
 /* Hyper-V guest idle MSR */
 #define HV_X64_MSR_GUEST_IDLE 0x400000F0
 /* Hyper-V guest crash notification MSR's */
-#define HV_X64_MSR_CRASH_P0 0x40000100
-#define HV_X64_MSR_CRASH_P1 0x40000101
-#define HV_X64_MSR_CRASH_P2 0x40000102
-#define HV_X64_MSR_CRASH_P3 0x40000103
-#define HV_X64_MSR_CRASH_P4 0x40000104
-#define HV_X64_MSR_CRASH_CTL 0x40000105
+#define HV_REGISTER_CRASH_P0 0x40000100
+#define HV_REGISTER_CRASH_P1 0x40000101
+#define HV_REGISTER_CRASH_P2 0x40000102
+#define HV_REGISTER_CRASH_P3 0x40000103
+#define HV_REGISTER_CRASH_P4 0x40000104
+#define HV_REGISTER_CRASH_CTL 0x40000105
 /* TSC emulation after migration */
 #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106
@@ -236,6 +236,32 @@ enum hv_isolation_type {
 /* TSC invariant control */
 #define HV_X64_MSR_TSC_INVARIANT_CONTROL 0x40000118
+/* Register name aliases for temporary compatibility */
+#define HV_X64_MSR_STIMER0_COUNT HV_REGISTER_STIMER0_COUNT
+#define HV_X64_MSR_STIMER0_CONFIG HV_REGISTER_STIMER0_CONFIG
+#define HV_X64_MSR_STIMER1_COUNT HV_REGISTER_STIMER1_COUNT
+#define HV_X64_MSR_STIMER1_CONFIG HV_REGISTER_STIMER1_CONFIG
+#define HV_X64_MSR_STIMER2_COUNT HV_REGISTER_STIMER2_COUNT
+#define HV_X64_MSR_STIMER2_CONFIG HV_REGISTER_STIMER2_CONFIG
+#define HV_X64_MSR_STIMER3_COUNT HV_REGISTER_STIMER3_COUNT
+#define HV_X64_MSR_STIMER3_CONFIG HV_REGISTER_STIMER3_CONFIG
+#define HV_X64_MSR_SCONTROL HV_REGISTER_SCONTROL
+#define HV_X64_MSR_SVERSION HV_REGISTER_SVERSION
+#define HV_X64_MSR_SIMP HV_REGISTER_SIMP
+#define HV_X64_MSR_SIEFP HV_REGISTER_SIEFP
+#define HV_X64_MSR_VP_INDEX HV_REGISTER_VP_INDEX
+#define HV_X64_MSR_EOM HV_REGISTER_EOM
+#define HV_X64_MSR_SINT0 HV_REGISTER_SINT0
+#define HV_X64_MSR_SINT15 HV_REGISTER_SINT15
+#define HV_X64_MSR_CRASH_P0 HV_REGISTER_CRASH_P0
+#define HV_X64_MSR_CRASH_P1 HV_REGISTER_CRASH_P1
+#define HV_X64_MSR_CRASH_P2 HV_REGISTER_CRASH_P2
+#define HV_X64_MSR_CRASH_P3 HV_REGISTER_CRASH_P3
+#define HV_X64_MSR_CRASH_P4 HV_REGISTER_CRASH_P4
+#define HV_X64_MSR_CRASH_CTL HV_REGISTER_CRASH_CTL
+#define HV_X64_MSR_TIME_REF_COUNT HV_REGISTER_TIME_REF_COUNT
+#define HV_X64_MSR_REFERENCE_TSC HV_REGISTER_REFERENCE_TSC
 /*
  * Declare the MSR used to setup pages used to communicate with the hypervisor.
  */
@@ -14,41 +14,22 @@ typedef int (*hyperv_fill_flush_list_func)(
 		struct hv_guest_mapping_flush_list *flush,
 		void *data);
-#define hv_init_timer(timer, tick) \
-	wrmsrl(HV_X64_MSR_STIMER0_COUNT + (2*timer), tick)
-#define hv_init_timer_config(timer, val) \
-	wrmsrl(HV_X64_MSR_STIMER0_CONFIG + (2*timer), val)
-#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
-#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
-#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
-#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)
-#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
-#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)
-#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
-#define hv_signal_eom() wrmsrl(HV_X64_MSR_EOM, 0)
-#define hv_get_synint_state(int_num, val) \
-	rdmsrl(HV_X64_MSR_SINT0 + int_num, val)
-#define hv_set_synint_state(int_num, val) \
-	wrmsrl(HV_X64_MSR_SINT0 + int_num, val)
+static inline void hv_set_register(unsigned int reg, u64 value)
+{
+	wrmsrl(reg, value);
+}
+
+static inline u64 hv_get_register(unsigned int reg)
+{
+	u64 value;
+
+	rdmsrl(reg, value);
+	return value;
+}
 #define hv_recommend_using_aeoi() \
 	(!(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED))
-#define hv_get_crash_ctl(val) \
-	rdmsrl(HV_X64_MSR_CRASH_CTL, val)
-#define hv_get_time_ref_count(val) \
-	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, val)
-#define hv_get_reference_tsc(val) \
-	rdmsrl(HV_X64_MSR_REFERENCE_TSC, val)
-#define hv_set_reference_tsc(val) \
-	wrmsrl(HV_X64_MSR_REFERENCE_TSC, val)
 #define hv_set_clocksource_vdso(val) \
 	((val).vdso_clock_mode = VDSO_CLOCKMODE_HVCLOCK)
 #define hv_enable_vdso_clocksource() \
@@ -68,14 +68,14 @@ static int hv_ce_set_next_event(unsigned long delta,
 	current_tick = hv_read_reference_counter();
 	current_tick += delta;
-	hv_init_timer(0, current_tick);
+	hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
 	return 0;
 }
 static int hv_ce_shutdown(struct clock_event_device *evt)
 {
-	hv_init_timer(0, 0);
-	hv_init_timer_config(0, 0);
+	hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
+	hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
 	if (direct_mode_enabled)
 		hv_disable_stimer0_percpu_irq(stimer0_irq);
@@ -105,7 +105,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
 		timer_cfg.direct_mode = 0;
 		timer_cfg.sintx = stimer0_message_sint;
 	}
-	hv_init_timer_config(0, timer_cfg.as_uint64);
+	hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
 	return 0;
 }
@@ -331,7 +331,7 @@ static u64 notrace read_hv_clock_tsc(void)
 	u64 current_tick = hv_read_tsc_page(hv_get_tsc_page());
 	if (current_tick == U64_MAX)
-		hv_get_time_ref_count(current_tick);
+		current_tick = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 	return current_tick;
 }
@@ -352,9 +352,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
 	u64 tsc_msr;
 	/* Disable the TSC page */
-	hv_get_reference_tsc(tsc_msr);
+	tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
 	tsc_msr &= ~BIT_ULL(0);
-	hv_set_reference_tsc(tsc_msr);
+	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 }
@@ -364,10 +364,10 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
 	u64 tsc_msr;
 	/* Re-enable the TSC page */
-	hv_get_reference_tsc(tsc_msr);
+	tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
 	tsc_msr &= GENMASK_ULL(11, 0);
 	tsc_msr |= BIT_ULL(0) | (u64)phys_addr;
-	hv_set_reference_tsc(tsc_msr);
+	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 }
 static int hv_cs_enable(struct clocksource *cs)
@@ -389,14 +389,12 @@ static struct clocksource hyperv_cs_tsc = {
 static u64 notrace read_hv_clock_msr(void)
 {
-	u64 current_tick;
 	/*
 	 * Read the partition counter to get the current tick count. This count
 	 * is set to 0 when the partition is created and is incremented in
 	 * 100 nanosecond units.
 	 */
-	hv_get_time_ref_count(current_tick);
-	return current_tick;
+	return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 }
 static u64 notrace read_hv_clock_msr_cs(struct clocksource *arg)
@@ -439,10 +437,10 @@ static bool __init hv_init_tsc_clocksource(void)
 	 * (which already has at least the low 12 bits set to zero since
 	 * it is page aligned). Also set the "enable" bit, which is bit 0.
 	 */
-	hv_get_reference_tsc(tsc_msr);
+	tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC);
 	tsc_msr &= GENMASK_ULL(11, 0);
 	tsc_msr = tsc_msr | 0x1 | (u64)phys_addr;
-	hv_set_reference_tsc(tsc_msr);
+	hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr);
 	hv_set_clocksource_vdso(hyperv_cs_tsc);
 	clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
@@ -198,34 +198,36 @@ void hv_synic_enable_regs(unsigned int cpu)
 	union hv_synic_scontrol sctrl;
 	/* Setup the Synic's message page */
-	hv_get_simp(simp.as_uint64);
+	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
 	simp.simp_enabled = 1;
 	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
 		>> HV_HYP_PAGE_SHIFT;
-	hv_set_simp(simp.as_uint64);
+	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
 	/* Setup the Synic's event page */
-	hv_get_siefp(siefp.as_uint64);
+	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
 	siefp.siefp_enabled = 1;
 	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
 		>> HV_HYP_PAGE_SHIFT;
-	hv_set_siefp(siefp.as_uint64);
+	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 	/* Setup the shared SINT. */
-	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+					VMBUS_MESSAGE_SINT);
 	shared_sint.vector = hv_get_vector();
 	shared_sint.masked = false;
 	shared_sint.auto_eoi = hv_recommend_using_aeoi();
-	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+				shared_sint.as_uint64);
 	/* Enable the global synic bit */
-	hv_get_synic_state(sctrl.as_uint64);
+	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
 	sctrl.enable = 1;
-	hv_set_synic_state(sctrl.as_uint64);
+	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
 }
 int hv_synic_init(unsigned int cpu)
@@ -247,32 +249,35 @@ void hv_synic_disable_regs(unsigned int cpu)
 	union hv_synic_siefp siefp;
 	union hv_synic_scontrol sctrl;
-	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
+					VMBUS_MESSAGE_SINT);
 	shared_sint.masked = 1;
 	/* Need to correctly cleanup in the case of SMP!!! */
 	/* Disable the interrupt */
-	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+	hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
+				shared_sint.as_uint64);
-	hv_get_simp(simp.as_uint64);
+	simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
 	simp.simp_enabled = 0;
 	simp.base_simp_gpa = 0;
-	hv_set_simp(simp.as_uint64);
+	hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
-	hv_get_siefp(siefp.as_uint64);
+	siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
 	siefp.siefp_enabled = 0;
 	siefp.base_siefp_gpa = 0;
-	hv_set_siefp(siefp.as_uint64);
+	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 	/* Disable the global synic bit */
-	hv_get_synic_state(sctrl.as_uint64);
+	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
 	sctrl.enable = 0;
-	hv_set_synic_state(sctrl.as_uint64);
+	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
 }
 int hv_synic_cleanup(unsigned int cpu)
 {
 	struct vmbus_channel *channel, *sc;
@@ -1521,7 +1521,7 @@ static int vmbus_bus_init(void)
 	 * Register for panic kmsg callback only if the right
 	 * capability is supported by the hypervisor.
 	 */
-	hv_get_crash_ctl(hyperv_crash_ctl);
+	hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
 	if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
 		hv_kmsg_dump_register();
@@ -88,7 +88,7 @@ static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
 		 * possibly deliver another msg from the
		 * hypervisor
		 */
-		hv_signal_eom();
+		hv_set_register(HV_REGISTER_EOM, 0);
 	}
 }