Commit 44883f01 authored by Paolo Bonzini

KVM: x86: ensure all MSRs can always be KVM_GET/SET_MSR'd

Some of the MSRs returned by GET_MSR_INDEX_LIST currently cannot be sent back
to KVM_GET_MSR and/or KVM_SET_MSR; either they can never be sent back, or they
are only accepted under special conditions.  This makes the API a pain to
use.

To avoid this pain, this patch makes it so that the result of the get-list
ioctl can always be used for host-initiated get and set.  Since we don't have
a separate way to check for read-only MSRs, this means some Hyper-V MSRs are
ignored when written.  Arguably they should not even be in the result of
GET_MSR_INDEX_LIST, but I am leaving them there in case userspace is using the
outcome of GET_MSR_INDEX_LIST to derive the support for the corresponding
Hyper-V feature.

Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cf81a7e5
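
The usage pattern the commit message describes (feeding the result of KVM_GET_MSR_INDEX_LIST straight into host-initiated KVM_GET_MSRS/KVM_SET_MSRS on a vCPU fd) looks roughly like the sketch below. The sketch is not part of the patch: the kvm_fd and vcpu_fd arguments, the MAX_MSRS bound, and the dump_all_msrs() helper are illustrative assumptions, and error handling is trimmed.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MAX_MSRS 512    /* assumed upper bound; enlarge on E2BIG */

static int dump_all_msrs(int kvm_fd, int vcpu_fd)
{
        struct kvm_msr_list *list;
        struct kvm_msrs *msrs;
        int i, n;

        /* KVM_GET_MSR_INDEX_LIST is a /dev/kvm (system) ioctl. */
        list = calloc(1, sizeof(*list) + MAX_MSRS * sizeof(list->indices[0]));
        list->nmsrs = MAX_MSRS;
        if (ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list) < 0)
                return -1;

        /* Ask for every index the list ioctl reported. */
        msrs = calloc(1, sizeof(*msrs) + list->nmsrs * sizeof(msrs->entries[0]));
        msrs->nmsrs = list->nmsrs;
        for (i = 0; i < list->nmsrs; i++)
                msrs->entries[i].index = list->indices[i];

        /*
         * Host-initiated read on the vCPU fd.  KVM_GET_MSRS returns the
         * number of entries it processed, so with this patch n should
         * equal list->nmsrs.
         */
        n = ioctl(vcpu_fd, KVM_GET_MSRS, msrs);
        for (i = 0; i < n; i++)
                printf("MSR 0x%x = 0x%llx\n", msrs->entries[i].index,
                       (unsigned long long)msrs->entries[i].data);

        /*
         * Host-initiated write-back of the same values (e.g. for save/
         * restore).  Writes to the read-only Hyper-V MSRs are silently
         * ignored rather than rejected.
         */
        n = ioctl(vcpu_fd, KVM_SET_MSRS, msrs);

        free(msrs);
        free(list);
        return n;
}

Before this patch, the KVM_GET_MSRS/KVM_SET_MSRS calls above could come back short, because some of the Hyper-V and MCE-related indices in the list were rejected unless the guest had enabled the corresponding feature.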
@@ -235,7 +235,7 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
         struct kvm_vcpu *vcpu = synic_to_vcpu(synic);
         int ret;
 
-        if (!synic->active)
+        if (!synic->active && !host)
                 return 1;
 
         trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
@@ -295,11 +295,12 @@ static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
         return ret;
 }
 
-static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata)
+static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
+                         bool host)
 {
         int ret;
 
-        if (!synic->active)
+        if (!synic->active && !host)
                 return 1;
 
         ret = 0;
@@ -1014,6 +1015,11 @@ static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
         case HV_X64_MSR_TSC_EMULATION_STATUS:
                 hv->hv_tsc_emulation_status = data;
                 break;
+        case HV_X64_MSR_TIME_REF_COUNT:
+                /* read-only, but still ignore it if host-initiated */
+                if (!host)
+                        return 1;
+                break;
         default:
                 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
                             msr, data);
@@ -1101,6 +1107,12 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                 return stimer_set_count(vcpu_to_stimer(vcpu, timer_index),
                                         data, host);
         }
+        case HV_X64_MSR_TSC_FREQUENCY:
+        case HV_X64_MSR_APIC_FREQUENCY:
+                /* read-only, but still ignore it if host-initiated */
+                if (!host)
+                        return 1;
+                break;
         default:
                 vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
                             msr, data);
@@ -1156,7 +1168,8 @@ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         return 0;
 }
 
-static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
+                          bool host)
 {
         u64 data = 0;
         struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
@@ -1183,7 +1196,7 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
         case HV_X64_MSR_SIMP:
         case HV_X64_MSR_EOM:
         case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
-                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata);
+                return synic_get_msr(vcpu_to_synic(vcpu), msr, pdata, host);
         case HV_X64_MSR_STIMER0_CONFIG:
         case HV_X64_MSR_STIMER1_CONFIG:
         case HV_X64_MSR_STIMER2_CONFIG:
@@ -1229,7 +1242,7 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
                 return kvm_hv_set_msr(vcpu, msr, data, host);
 }
 
-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
         if (kvm_hv_msr_partition_wide(msr)) {
                 int r;
@@ -1239,7 +1252,7 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                 return r;
         } else
-                return kvm_hv_get_msr(vcpu, msr, pdata);
+                return kvm_hv_get_msr(vcpu, msr, pdata, host);
 }
 
 static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
...
@@ -48,7 +48,7 @@ static inline struct kvm_vcpu *synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
 }
 
 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
-int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
+int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);
 
 bool kvm_hv_hypercall_enabled(struct kvm *kvm);
 int kvm_hv_hypercall(struct kvm_vcpu *vcpu);
...
@@ -2160,10 +2160,11 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                 vcpu->arch.mcg_status = data;
                 break;
         case MSR_IA32_MCG_CTL:
-                if (!(mcg_cap & MCG_CTL_P))
+                if (!(mcg_cap & MCG_CTL_P) &&
+                    (data || !msr_info->host_initiated))
                         return 1;
                 if (data != 0 && data != ~(u64)0)
-                        return -1;
+                        return 1;
                 vcpu->arch.mcg_ctl = data;
                 break;
         default:
@@ -2551,7 +2552,7 @@ int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr);
 
-static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 {
         u64 data;
         u64 mcg_cap = vcpu->arch.mcg_cap;
@@ -2566,7 +2567,7 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                 data = vcpu->arch.mcg_cap;
                 break;
         case MSR_IA32_MCG_CTL:
-                if (!(mcg_cap & MCG_CTL_P))
+                if (!(mcg_cap & MCG_CTL_P) && !host)
                         return 1;
                 data = vcpu->arch.mcg_ctl;
                 break;
@@ -2699,7 +2700,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case MSR_IA32_MCG_CTL:
         case MSR_IA32_MCG_STATUS:
         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
-                return get_msr_mce(vcpu, msr_info->index, &msr_info->data);
+                return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
+                                   msr_info->host_initiated);
         case MSR_K7_CLK_CTL:
                 /*
                  * Provide expected ramp-up count for K7. All other
@@ -2720,7 +2722,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
         case HV_X64_MSR_TSC_EMULATION_CONTROL:
         case HV_X64_MSR_TSC_EMULATION_STATUS:
                 return kvm_hv_get_msr_common(vcpu,
-                                msr_info->index, &msr_info->data);
+                                msr_info->index, &msr_info->data,
+                                msr_info->host_initiated);
                 break;
         case MSR_IA32_BBL_CR_CTL3:
                 /* This legacy MSR exists but isn't fully documented in current
...