Commit 6abe9c13 authored by Peter Xu, committed by Paolo Bonzini

KVM: X86: Move ignore_msrs handling up the stack

MSR accesses can be one of:

  (1) KVM internal access,
  (2) userspace access (e.g., via KVM_SET_MSRS ioctl),
  (3) guest access.
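
As an illustration of case (2), a userspace VMM writes an MSR through the
vcpu file descriptor roughly as below (a minimal sketch against the KVM
uAPI; the helper name set_one_msr() and the vcpu_fd handle are hypothetical):

  /* Write a single MSR from userspace via KVM_SET_MSRS (case 2). */
  #include <linux/kvm.h>
  #include <string.h>
  #include <sys/ioctl.h>

  static int set_one_msr(int vcpu_fd, __u32 index, __u64 value)
  {
          struct {
                  struct kvm_msrs hdr;          /* nmsrs + pad */
                  struct kvm_msr_entry entry;   /* single MSR entry */
          } msrs;

          memset(&msrs, 0, sizeof(msrs));
          msrs.hdr.nmsrs = 1;
          msrs.entry.index = index;
          msrs.entry.data = value;

          /* KVM_SET_MSRS returns the number of MSRs actually processed. */
          return ioctl(vcpu_fd, KVM_SET_MSRS, &msrs) == 1 ? 0 : -1;
  }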

The ignore_msrs option was previously handled in kvm_get_msr_common() and
kvm_set_msr_common(), at the bottom of the MSR access stack.  That works in
most cases, but it can dump unwanted warning messages to dmesg even when KVM
gets/sets MSRs internally by calling __kvm_set_msr() or __kvm_get_msr()
(e.g. from kvm_cpuid()).  Ideally we only want to trap cases (2) and (3)
above, not case (1).
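
Case (1), by contrast, happens entirely inside KVM; schematically (a
hypothetical internal caller, shown only to make the case concrete):

  /*
   * Hypothetical internal read (case 1).  Before this change, a miss
   * here fell through to kvm_get_msr_common() and could log an
   * "unhandled rdmsr" or "ignored rdmsr" line even though neither the
   * guest nor userspace touched the MSR.
   */
  static u64 peek_msr_quietly(struct kvm_vcpu *vcpu, u32 index)
  {
          u64 data = 0;

          if (__kvm_get_msr(vcpu, index, &data, true))
                  data = 0;  /* absorb the miss without dmesg noise */

          return data;
  }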

To achieve this, move the ignore_msrs handling up the stack, into the callers
of __kvm_get_msr() and __kvm_set_msr().  A new return value
(KVM_MSR_RET_INVALID == 2) is introduced to identify the "MSR missing" event.
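
The resulting layering for the write path looks like this (sketched from the
patch below; the read path is symmetric):

  kvm_set_msr() / do_set_msr()           <- cases (3) and (2)
      -> kvm_set_msr_ignored_check()     <- ignore_msrs is now handled here
          -> __kvm_set_msr()             <- case (1) calls this directly
              -> kvm_set_msr_common()    <- returns KVM_MSR_RET_INVALID
                                            for an unknown MSR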
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20200622220442.21998-2-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 02f5fb2e
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -243,6 +243,29 @@ static struct kmem_cache *x86_fpu_cache;
 
 static struct kmem_cache *x86_emulator_cache;
 
+/*
+ * When called, it means the previous get/set msr reached an invalid msr.
+ * Return 0 if we want to ignore/silent this failed msr access, or 1 if we want
+ * to fail the caller.
+ */
+static int kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
+				 u64 data, bool write)
+{
+	const char *op = write ? "wrmsr" : "rdmsr";
+
+	if (ignore_msrs) {
+		if (report_ignored_msrs)
+			vcpu_unimpl(vcpu, "ignored %s: 0x%x data 0x%llx\n",
+				    op, msr, data);
+		/* Mask the error */
+		return 0;
+	} else {
+		vcpu_debug_ratelimited(vcpu, "unhandled %s: 0x%x data 0x%llx\n",
+				       op, msr, data);
+		return 1;
+	}
+}
+
 static struct kmem_cache *kvm_alloc_emulator_cache(void)
 {
 	unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
@@ -1516,6 +1539,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
 	return kvm_x86_ops.set_msr(vcpu, &msr);
 }
 
+static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
+				     u32 index, u64 data, bool host_initiated)
+{
+	int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
+
+	if (ret == KVM_MSR_RET_INVALID)
+		ret = kvm_msr_ignored_check(vcpu, index, data, true);
+
+	return ret;
+}
+
 /*
  * Read the MSR specified by @index into @data.  Select MSR specific fault
  * checks are bypassed if @host_initiated is %true.
@@ -1537,15 +1571,29 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
 	return ret;
 }
 
+static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
+				     u32 index, u64 *data, bool host_initiated)
+{
+	int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
+
+	if (ret == KVM_MSR_RET_INVALID) {
+		/* Unconditionally clear *data for simplicity */
+		*data = 0;
+		ret = kvm_msr_ignored_check(vcpu, index, 0, false);
+	}
+
+	return ret;
+}
+
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 {
-	return __kvm_get_msr(vcpu, index, data, false);
+	return kvm_get_msr_ignored_check(vcpu, index, data, false);
 }
 EXPORT_SYMBOL_GPL(kvm_get_msr);
 
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
 {
-	return __kvm_set_msr(vcpu, index, data, false);
+	return kvm_set_msr_ignored_check(vcpu, index, data, false);
 }
 EXPORT_SYMBOL_GPL(kvm_set_msr);
@@ -1665,12 +1713,12 @@ EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
  */
 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	return __kvm_get_msr(vcpu, index, data, true);
+	return kvm_get_msr_ignored_check(vcpu, index, data, true);
 }
 
 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 {
-	return __kvm_set_msr(vcpu, index, *data, true);
+	return kvm_set_msr_ignored_check(vcpu, index, *data, true);
 }
 
 #ifdef CONFIG_X86_64
@@ -3066,17 +3114,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			return xen_hvm_config(vcpu, data);
 		if (kvm_pmu_is_valid_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
-		if (!ignore_msrs) {
-			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
-					       msr, data);
-			return 1;
-		} else {
-			if (report_ignored_msrs)
-				vcpu_unimpl(vcpu,
-					"ignored wrmsr: 0x%x data 0x%llx\n",
-					msr, data);
-			break;
-		}
+		return KVM_MSR_RET_INVALID;
 	}
 	return 0;
 }
@@ -3331,17 +3369,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	default:
 		if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
 			return kvm_pmu_get_msr(vcpu, msr_info);
-		if (!ignore_msrs) {
-			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
-					       msr_info->index);
-			return 1;
-		} else {
-			if (report_ignored_msrs)
-				vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
-					    msr_info->index);
-			msr_info->data = 0;
-		}
-		break;
+		return KVM_MSR_RET_INVALID;
 	}
 	return 0;
 }
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -366,4 +366,6 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
+#define KVM_MSR_RET_INVALID 2
+
 #endif