Commit 1cec2034 authored by Sean Christopherson's avatar Sean Christopherson

KVM: x86: Funnel all fancy MSR return value handling into a common helper

Add a common helper, kvm_do_msr_access(), to invoke the "leaf" APIs that
are type and access specific, and more importantly to handle errors that
are returned from the leaf APIs.  I.e. turn kvm_msr_ignored_check() from a
helper that is called on an error, into a trampoline that detects errors
*and* applies relevant side effects, e.g. logging unimplemented accesses.

Because the leaf APIs are used for guest accesses, userspace accesses, and
KVM accesses, and because KVM supports restricting access to MSRs from
userspace via filters, the error handling is subtly non-trivial.  E.g. KVM
has had at least one bug escape due to making each "outer" function handle
errors.  See commit 3376ca3f ("KVM: x86: Fix KVM_GET_MSRS stack info
leak").

Link: https://lore.kernel.org/r/20240802181935.292540-8-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent 7075f163
...@@ -304,25 +304,40 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { ...@@ -304,25 +304,40 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
static struct kmem_cache *x86_emulator_cache; static struct kmem_cache *x86_emulator_cache;
/* typedef int (*msr_access_t)(struct kvm_vcpu *vcpu, u32 index, u64 *data,
* When called, it means the previous get/set msr reached an invalid msr. bool host_initiated);
* Return true if we want to ignore/silent this failed msr access.
*/ static __always_inline int kvm_do_msr_access(struct kvm_vcpu *vcpu, u32 msr,
static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write) u64 *data, bool host_initiated,
enum kvm_msr_access rw,
msr_access_t msr_access_fn)
{ {
const char *op = write ? "wrmsr" : "rdmsr"; const char *op = rw == MSR_TYPE_W ? "wrmsr" : "rdmsr";
int ret;
if (ignore_msrs) { BUILD_BUG_ON(rw != MSR_TYPE_R && rw != MSR_TYPE_W);
if (report_ignored_msrs)
kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", /*
op, msr, data); * Zero the data on read failures to avoid leaking stack data to the
/* Mask the error */ * guest and/or userspace, e.g. if the failure is ignored below.
return true; */
} else { ret = msr_access_fn(vcpu, msr, data, host_initiated);
if (ret && rw == MSR_TYPE_R)
*data = 0;
if (ret != KVM_MSR_RET_UNSUPPORTED)
return ret;
if (!ignore_msrs) {
kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n", kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
op, msr, data); op, msr, *data);
return false; return ret;
} }
if (report_ignored_msrs)
kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n", op, msr, *data);
return 0;
} }
static struct kmem_cache *kvm_alloc_emulator_cache(void) static struct kmem_cache *kvm_alloc_emulator_cache(void)
...@@ -1682,16 +1697,8 @@ static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, ...@@ -1682,16 +1697,8 @@ static int kvm_get_feature_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) static int do_get_feature_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{ {
int r; return kvm_do_msr_access(vcpu, index, data, true, MSR_TYPE_R,
kvm_get_feature_msr);
/* Unconditionally clear the output for simplicity */
*data = 0;
r = kvm_get_feature_msr(vcpu, index, data, true);
if (r == KVM_MSR_RET_UNSUPPORTED && kvm_msr_ignored_check(index, 0, false))
r = 0;
return r;
} }
static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
...@@ -1878,16 +1885,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, ...@@ -1878,16 +1885,17 @@ static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
return kvm_x86_call(set_msr)(vcpu, &msr); return kvm_x86_call(set_msr)(vcpu, &msr);
} }
static int _kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
bool host_initiated)
{
return __kvm_set_msr(vcpu, index, *data, host_initiated);
}
static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 data, bool host_initiated) u32 index, u64 data, bool host_initiated)
{ {
int ret = __kvm_set_msr(vcpu, index, data, host_initiated); return kvm_do_msr_access(vcpu, index, &data, host_initiated, MSR_TYPE_W,
_kvm_set_msr);
if (ret == KVM_MSR_RET_UNSUPPORTED)
if (kvm_msr_ignored_check(index, data, true))
ret = 0;
return ret;
} }
/* /*
...@@ -1926,16 +1934,8 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, ...@@ -1926,16 +1934,8 @@ int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
u32 index, u64 *data, bool host_initiated) u32 index, u64 *data, bool host_initiated)
{ {
int ret = __kvm_get_msr(vcpu, index, data, host_initiated); return kvm_do_msr_access(vcpu, index, data, host_initiated, MSR_TYPE_R,
__kvm_get_msr);
if (ret == KVM_MSR_RET_UNSUPPORTED) {
/* Unconditionally clear *data for simplicity */
*data = 0;
if (kvm_msr_ignored_check(index, 0, false))
ret = 0;
}
return ret;
} }
static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment