Commit 15608ed0 authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: x86: Add helpers to perform CPUID-based guest vendor check

Add helpers to provide CPUID-based guest vendor checks, i.e. to do the
ugly register comparisons.  Use the new helpers to check for an AMD
guest vendor in guest_cpuid_is_amd() as well as in the existing emulator
flows.

Using the new helpers fixes a _very_ theoretical bug where
guest_cpuid_is_amd() would get a false positive on a non-AMD virtual CPU
with a vendor string beginning with "Auth" due to the previous logic
only checking EBX.  It also fixes a marginally less theoretical bug
where guest_cpuid_is_amd() would incorrectly return false for a guest
CPU with "AMDisbetter!" as its vendor string.

Fixes: a0c0feb5 ("KVM: x86: reserve bit 8 of non-leaf PDPEs and PML4Es in 64-bit mode on AMD")
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b7fb8488
...@@ -219,7 +219,7 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu) ...@@ -219,7 +219,7 @@ static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
struct kvm_cpuid_entry2 *best; struct kvm_cpuid_entry2 *best;
best = kvm_find_cpuid_entry(vcpu, 0, 0); best = kvm_find_cpuid_entry(vcpu, 0, 0);
return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx; return best && is_guest_vendor_amd(best->ebx, best->ecx, best->edx);
} }
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu) static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
......
...@@ -2723,9 +2723,7 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt) ...@@ -2723,9 +2723,7 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
eax = ecx = 0; eax = ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx return is_guest_vendor_intel(ebx, ecx, edx);
&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
} }
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
...@@ -2744,34 +2742,16 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) ...@@ -2744,34 +2742,16 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
ecx = 0x00000000; ecx = 0x00000000;
ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false); ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
/* /*
* Intel ("GenuineIntel") * remark: Intel CPUs only support "syscall" in 64bit longmode. Also a
* remark: Intel CPUs only support "syscall" in 64bit * 64bit guest with a 32bit compat-app running will #UD !! While this
* longmode. Also an 64bit guest with a * behaviour can be fixed (by emulating) into AMD response - CPUs of
* 32bit compat-app running will #UD !! While this * AMD can't behave like Intel.
* behaviour can be fixed (by emulating) into AMD
* response - CPUs of AMD can't behave like Intel.
*/ */
if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && if (is_guest_vendor_intel(ebx, ecx, edx))
ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
return false; return false;
/* AMD ("AuthenticAMD") */ if (is_guest_vendor_amd(ebx, ecx, edx) ||
if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && is_guest_vendor_hygon(ebx, ecx, edx))
ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
return true;
/* AMD ("AMDisbetter!") */
if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
/* Hygon ("HygonGenuine") */
if (ebx == X86EMUL_CPUID_VENDOR_HygonGenuine_ebx &&
ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx)
return true; return true;
/* /*
......
...@@ -396,6 +396,30 @@ struct x86_emulate_ctxt { ...@@ -396,6 +396,30 @@ struct x86_emulate_ctxt {
#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e #define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 #define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
/*
 * Check whether the CPUID.0 register triple spells out the Intel
 * vendor string "GenuineIntel" (EBX/EDX/ECX ordering per CPUID).
 */
static inline bool is_guest_vendor_intel(u32 ebx, u32 ecx, u32 edx)
{
	if (ebx != X86EMUL_CPUID_VENDOR_GenuineIntel_ebx)
		return false;

	return ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
	       edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
/*
 * Check whether the CPUID.0 register triple matches either AMD vendor
 * string: the modern "AuthenticAMD" or the legacy "AMDisbetter!".
 */
static inline bool is_guest_vendor_amd(u32 ebx, u32 ecx, u32 edx)
{
	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
		return true;

	return ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
	       ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
	       edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx;
}
/*
 * Check whether the CPUID.0 register triple spells out the Hygon
 * vendor string "HygonGenuine".
 */
static inline bool is_guest_vendor_hygon(u32 ebx, u32 ecx, u32 edx)
{
	if (ebx != X86EMUL_CPUID_VENDOR_HygonGenuine_ebx)
		return false;

	return ecx == X86EMUL_CPUID_VENDOR_HygonGenuine_ecx &&
	       edx == X86EMUL_CPUID_VENDOR_HygonGenuine_edx;
}
enum x86_intercept_stage { enum x86_intercept_stage {
X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */
X86_ICPT_PRE_EXCEPT, X86_ICPT_PRE_EXCEPT,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment