Commit e911eb3b authored by Yu Zhang, committed by Paolo Bonzini

KVM: x86: Add return value to kvm_cpuid().

Return false in kvm_cpuid() when it fails to find the cpuid
entry. Also, this routine (and its callers) gains a new
argument, check_limit, so that the check_cpuid_limit()
fallback can be skipped when it is not wanted.
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3db13480
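For illustration only, here is a minimal sketch (not part of this commit) of how a caller can use the new calling convention; the helper name guest_has_lm below is hypothetical and mirrors emulator_has_longmode() from the diff:

/*
 * Hypothetical caller sketch (not in this commit): with check_limit == false
 * the check_cpuid_limit() fallback is skipped, and the return value reports
 * whether a guest CPUID entry was found; on failure *eax..*edx are zeroed.
 */
static bool guest_has_lm(struct kvm_vcpu *vcpu)
{
        u32 eax = 0x80000001, ebx, ecx = 0, edx;

        if (!kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false))
                return false;   /* leaf 0x80000001 not exposed to the guest */

        return edx & bit(X86_FEATURE_LM);
}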
@@ -219,8 +219,8 @@ struct x86_emulate_ops {
                            struct x86_instruction_info *info,
                            enum x86_intercept_stage stage);
 
-       void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
-                         u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+       bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, u32 *eax, u32 *ebx,
+                         u32 *ecx, u32 *edx, bool check_limit);
        void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
 
        unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
...
@@ -853,16 +853,24 @@ static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
        return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
 }
 
-void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
+              u32 *ecx, u32 *edx, bool check_limit)
 {
        u32 function = *eax, index = *ecx;
        struct kvm_cpuid_entry2 *best;
+       bool entry_found = true;
 
        best = kvm_find_cpuid_entry(vcpu, function, index);
 
-       if (!best)
+       if (!best) {
+               entry_found = false;
+               if (!check_limit)
+                       goto out;
+
                best = check_cpuid_limit(vcpu, function, index);
+       }
 
+out:
        if (best) {
                *eax = best->eax;
                *ebx = best->ebx;
@@ -870,7 +878,8 @@ void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
                *edx = best->edx;
        } else
                *eax = *ebx = *ecx = *edx = 0;
-       trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx);
+       trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
+       return entry_found;
 }
 EXPORT_SYMBOL_GPL(kvm_cpuid);
 
@@ -883,7 +892,7 @@ int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 
        eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
        ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
-       kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx);
+       kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
        kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
        kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
        kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
...
@@ -21,7 +21,8 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
-void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
+bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
+              u32 *ecx, u32 *edx, bool check_limit);
 
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
...
@@ -2333,7 +2333,7 @@ static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
 
        eax = 0x80000001;
        ecx = 0;
-       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return edx & bit(X86_FEATURE_LM);
 }
 
@@ -2636,7 +2636,7 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
        u32 eax, ebx, ecx, edx;
 
        eax = ecx = 0;
-       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
                && ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
                && edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
@@ -2656,7 +2656,7 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
 
        eax = 0x00000000;
        ecx = 0x00000000;
-       ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+       ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        /*
         * Intel ("GenuineIntel")
         * remark: Intel CPUs only support "syscall" in 64bit
@@ -3551,7 +3551,7 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
        /*
         * Check MOVBE is set in the guest-visible CPUID leaf.
         */
-       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        if (!(ecx & FFL(MOVBE)))
                return emulate_ud(ctxt);
 
@@ -3865,7 +3865,7 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt)
 
        eax = reg_read(ctxt, VCPU_REGS_RAX);
        ecx = reg_read(ctxt, VCPU_REGS_RCX);
-       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, true);
        *reg_write(ctxt, VCPU_REGS_RAX) = eax;
        *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
        *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
@@ -3924,7 +3924,7 @@ static int check_fxsr(struct x86_emulate_ctxt *ctxt)
 {
        u32 eax = 1, ebx, ecx = 0, edx;
 
-       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+       ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
        if (!(edx & FFL(FXSR)))
                return emulate_ud(ctxt);
 
...
@@ -1593,7 +1593,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        }
        init_vmcb(svm);
 
-       kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
+       kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
        kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
 
        if (kvm_vcpu_apicv_active(vcpu) && !init_event)
...
@@ -151,8 +151,8 @@ TRACE_EVENT(kvm_fast_mmio,
  */
 TRACE_EVENT(kvm_cpuid,
        TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
-                unsigned long rcx, unsigned long rdx),
-       TP_ARGS(function, rax, rbx, rcx, rdx),
+                unsigned long rcx, unsigned long rdx, bool found),
+       TP_ARGS(function, rax, rbx, rcx, rdx, found),
 
        TP_STRUCT__entry(
                __field( unsigned int,  function )
@@ -160,6 +160,7 @@ TRACE_EVENT(kvm_cpuid,
                __field( unsigned long, rbx )
                __field( unsigned long, rcx )
                __field( unsigned long, rdx )
+               __field( bool,          found )
        ),
 
        TP_fast_assign(
@@ -168,11 +169,13 @@ TRACE_EVENT(kvm_cpuid,
                __entry->rbx = rbx;
                __entry->rcx = rcx;
                __entry->rdx = rdx;
+               __entry->found = found;
        ),
 
-       TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
+       TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx, cpuid entry %s",
                  __entry->function, __entry->rax,
-                 __entry->rbx, __entry->rcx, __entry->rdx)
+                 __entry->rbx, __entry->rcx, __entry->rdx,
+                 __entry->found ? "found" : "not found")
 );
 
 #define AREG(x) { APIC_##x, "APIC_" #x }
...
@@ -5206,10 +5206,10 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
        return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
 }
 
-static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
-                              u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
+                       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx, bool check_limit)
 {
-       kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
+       return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, check_limit);
 }
 
 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
...