Commit c45ff817 authored by Konrad Rzeszutek Wilk, committed by Greg Kroah-Hartman

x86/KVM/VMX: Extend add_atomic_switch_msr() to allow VMENTER only MSRs

commit 989e3992 upstream

The IA32_FLUSH_CMD MSR needs only to be written on VMENTER. Extend
add_atomic_switch_msr() with an entry_only parameter to allow storing the
MSR only in the guest (ENTRY) MSR array.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 5d3eaa2d
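
What the new flag does, in short: an MSR added with entry_only = true is placed only on the VM-entry (guest) MSR-load list, so nothing is restored for it on VMEXIT; with entry_only = false both the guest and the host lists get an entry, as before. The stand-alone C sketch below models that bookkeeping so the diff is easier to read. It is an illustration, not kernel code: the type and helper names (msr_list, add_switch_msr, the simplified find_msr), the example MSR values, and the omission of the vmcs_write32() count updates and of the "Not enough msr switch entries" overflow check are all assumptions of this sketch; only the entry_only control flow mirrors the patch.

/*
 * Stand-alone sketch (user-space C, not kernel code) of the bookkeeping
 * introduced by this patch.  VMCS count updates and the overflow check
 * are left out; only the entry_only control flow mirrors the diff below.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

struct msr_entry {
	unsigned int index;
	uint64_t value;
};

struct msr_list {
	unsigned int nr;
	struct msr_entry val[NR_AUTOLOAD_MSRS];
};

struct msr_autoload {
	struct msr_list guest;		/* VM-entry MSR-load list */
	struct msr_list host;		/* VM-exit MSR-load list */
};

/* Simplified lookup: slot index, or -1 if the MSR is not in the list yet. */
static int find_msr(const struct msr_list *m, unsigned int msr)
{
	for (unsigned int i = 0; i < m->nr; i++)
		if (m->val[i].index == msr)
			return (int)i;
	return -1;
}

static void add_switch_msr(struct msr_autoload *m, unsigned int msr,
			   uint64_t guest_val, uint64_t host_val,
			   bool entry_only)
{
	int i = find_msr(&m->guest, msr);
	int j = 0;

	/* The host list is only consulted when a host value will be stored. */
	if (!entry_only)
		j = find_msr(&m->host, msr);

	if (i < 0)
		i = (int)m->guest.nr++;
	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;

	/* VMENTER-only MSR: no entry is created in the host (EXIT) list. */
	if (entry_only)
		return;

	if (j < 0)
		j = (int)m->host.nr++;
	m->host.val[j].index = msr;
	m->host.val[j].value = host_val;
}

int main(void)
{
	struct msr_autoload m = { 0 };

	/* Guest/host pair, as all existing callers do (entry_only = false).
	 * 0x500/0xd01 are arbitrary example EFER values. */
	add_switch_msr(&m, 0xc0000080 /* MSR_EFER */, 0x500, 0xd01, false);

	/* VMENTER-only MSR, the new case this patch enables. */
	add_switch_msr(&m, 0x10b /* MSR_IA32_FLUSH_CMD */, 0x1, 0, true);

	printf("guest (entry) entries: %u, host (exit) entries: %u\n",
	       m.guest.nr, m.host.nr);
	return 0;
}

Built with a plain C compiler this prints "guest (entry) entries: 2, host (exit) entries: 1": the FLUSH_CMD-style MSR never gets an exit-side slot.
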
...@@ -2038,9 +2038,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, ...@@ -2038,9 +2038,9 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
} }
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
u64 guest_val, u64 host_val) u64 guest_val, u64 host_val, bool entry_only)
{ {
int i, j; int i, j = 0;
struct msr_autoload *m = &vmx->msr_autoload; struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) { switch (msr) {
...@@ -2076,7 +2076,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, ...@@ -2076,7 +2076,9 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
} }
i = find_msr(&m->guest, msr); i = find_msr(&m->guest, msr);
j = find_msr(&m->host, msr); if (!entry_only)
j = find_msr(&m->host, msr);
if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
printk_once(KERN_WARNING "Not enough msr switch entries. " printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr); "Can't add msr %x\n", msr);
...@@ -2086,12 +2088,16 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, ...@@ -2086,12 +2088,16 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
i = m->guest.nr++; i = m->guest.nr++;
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
} }
m->guest.val[i].index = msr;
m->guest.val[i].value = guest_val;
if (entry_only)
return;
if (j < 0) { if (j < 0) {
j = m->host.nr++; j = m->host.nr++;
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
} }
m->guest.val[i].index = msr;
m->guest.val[i].value = guest_val;
m->host.val[j].index = msr; m->host.val[j].index = msr;
m->host.val[j].value = host_val; m->host.val[j].value = host_val;
} }
...@@ -2150,7 +2156,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) ...@@ -2150,7 +2156,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
guest_efer &= ~EFER_LME; guest_efer &= ~EFER_LME;
if (guest_efer != host_efer) if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER, add_atomic_switch_msr(vmx, MSR_EFER,
guest_efer, host_efer); guest_efer, host_efer, false);
return false; return false;
} else { } else {
guest_efer &= ~ignore_bits; guest_efer &= ~ignore_bits;
...@@ -3314,7 +3320,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) ...@@ -3314,7 +3320,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.ia32_xss = data; vcpu->arch.ia32_xss = data;
if (vcpu->arch.ia32_xss != host_xss) if (vcpu->arch.ia32_xss != host_xss)
add_atomic_switch_msr(vmx, MSR_IA32_XSS, add_atomic_switch_msr(vmx, MSR_IA32_XSS,
vcpu->arch.ia32_xss, host_xss); vcpu->arch.ia32_xss, host_xss, false);
else else
clear_atomic_switch_msr(vmx, MSR_IA32_XSS); clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break; break;
...@@ -8985,7 +8991,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) ...@@ -8985,7 +8991,7 @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
clear_atomic_switch_msr(vmx, msrs[i].msr); clear_atomic_switch_msr(vmx, msrs[i].msr);
else else
add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
msrs[i].host); msrs[i].host, false);
} }
void vmx_arm_hv_timer(struct kvm_vcpu *vcpu) void vmx_arm_hv_timer(struct kvm_vcpu *vcpu)
......
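
Note that every caller touched by this patch (MSR_EFER, MSR_IA32_XSS and the perf MSRs above) passes entry_only = false, so their behaviour is unchanged; the entry_only = true path is first exercised by a later patch in this series which, per the commit message, stores IA32_FLUSH_CMD in the guest (ENTRY) MSR array only.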