Commit c48b1fb8 authored by Konrad Rzeszutek Wilk, committed by Stefan Bader

x86/KVM/VMX: Split the VMX MSR LOAD structures to have host/guest numbers

There is no semantic change, but this change allows an unbalanced number of
MSRs to be loaded on VMEXIT and VMENTER, i.e. the number of MSRs to save or
restore on VMEXIT or VMENTER may be different.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

CVE-2018-3620
CVE-2018-3646

[smb: Drop 2 hunks modifying nested which does not exist, yet]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent 57880666
@@ -579,6 +579,11 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
 			(unsigned long *)&pi_desc->control);
 }
 
+struct vmx_msrs {
+	unsigned		nr;
+	struct vmx_msr_entry	val[NR_AUTOLOAD_MSRS];
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu       vcpu;
 	unsigned long         host_rsp;
@@ -606,9 +611,8 @@ struct vcpu_vmx {
 	struct loaded_vmcs   *loaded_vmcs;
 	bool                  __launched; /* temporary, used in vmx_vcpu_run */
 	struct msr_autoload {
-		unsigned nr;
-		struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
-		struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+		struct vmx_msrs guest;
+		struct vmx_msrs host;
 	} msr_autoload;
 	struct {
 		int           loaded;
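
With the count moved into the per-list wrapper, the VM-entry (guest) and VM-exit (host) autoload lists each carry their own nr, which is what later makes an unbalanced number of entries possible. Below is a minimal standalone sketch of the resulting layout; the vmx_msr_entry fields are simplified, and the host-only addition in main() is purely illustrative, not something this patch does:

#include <stdio.h>

#define NR_AUTOLOAD_MSRS 8

/* Simplified stand-in for the kernel's vmx_msr_entry. */
struct vmx_msr_entry {
	unsigned int index;
	unsigned long long value;
};

/* Mirrors the structure added by this patch: each list owns its count. */
struct vmx_msrs {
	unsigned nr;
	struct vmx_msr_entry val[NR_AUTOLOAD_MSRS];
};

struct msr_autoload {
	struct vmx_msrs guest;	/* loaded by the CPU on VM entry */
	struct vmx_msrs host;	/* loaded by the CPU on VM exit  */
};

int main(void)
{
	static struct msr_autoload m;	/* zero-initialized: both lists empty */

	/* With separate counters, a follow-up change could grow only one
	 * list, e.g. an MSR restored on VM exit but never loaded on VM
	 * entry (index 0x48 is an illustrative placeholder). */
	m.host.val[m.host.nr].index = 0x48;
	m.host.val[m.host.nr].value = 0;
	m.host.nr++;

	printf("guest.nr=%u host.nr=%u\n", m.guest.nr, m.host.nr);
	return 0;
}
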
@@ -1771,18 +1775,18 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
 		}
 		break;
 	}
-
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
-	if (i == m->nr)
+	if (i == m->guest.nr)
 		return;
-	--m->nr;
-	m->guest[i] = m->guest[m->nr];
-	m->host[i] = m->host[m->nr];
-	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	--m->guest.nr;
+	--m->host.nr;
+	m->guest.val[i] = m->guest.val[m->guest.nr];
+	m->host.val[i] = m->host.val[m->host.nr];
+	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 }
 
 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
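
The removal path above now shrinks both counts in lockstep and back-fills the freed slot with the last entry of each list. A compact sketch of that swap-with-last step, reusing the structures from the sketch above (the helper name is illustrative; the VMCS count updates of the real function appear only as a comment):

/* Remove the paired entries at slot i. Assumes both lists have the same
 * length, which holds in this patch because entries are always added to
 * the guest and host lists together. */
static void autoload_remove(struct msr_autoload *m, unsigned int i)
{
	m->guest.nr--;
	m->host.nr--;
	m->guest.val[i] = m->guest.val[m->guest.nr];	/* last guest entry fills slot i */
	m->host.val[i] = m->host.val[m->host.nr];	/* host list stays in step */

	/* The real code then publishes the new lengths to the hardware:
	 *   vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	 *   vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	 */
}
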
@@ -1834,24 +1838,25 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
 	}
 
-	for (i = 0; i < m->nr; ++i)
-		if (m->guest[i].index == msr)
+	for (i = 0; i < m->guest.nr; ++i)
+		if (m->guest.val[i].index == msr)
 			break;
 
 	if (i == NR_AUTOLOAD_MSRS) {
 		printk_once(KERN_WARNING "Not enough msr switch entries. "
 				"Can't add msr %x\n", msr);
 		return;
-	} else if (i == m->nr) {
-		++m->nr;
-		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
-		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+	} else if (i == m->guest.nr) {
+		++m->guest.nr;
+		++m->host.nr;
+		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
+		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
 	}
 
-	m->guest[i].index = msr;
-	m->guest[i].value = guest_val;
-	m->host[i].index = msr;
-	m->host[i].value = host_val;
+	m->guest.val[i].index = msr;
+	m->guest.val[i].value = guest_val;
+	m->host.val[i].index = msr;
+	m->host.val[i].value = host_val;
 }
 
 static void reload_tss(void)
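
The add path is the matching find-or-append: if the MSR already has a slot in the guest list that slot is reused, otherwise both counts grow together, bounded by NR_AUTOLOAD_MSRS. A companion sketch in the same illustrative terms (the MSRs special-cased via add_atomic_switch_msr_special and the warning printk of the real function are omitted):

/* Find an existing slot for msr or append a new one; returns -1 when the
 * fixed-size lists are full. */
static int autoload_add(struct msr_autoload *m, unsigned int msr,
			unsigned long long guest_val,
			unsigned long long host_val)
{
	unsigned int i;

	for (i = 0; i < m->guest.nr; i++)	/* reuse the slot if msr is present */
		if (m->guest.val[i].index == msr)
			break;

	if (i == NR_AUTOLOAD_MSRS)		/* no free slot left */
		return -1;

	if (i == m->guest.nr) {			/* appending: grow both counts */
		m->guest.nr++;
		m->host.nr++;
	}

	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;
	m->host.val[i].index = msr;
	m->host.val[i].value = host_val;
	return 0;
}
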
@@ -4949,9 +4954,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
 	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
+	vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
 	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
-	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
+	vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
 
 	if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
 		vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
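
One detail worth noting in this hunk: because nr now sits in front of the entry array, the address written into the VMCS must be that of the embedded val[] array rather than of the wrapper struct, hence the added .val. Expressed with the sketch structures above (the check function is illustrative only):

#include <assert.h>
#include <stddef.h>

/* Passing the address of msr_autoload.host instead of msr_autoload.host.val
 * would hand the hardware the address of the counter, not of the first MSR
 * entry. */
static void check_autoload_layout(struct msr_autoload *m)
{
	assert((void *)&m->host != (void *)m->host.val);
	assert((char *)m->host.val - (char *)&m->host ==
	       (ptrdiff_t)offsetof(struct vmx_msrs, val));
}
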