Commit 0339eb95 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull more kvm updates from Paolo Bonzini:
 "s390:
   - nested virtualization fixes

  x86:
   - split svm.c

   - miscellaneous fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: VMX: fix crash cleanup when KVM wasn't used
  KVM: X86: Filter out the broadcast dest for IPI fastpath
  KVM: s390: vsie: Fix possible race when shadowing region 3 tables
  KVM: s390: vsie: Fix delivery of addressing exceptions
  KVM: s390: vsie: Fix region 1 ASCE sanity shadow address checks
  KVM: nVMX: don't clear mtf_pending when nested events are blocked
  KVM: VMX: Remove unnecessary exception trampoline in vmx_vmenter
  KVM: SVM: Split svm_vcpu_run inline assembly to separate file
  KVM: SVM: Move SEV code to separate file
  KVM: SVM: Move AVIC code to separate file
  KVM: SVM: Move Nested SVM Implementation to nested.c
  KVM: SVM: Move SVM related files to own sub-directory
parents 9bb71526 dbef2808
arch/s390/kvm/vsie.c
@@ -1202,6 +1202,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		scb_s->iprcc = PGM_ADDRESSING;
 		scb_s->pgmilc = 4;
 		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
+		rc = 1;
 	}
 	return rc;
 }
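The rewind matters because pgmilc = 4 declares a 4-byte instruction length: for the addressing exception to be presented correctly, the guest PSW must point back at the instruction that caused it. A simplified sketch of the idea (hypothetical userspace C; the real __rewind_psw also wraps within 24/31-bit addressing modes):

#include <stdint.h>
#include <stdio.h>

/* Simplified model of __rewind_psw(): step the PSW address back by the
 * instruction length code (ilc) so it points at the faulting instruction.
 * The real helper additionally wraps within the current addressing mode. */
static uint64_t rewind_psw_addr(uint64_t addr, int ilc)
{
	return addr - ilc;
}

int main(void)
{
	uint64_t after = 0x10008;	/* PSW already advanced past the insn */

	printf("faulting insn at %#llx\n",
	       (unsigned long long)rewind_psw_addr(after, 4));
	return 0;
}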
arch/s390/mm/gmap.c
@@ -787,14 +787,18 @@ static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
 static inline unsigned long *gmap_table_walk(struct gmap *gmap,
 					     unsigned long gaddr, int level)
 {
+	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
 	unsigned long *table;
 
 	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
 		return NULL;
 	if (gmap_is_shadow(gmap) && gmap->removed)
 		return NULL;
-	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
+
+	if (asce_type != _ASCE_TYPE_REGION1 &&
+	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
 		return NULL;
+
 	table = gmap->table;
 	switch (gmap->asce & _ASCE_TYPE_MASK) {
 	case _ASCE_TYPE_REGION1:
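For context, each s390 DAT table level above the segment level adds 11 bits of address space, which is where the (asce_type >> 2) * 11 shift comes from. A standalone sketch of the arithmetic (plain userspace C, not kernel code; the ASCE_TYPE_* values mirror the kernel's _ASCE_TYPE_* encoding) shows why region-1 tables must be special-cased: their shift comes out to 64, -1UL << 64 is undefined behavior in C, and a region-1 table legitimately spans the whole 64-bit space, so the range check is simply skipped.

#include <stdio.h>

#define ASCE_TYPE_SEGMENT 0x00
#define ASCE_TYPE_REGION3 0x04
#define ASCE_TYPE_REGION2 0x08
#define ASCE_TYPE_REGION1 0x0c

int main(void)
{
	const int types[] = { ASCE_TYPE_SEGMENT, ASCE_TYPE_REGION3,
			      ASCE_TYPE_REGION2, ASCE_TYPE_REGION1 };
	const char *names[] = { "segment", "region3", "region2", "region1" };

	for (int i = 0; i < 4; i++) {
		int shift = 31 + (types[i] >> 2) * 11;

		/* Valid guest addresses must lie below 1UL << shift; for
		 * region1 the shift is 64, so no rejection mask can be
		 * formed and the check has to be skipped entirely. */
		printf("%-8s table: addresses below 2^%d\n", names[i], shift);
	}
	return 0;
}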
@@ -1840,6 +1844,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
 		goto out_free;
 	} else if (*table & _REGION_ENTRY_ORIGIN) {
 		rc = -EAGAIN;		/* Race with shadow */
+		goto out_free;
 	}
 	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
 	/* mark as invalid as long as the parent table is not protected */
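Without the goto, the -EAGAIN result was computed but ignored, and initialization of the new shadow table proceeded against a parent entry another CPU was concurrently hooking up. A minimal sketch of the retry contract the fix restores (hypothetical stub, not the kernel's actual caller):

#include <errno.h>
#include <stdio.h>

/* Stub standing in for gmap_shadow_r3t(): fails once with -EAGAIN to
 * model losing the race against a concurrent shadow, then succeeds. */
static int shadow_r3t_stub(void)
{
	static int lost_race = 1;
	return lost_race-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
	int rc;

	/* -EAGAIN must reach the caller so the shadow operation is
	 * retried against the now-populated parent table. */
	do {
		rc = shadow_r3t_stub();
	} while (rc == -EAGAIN);

	printf("rc = %d\n", rc);
	return rc ? 1 : 0;
}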
arch/x86/kvm/Makefile
@@ -14,7 +14,7 @@ kvm-y			+= x86.o emulate.o i8259.o irq.o lapic.o \
 			   hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o
 
 kvm-intel-y		+= vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o vmx/evmcs.o vmx/nested.o
-kvm-amd-y		+= svm.o pmu_amd.o
+kvm-amd-y		+= svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o svm/sev.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
 obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
arch/x86/kvm/lapic.c
@@ -59,9 +59,6 @@
 #define MAX_APIC_VECTOR			256
 #define APIC_VECTORS_PER_REG		32
 
-#define APIC_BROADCAST			0xFF
-#define X2APIC_BROADCAST		0xFFFFFFFFul
-
 static bool lapic_timer_advance_dynamic __read_mostly;
 #define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
 #define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
arch/x86/kvm/lapic.h
@@ -17,6 +17,9 @@
 #define APIC_BUS_CYCLE_NS	1
 #define APIC_BUS_FREQUENCY	(1000000000ULL / APIC_BUS_CYCLE_NS)
 
+#define APIC_BROADCAST		0xFF
+#define X2APIC_BROADCAST	0xFFFFFFFFul
+
 enum lapic_mode {
 	LAPIC_MODE_DISABLED = 0,
 	LAPIC_MODE_INVALID = X2APIC_ENABLE,
This diff is collapsed.
arch/x86/kvm/svm/vmenter.S (new file)

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#define WORD_SIZE (BITS_PER_LONG / 8)
/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE
#ifdef CONFIG_X86_64
#define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE
#define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE
#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
#endif
.text
/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa:	unsigned long, physical address of the guest's VMCB
 * @regs:	unsigned long * (to guest registers)
 */
SYM_FUNC_START(__svm_vcpu_run)
push %_ASM_BP
mov %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
push %r15
push %r14
push %r13
push %r12
#else
push %edi
push %esi
#endif
push %_ASM_BX
/* Save @regs. */
push %_ASM_ARG2
/* Save @vmcb. */
push %_ASM_ARG1
/* Move @regs to RAX. */
mov %_ASM_ARG2, %_ASM_AX
/* Load guest registers. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
mov VCPU_RDX(%_ASM_AX), %_ASM_DX
mov VCPU_RBX(%_ASM_AX), %_ASM_BX
mov VCPU_RBP(%_ASM_AX), %_ASM_BP
mov VCPU_RSI(%_ASM_AX), %_ASM_SI
mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
mov VCPU_R8 (%_ASM_AX), %r8
mov VCPU_R9 (%_ASM_AX), %r9
mov VCPU_R10(%_ASM_AX), %r10
mov VCPU_R11(%_ASM_AX), %r11
mov VCPU_R12(%_ASM_AX), %r12
mov VCPU_R13(%_ASM_AX), %r13
mov VCPU_R14(%_ASM_AX), %r14
mov VCPU_R15(%_ASM_AX), %r15
#endif
/* "POP" @vmcb to RAX. */
pop %_ASM_AX
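	/*
	 * Each of the 1:, 3: and 5: labels below pairs with a fixup label
	 * (2:, 4:, 6:) via _ASM_EXTABLE.  If vmload, vmrun or vmsave
	 * faults, the exception handler resumes at the fixup, which
	 * tolerates the fault only while kvm_rebooting is set and
	 * otherwise hits ud2.
	 */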
/* Enter guest mode */
1: vmload %_ASM_AX
jmp 3f
2: cmpb $0, kvm_rebooting
jne 3f
ud2
_ASM_EXTABLE(1b, 2b)
3: vmrun %_ASM_AX
jmp 5f
4: cmpb $0, kvm_rebooting
jne 5f
ud2
_ASM_EXTABLE(3b, 4b)
5: vmsave %_ASM_AX
jmp 7f
6: cmpb $0, kvm_rebooting
jne 7f
ud2
_ASM_EXTABLE(5b, 6b)
7:
/* "POP" @regs to RAX. */
pop %_ASM_AX
/* Save all guest registers. */
mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
mov %r8, VCPU_R8 (%_ASM_AX)
mov %r9, VCPU_R9 (%_ASM_AX)
mov %r10, VCPU_R10(%_ASM_AX)
mov %r11, VCPU_R11(%_ASM_AX)
mov %r12, VCPU_R12(%_ASM_AX)
mov %r13, VCPU_R13(%_ASM_AX)
mov %r14, VCPU_R14(%_ASM_AX)
mov %r15, VCPU_R15(%_ASM_AX)
#endif
/*
* Clear all general purpose registers except RSP and RAX to prevent
* speculative use of the guest's values, even those that are reloaded
* via the stack. In theory, an L1 cache miss when restoring registers
* could lead to speculative execution with the guest's values.
* Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
* free. RSP and RAX are exempt as they are restored by hardware
* during VM-Exit.
*/
xor %ecx, %ecx
xor %edx, %edx
xor %ebx, %ebx
xor %ebp, %ebp
xor %esi, %esi
xor %edi, %edi
#ifdef CONFIG_X86_64
xor %r8d, %r8d
xor %r9d, %r9d
xor %r10d, %r10d
xor %r11d, %r11d
xor %r12d, %r12d
xor %r13d, %r13d
xor %r14d, %r14d
xor %r15d, %r15d
#endif
pop %_ASM_BX
#ifdef CONFIG_X86_64
pop %r12
pop %r13
pop %r14
pop %r15
#else
pop %esi
pop %edi
#endif
pop %_ASM_BP
ret
SYM_FUNC_END(__svm_vcpu_run)
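The calling convention this file exports is small. A sketch of the C-side interface it implies (hedged; svm.c's actual prototype and call site are not quoted here, and enter_svm_guest is a hypothetical wrapper):

/* Interface implied by the asm above: @regs points at the vCPU's
 * general-purpose register array, indexed by the same __VCPU_REGS_*
 * constants the VCPU_* offsets are derived from. */
void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);

/* Hypothetical call site: vmcb_pa is the VMCB's physical address and
 * regs the register file saved/loaded around VMRUN. */
static inline void enter_svm_guest(unsigned long vmcb_pa, unsigned long *regs)
{
	__svm_vcpu_run(vmcb_pa, regs);
}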
arch/x86/kvm/vmx/nested.c
@@ -3645,6 +3645,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
 	 * Clear the MTF state. If a higher priority VM-exit is delivered first,
 	 * this state is discarded.
	 */
-	vmx->nested.mtf_pending = false;
+	if (!block_nested_events)
+		vmx->nested.mtf_pending = false;
 
 	if (lapic_in_kernel(vcpu) &&
arch/x86/kvm/vmx/vmenter.S
@@ -58,12 +58,8 @@ SYM_FUNC_START(vmx_vmenter)
 	ret
 4:	ud2
 
-	.pushsection .fixup, "ax"
-5:	jmp 3b
-	.popsection
-
-	_ASM_EXTABLE(1b, 5b)
-	_ASM_EXTABLE(2b, 5b)
+	_ASM_EXTABLE(1b, 3b)
+	_ASM_EXTABLE(2b, 3b)
 
 SYM_FUNC_END(vmx_vmenter)
arch/x86/kvm/vmx/vmx.c
@@ -2261,10 +2261,6 @@ static int hardware_enable(void)
 	    !hv_get_vp_assist_page(cpu))
 		return -EFAULT;
 
-	INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
-	INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
-	spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
-
 	r = kvm_cpu_vmxon(phys_addr);
 	if (r)
 		return r;
@@ -8044,7 +8040,7 @@ module_exit(vmx_exit);
 
 static int __init vmx_init(void)
 {
-	int r;
+	int r, cpu;
 
 #if IS_ENABLED(CONFIG_HYPERV)
 	/*
@@ -8098,6 +8094,12 @@ static int __init vmx_init(void)
 		return r;
 	}
 
+	for_each_possible_cpu(cpu) {
+		INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
+		INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
+		spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+	}
+
 #ifdef CONFIG_KEXEC_CORE
 	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 			   crash_vmclear_local_loaded_vmcss);
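The reason for moving the initialization out of hardware_enable(): the crash/kexec callback registered just below can run on a CPU where hardware_enable() never ran, so the per-cpu lists must already be valid at module load. A standalone sketch of the eager-initialization pattern (userspace C with a toy list_head, not kernel code):

#include <stdio.h>

/* Toy list_head mirroring the kernel's, for illustration only. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

#define NR_CPUS 4
static struct list_head loaded_vmcss[NR_CPUS];	/* zeroed, like percpu data */

int main(void)
{
	int cpu;

	/* Eager init at "module load": afterwards every CPU's list is a
	 * valid empty list, even on CPUs that never enable VMX. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		INIT_LIST_HEAD(&loaded_vmcss[cpu]);

	/* A cleanup path may now safely inspect any CPU's list; with the
	 * zeroed, never-initialized heads, next would be NULL and walking
	 * the list would crash. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d: %s\n", cpu,
		       list_empty(&loaded_vmcss[cpu]) ? "empty" : "busy");
	return 0;
}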
arch/x86/kvm/x86.c
@@ -1586,7 +1586,8 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
 	if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
 	    ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
-	    ((data & APIC_MODE_MASK) == APIC_DM_FIXED)) {
+	    ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
+	    ((u32)(data >> 32) != X2APIC_BROADCAST)) {
 		data &= ~(1 << 12);
 		kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32));
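The new clause keys off the x2APIC ICR layout: the destination id lives in bits 63:32, and an id of 0xFFFFFFFF is a broadcast the IPI fastpath cannot handle, so such writes must take the slow path. A standalone sketch of the predicate (mask values mirroring the kernel's apicdef.h; ipi_fastpath_ok is a hypothetical name):

#include <stdint.h>
#include <stdio.h>

/* ICR field masks, mirroring arch/x86/include/asm/apicdef.h. */
#define APIC_SHORT_MASK		0xc0000
#define APIC_DEST_NOSHORT	0x0
#define APIC_DEST_MASK		0x800
#define APIC_DEST_PHYSICAL	0x0
#define APIC_MODE_MASK		0x700
#define APIC_DM_FIXED		0x000
#define X2APIC_BROADCAST	0xFFFFFFFFul

/* The fastpath predicate after the fix: a fixed-mode physical IPI with
 * no shorthand, and not addressed to the x2APIC broadcast id. */
static int ipi_fastpath_ok(uint64_t data)
{
	return ((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
	       ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
	       ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
	       ((uint32_t)(data >> 32) != X2APIC_BROADCAST);
}

int main(void)
{
	uint64_t unicast = ((uint64_t)3 << 32) | 0x20;	/* vector 0x20, dest 3 */
	uint64_t bcast = (0xffffffffull << 32) | 0x20;	/* broadcast dest */

	printf("unicast:   %s\n", ipi_fastpath_ok(unicast) ? "fastpath" : "slow path");
	printf("broadcast: %s\n", ipi_fastpath_ok(bcast) ? "fastpath" : "slow path");
	return 0;
}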