Commit 628e04df authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Bugfixes and strengthening the validity checks on inputs from new
  userspace APIs.

  Now I know why I shouldn't prepare pull requests on the weekend: it's
  hard to concentrate if your son is shouting about his latest Minecraft
  builds in your ear. Fortunately all the patches were ready and I just
  had to check the test results..."

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SVM: Fix disable pause loop exit/pause filtering capability on SVM
  KVM: LAPIC: Prevent setting the tscdeadline timer if the lapic is hw disabled
  KVM: arm64: Don't inherit exec permission across page-table levels
  KVM: arm64: Prevent vcpu_has_ptrauth from generating OOL functions
  KVM: nVMX: check for invalid hdr.vmx.flags
  KVM: nVMX: check for required but missing VMCS12 in KVM_SET_NESTED_STATE
  selftests: kvm: do not set guest mode flag
parents ac3a0c84 830f01b0

@@ -380,9 +380,14 @@ struct kvm_vcpu_arch {
 #define vcpu_has_sve(vcpu) (system_supports_sve() && \
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
-#define vcpu_has_ptrauth(vcpu)	((system_supports_address_auth() || \
-				  system_supports_generic_auth()) && \
-				 ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH))
+#ifdef CONFIG_ARM64_PTR_AUTH
+#define vcpu_has_ptrauth(vcpu)					\
+	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||	\
+	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&	\
+	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+#else
+#define vcpu_has_ptrauth(vcpu)	false
+#endif
 #define vcpu_gp_regs(v)	(&(v)->arch.ctxt.gp_regs)
...
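Note: the point of the hunk above (presumably arch/arm64/include/asm/kvm_host.h) is that vcpu_has_ptrauth() becomes a plain compile-time constant when CONFIG_ARM64_PTR_AUTH is off, and otherwise uses cpus_have_final_cap(), so the compiler no longer emits out-of-line copies of the capability helpers. A minimal user-space sketch of the same pattern; CONFIG_FEATURE_X, cpu_has_feature_x and struct vcpu are hypothetical stand-ins, not KVM's code:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_FEATURE_X 1	/* remove this line to "compile the feature out" */

struct vcpu { unsigned long flags; };
#define GUEST_HAS_FEATURE_X 0x1UL

#ifdef CONFIG_FEATURE_X
/* Stand-in for a capability check resolved once at boot. */
static bool cpu_has_feature_x(void) { return true; }
#define vcpu_has_feature_x(v) \
	(cpu_has_feature_x() && ((v)->flags & GUEST_HAS_FEATURE_X))
#else
/* Constant predicate: every use constant-folds away, so no out-of-line
 * helper code is generated for the disabled configuration. */
#define vcpu_has_feature_x(v) false
#endif

int main(void)
{
	struct vcpu v = { .flags = GUEST_HAS_FEATURE_X };
	printf("%d\n", (int)vcpu_has_feature_x(&v));
	return 0;
}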
@@ -1326,7 +1326,7 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, phys_addr_t addr,
 	return true;
 }
 
-static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
+static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr, unsigned long sz)
 {
 	pud_t *pudp;
 	pmd_t *pmdp;
@@ -1338,11 +1338,11 @@ static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
 		return false;
 
 	if (pudp)
-		return kvm_s2pud_exec(pudp);
+		return sz <= PUD_SIZE && kvm_s2pud_exec(pudp);
 	else if (pmdp)
-		return kvm_s2pmd_exec(pmdp);
+		return sz <= PMD_SIZE && kvm_s2pmd_exec(pmdp);
 	else
-		return kvm_s2pte_exec(ptep);
+		return sz == PAGE_SIZE && kvm_s2pte_exec(ptep);
 }
 
 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1958,7 +1958,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * execute permissions, and we preserve whatever we have.
 	 */
 	needs_exec = exec_fault ||
-		(fault_status == FSC_PERM && stage2_is_exec(kvm, fault_ipa));
+		(fault_status == FSC_PERM &&
+		 stage2_is_exec(kvm, fault_ipa, vma_pagesize));
 
 	if (vma_pagesize == PUD_SIZE) {
 		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
...
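Note: the sz argument threaded through stage2_is_exec() (presumably virt/kvm/arm/mmu.c in this release) makes the exec check level-aware: an executable leaf entry is trusted only if the mapping being installed fits the level that entry lives at, so a new mapping cannot inherit exec permission from an entry at a different page-table level. A standalone sketch of the comparison, using hypothetical types (struct leaf, enum leaf_level) rather than real stage-2 descriptors:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PMD_SIZE  (512 * PAGE_SIZE)	/* 2 MiB with 4K pages */
#define PUD_SIZE  (512 * PMD_SIZE)	/* 1 GiB with 4K pages */

enum leaf_level { LEAF_PTE, LEAF_PMD, LEAF_PUD };
struct leaf { enum leaf_level level; bool exec; };

/* Trust the exec bit only if a new mapping of size new_sz fits the
 * level of the existing leaf; e.g. a PMD-sized fault must not reuse
 * the exec bit of a PTE-level entry covering part of the same range. */
static bool leaf_is_exec(const struct leaf *l, unsigned long new_sz)
{
	switch (l->level) {
	case LEAF_PUD: return new_sz <= PUD_SIZE && l->exec;
	case LEAF_PMD: return new_sz <= PMD_SIZE && l->exec;
	default:       return new_sz == PAGE_SIZE && l->exec;
	}
}

int main(void)
{
	struct leaf pte = { LEAF_PTE, true };
	/* A 2 MiB fault must not inherit exec from a 4K PTE: prints 0. */
	printf("%d\n", (int)leaf_is_exec(&pte, PMD_SIZE));
	return 0;
}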
@@ -2195,7 +2195,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
+	if (!kvm_apic_present(vcpu) || apic_lvtt_oneshot(apic) ||
 			apic_lvtt_period(apic))
 		return;
...
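Note: per the shortlog, the guard change above stops kvm_set_lapic_tscdeadline_msr() from arming the timer when the LAPIC is hardware-disabled; kvm_apic_present() implies lapic_in_kernel() plus a hardware-enable check. A simplified standalone model of the old versus new predicate, with a hypothetical struct apic in place of KVM's:

#include <stdbool.h>
#include <stdio.h>

struct apic { bool in_kernel; bool hw_enabled; bool oneshot; bool periodic; };

/* Old guard: only required an in-kernel LAPIC. */
static bool may_set_tscdeadline_old(const struct apic *a)
{
	return a->in_kernel && !a->oneshot && !a->periodic;
}

/* New guard: additionally require the LAPIC to be hardware-enabled. */
static bool may_set_tscdeadline_new(const struct apic *a)
{
	return a->in_kernel && a->hw_enabled && !a->oneshot && !a->periodic;
}

int main(void)
{
	struct apic a = { .in_kernel = true, .hw_enabled = false };
	/* A hw-disabled LAPIC passed the old check but fails the new one. */
	printf("old: %d, new: %d\n",
	       may_set_tscdeadline_old(&a), may_set_tscdeadline_new(&a));
	return 0;
}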
@@ -1090,7 +1090,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	svm->nested.vmcb = 0;
 	svm->vcpu.arch.hflags = 0;
 
-	if (pause_filter_count) {
+	if (!kvm_pause_in_guest(svm->vcpu.kvm)) {
 		control->pause_filter_count = pause_filter_count;
 		if (pause_filter_thresh)
 			control->pause_filter_thresh = pause_filter_thresh;
@@ -2693,7 +2693,7 @@ static int pause_interception(struct vcpu_svm *svm)
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	bool in_kernel = (svm_get_cpl(vcpu) == 0);
 
-	if (pause_filter_thresh)
+	if (!kvm_pause_in_guest(vcpu->kvm))
 		grow_ple_window(vcpu);
 
 	kvm_vcpu_on_spin(vcpu, in_kernel);
@@ -3780,7 +3780,7 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 
 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
-	if (pause_filter_thresh)
+	if (!kvm_pause_in_guest(vcpu->kvm))
 		shrink_ple_window(vcpu);
 }
 
@@ -3958,6 +3958,9 @@ static void svm_vm_destroy(struct kvm *kvm)
 static int svm_vm_init(struct kvm *kvm)
 {
+	if (!pause_filter_count || !pause_filter_thresh)
+		kvm->arch.pause_in_guest = true;
+
 	if (avic) {
 		int ret = avic_vm_init(kvm);
 		if (ret)
...
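Note: before this fix the SVM code tested pause_filter_count in init_vmcb() but pause_filter_thresh in pause_interception() and svm_sched_in(), so zeroing only one module parameter left the sites disagreeing. The fix decides once per VM (svm_vm_init() sets kvm->arch.pause_in_guest) and routes every check through kvm_pause_in_guest(). A minimal sketch of that "decide once, cache per VM" pattern, with hypothetical types:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the module parameters. */
static unsigned int pause_filter_count = 3000;
static unsigned int pause_filter_thresh = 0;	/* user disabled this one */

struct vm { bool pause_in_guest; };

static void vm_init(struct vm *vm)
{
	/* Either parameter being zero disables pause filtering entirely. */
	vm->pause_in_guest = !pause_filter_count || !pause_filter_thresh;
}

/* Every former pause_filter_count/pause_filter_thresh test now consults
 * the single cached decision, so the sites can no longer disagree. */
static bool pause_filtering_enabled(const struct vm *vm)
{
	return !vm->pause_in_guest;
}

int main(void)
{
	struct vm vm;
	vm_init(&vm);
	printf("%d\n", (int)pause_filtering_enabled(&vm));
	return 0;
}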
@@ -6079,6 +6079,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	     ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
 		return -EINVAL;
 
+	if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
+		return -EINVAL;
+
 	/*
 	 * SMM temporarily disables VMX, so we cannot be in guest mode,
 	 * nor can VMLAUNCH/VMRESUME be pending.  Outside SMM, SMM flags
@@ -6108,9 +6111,16 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
 	if (ret)
 		return ret;
 
-	/* Empty 'VMXON' state is permitted */
-	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
-		return 0;
+	/* Empty 'VMXON' state is permitted if no VMCS loaded */
+	if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
+		/* See vmx_has_valid_vmcs12. */
+		if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
+		    (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
+		    (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
+			return -EINVAL;
+		else
+			return 0;
+	}
 
 	if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
 		if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
...
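Note: the new hdr.vmx.flags test rejects any reserved bit from userspace before the rest of the state is processed, and the short-size ("empty VMXON") path now refuses blobs whose flags or vmcs12_pa claim a VMCS12 that the blob is too small to carry. A sketch of the reserved-mask idiom; the flag's numeric value is an assumption here, so treat it as illustrative:

#include <errno.h>
#include <stdint.h>

/* Assumed value for illustration; the only hdr.vmx flag this sketch
 * recognizes. */
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001u

/* Reject any userspace-supplied header with reserved bits set. */
static int check_hdr_vmx_flags(uint32_t flags)
{
	if (flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
		return -EINVAL;
	return 0;
}

Validating the whole mask up front, rather than ignoring unknown bits, keeps room to define new flags later without silently accepting garbage today.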
@@ -47,6 +47,11 @@ static inline struct vmcs12 *get_shadow_vmcs12(struct kvm_vcpu *vcpu)
 	return to_vmx(vcpu)->nested.cached_shadow_vmcs12;
 }
 
+/*
+ * Note: the same condition is checked against the state provided by userspace
+ * in vmx_set_nested_state; if it is satisfied, the nested state must include
+ * the VMCS12.
+ */
 static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
...
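Note: the comment added above documents a save/restore invariant. A hypothetical mirror of it, with a simplified struct in place of KVM's nested state: the predicate used when saving (does this vCPU carry a VMCS12?) and the check applied to userspace input on restore must test the same conditions, or a state blob could round-trip inconsistently.

#include <stdbool.h>
#include <stdint.h>

struct nested_state {
	bool guest_mode;
	bool evmcs;
	uint64_t vmcs12_pa;	/* ~0ull means "none loaded" */
};

/* Save side: this state must be accompanied by a VMCS12. */
static bool has_vmcs12(const struct nested_state *s)
{
	return s->guest_mode || s->evmcs || s->vmcs12_pa != ~0ULL;
}

/* Restore side: a blob too small to contain a VMCS12 is acceptable
 * only if the very same predicate is false. */
static bool small_blob_ok(const struct nested_state *s)
{
	return !has_vmcs12(s);
}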
@@ -76,10 +76,8 @@ void set_default_state(struct kvm_nested_state *state)
 void set_default_vmx_state(struct kvm_nested_state *state, int size)
 {
 	memset(state, 0, size);
-	state->flags = KVM_STATE_NESTED_GUEST_MODE |
-		       KVM_STATE_NESTED_RUN_PENDING;
 	if (have_evmcs)
-		state->flags |= KVM_STATE_NESTED_EVMCS;
+		state->flags = KVM_STATE_NESTED_EVMCS;
 	state->format = 0;
 	state->size = size;
 	state->hdr.vmx.vmxon_pa = 0x1000;
@@ -148,6 +146,11 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	state->hdr.vmx.smm.flags = 1;
 	test_nested_state_expect_einval(vm, state);
 
+	/* Invalid flags are rejected. */
+	set_default_vmx_state(state, state_sz);
+	state->hdr.vmx.flags = ~0;
+	test_nested_state_expect_einval(vm, state);
+
 	/* It is invalid to have vmxon_pa == -1ull and vmcs_pa != -1ull. */
 	set_default_vmx_state(state, state_sz);
 	state->hdr.vmx.vmxon_pa = -1ull;
@@ -185,20 +188,41 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	state->hdr.vmx.smm.flags = KVM_STATE_NESTED_SMM_GUEST_MODE;
 	test_nested_state_expect_einval(vm, state);
 
-	/* Size must be large enough to fit kvm_nested_state and vmcs12. */
+	/*
+	 * Size must be large enough to fit kvm_nested_state and vmcs12
+	 * if VMCS12 physical address is set
+	 */
 	set_default_vmx_state(state, state_sz);
 	state->size = sizeof(*state);
+	state->flags = 0;
+	test_nested_state_expect_einval(vm, state);
+
+	set_default_vmx_state(state, state_sz);
+	state->size = sizeof(*state);
+	state->flags = 0;
+	state->hdr.vmx.vmcs12_pa = -1;
 	test_nested_state(vm, state);
 
-	/* vmxon_pa cannot be the same address as vmcs_pa. */
+	/*
+	 * KVM_SET_NESTED_STATE succeeds with invalid VMCS
+	 * contents but L2 not running.
+	 */
 	set_default_vmx_state(state, state_sz);
-	state->hdr.vmx.vmxon_pa = 0;
-	state->hdr.vmx.vmcs12_pa = 0;
+	state->flags = 0;
+	test_nested_state(vm, state);
+
+	/* Invalid flags are rejected, even if no VMCS loaded. */
+	set_default_vmx_state(state, state_sz);
+	state->size = sizeof(*state);
+	state->flags = 0;
+	state->hdr.vmx.vmcs12_pa = -1;
+	state->hdr.vmx.flags = ~0;
+	test_nested_state_expect_einval(vm, state);
+
+	/* vmxon_pa cannot be the same address as vmcs_pa. */
 	set_default_vmx_state(state, state_sz);
+	state->hdr.vmx.vmxon_pa = 0;
+	state->hdr.vmx.vmcs12_pa = 0;
 	test_nested_state_expect_einval(vm, state);
 
-	/* The revision id for vmcs12 must be VMCS12_REVISION. */
-	set_default_vmx_state(state, state_sz);
-	set_revision_id_for_vmcs12(state, 0);
-	test_nested_state_expect_einval(vm, state);
-
 	/*
...
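Note: each new selftest case follows the same shape: build a default state blob, perturb one field, assert the ioctl's verdict. A generic sketch of the negative-test helper such suites rely on; the fd plumbing is hypothetical, while KVM_SET_NESTED_STATE and struct kvm_nested_state are the real uapi names:

#include <errno.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Issue KVM_SET_NESTED_STATE on a vCPU fd and require EINVAL. */
static void expect_einval(int vcpu_fd, struct kvm_nested_state *state)
{
	int ret = ioctl(vcpu_fd, KVM_SET_NESTED_STATE, state);

	if (ret != -1 || errno != EINVAL)
		fprintf(stderr, "expected EINVAL, got ret=%d errno=%d\n",
			ret, errno);
}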