Commit e9465549 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "ARM64:

   - Fix a regression with pKVM when kmemleak is enabled

   - Add Oliver Upton as an official KVM/arm64 reviewer

  selftests:

   - deal with compiler optimizations around hypervisor exits

  x86:

   - MAINTAINERS reorganization

   - Two SEV fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: SEV: Init target VMCBs in sev_migrate_from
  KVM: x86/svm: add __GFP_ACCOUNT to __sev_dbg_{en,de}crypt_user()
  MAINTAINERS: Reorganize KVM/x86 maintainership
  selftests: KVM: Handle compiler optimizations in ucall
  KVM: arm64: Add Oliver as a reviewer
  KVM: arm64: Prevent kmemleak from accessing pKVM memory
  tools/kvm_stat: fix display of error when multiple processes are found
Parents: 38bc4ac4, 6defa24d
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10846,6 +10846,7 @@ M: Marc Zyngier <maz@kernel.org>
 R: James Morse <james.morse@arm.com>
 R: Alexandru Elisei <alexandru.elisei@arm.com>
 R: Suzuki K Poulose <suzuki.poulose@arm.com>
+R: Oliver Upton <oliver.upton@linux.dev>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: kvmarm@lists.cs.columbia.edu (moderated for non-subscribers)
 S: Maintained
@@ -10912,28 +10913,51 @@ F: tools/testing/selftests/kvm/*/s390x/
 F: tools/testing/selftests/kvm/s390x/
 
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
+M: Sean Christopherson <seanjc@google.com>
 M: Paolo Bonzini <pbonzini@redhat.com>
-R: Sean Christopherson <seanjc@google.com>
-R: Vitaly Kuznetsov <vkuznets@redhat.com>
-R: Wanpeng Li <wanpengli@tencent.com>
-R: Jim Mattson <jmattson@google.com>
-R: Joerg Roedel <joro@8bytes.org>
 L: kvm@vger.kernel.org
 S: Supported
-W: http://www.linux-kvm.org
 T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
 F: arch/x86/include/asm/kvm*
-F: arch/x86/include/asm/pvclock-abi.h
 F: arch/x86/include/asm/svm.h
 F: arch/x86/include/asm/vmx*.h
 F: arch/x86/include/uapi/asm/kvm*
 F: arch/x86/include/uapi/asm/svm.h
 F: arch/x86/include/uapi/asm/vmx.h
-F: arch/x86/kernel/kvm.c
-F: arch/x86/kernel/kvmclock.c
 F: arch/x86/kvm/
 F: arch/x86/kvm/*/
 
+KVM PARAVIRT (KVM/paravirt)
+M: Paolo Bonzini <pbonzini@redhat.com>
+R: Wanpeng Li <wanpengli@tencent.com>
+R: Vitaly Kuznetsov <vkuznets@redhat.com>
+L: kvm@vger.kernel.org
+S: Supported
+T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F: arch/x86/kernel/kvm.c
+F: arch/x86/kernel/kvmclock.c
+F: arch/x86/include/asm/pvclock-abi.h
+F: include/linux/kvm_para.h
+F: include/uapi/linux/kvm_para.h
+F: include/uapi/asm-generic/kvm_para.h
+F: include/asm-generic/kvm_para.h
+F: arch/um/include/asm/kvm_para.h
+F: arch/x86/include/asm/kvm_para.h
+F: arch/x86/include/uapi/asm/kvm_para.h
+
+KVM X86 HYPER-V (KVM/hyper-v)
+M: Vitaly Kuznetsov <vkuznets@redhat.com>
+M: Sean Christopherson <seanjc@google.com>
+M: Paolo Bonzini <pbonzini@redhat.com>
+L: kvm@vger.kernel.org
+S: Supported
+T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
+F: arch/x86/kvm/hyperv.*
+F: arch/x86/kvm/kvm_onhyperv.*
+F: arch/x86/kvm/svm/hyperv.*
+F: arch/x86/kvm/svm/svm_onhyperv.*
+F: arch/x86/kvm/vmx/evmcs.*
+
 KERNFS
 M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M: Tejun Heo <tj@kernel.org>
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -2112,11 +2112,11 @@ static int finalize_hyp_mode(void)
 		return 0;
 
 	/*
-	 * Exclude HYP BSS from kmemleak so that it doesn't get peeked
-	 * at, which would end badly once the section is inaccessible.
-	 * None of other sections should ever be introspected.
+	 * Exclude HYP sections from kmemleak so that they don't get peeked
+	 * at, which would end badly once inaccessible.
 	 */
 	kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+	kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
 
 	return pkvm_drop_host_privileges();
 }
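The fix above unregisters the pKVM carve-out from kmemleak: kmemleak periodically scans kernel memory for pointers, and once the host is deprivileged the hypervisor's reserved region (hyp_mem_base/hyp_mem_size, alongside the already-excluded HYP BSS) is no longer accessible to the host, so a later scan would fault. A minimal sketch of the general pattern, using a hypothetical donate_range_to_hyp() helper (kmemleak_free_part() and __va() are the real kernel APIs used in the hunk; everything else here is illustrative):

#include <linux/kmemleak.h>
#include <linux/mm.h>

/* Hypothetical helper; shows the pattern, not the kernel's exact code. */
static void donate_range_to_hyp(phys_addr_t base, size_t size)
{
	/*
	 * Drop the range from kmemleak's object database before the host
	 * loses access to it; otherwise a future scan would dereference
	 * memory that stage-2 no longer maps for the host.
	 */
	kmemleak_free_part(__va(base), size);

	/* ... hand the range over to the hypervisor here ... */
}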
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -844,7 +844,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 
 	/* If source buffer is not aligned then use an intermediate buffer */
 	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
-		src_tpage = alloc_page(GFP_KERNEL);
+		src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
 		if (!src_tpage)
 			return -ENOMEM;
 
@@ -865,7 +865,7 @@ static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
 	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
 		int dst_offset;
 
-		dst_tpage = alloc_page(GFP_KERNEL);
+		dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
 		if (!dst_tpage) {
 			ret = -ENOMEM;
 			goto e_free;
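Context for the two hunks above: the bounce pages are allocated while servicing a userspace ioctl on behalf of a particular VM, so they should be charged to the caller's memory cgroup rather than accumulate as unaccounted kernel memory. GFP_KERNEL_ACCOUNT is simply GFP_KERNEL | __GFP_ACCOUNT. A short sketch of the pattern, with a hypothetical alloc_bounce_page() helper (IS_ALIGNED(), alloc_page(), and the GFP flags are real kernel APIs):

#include <linux/align.h>
#include <linux/gfp.h>

/* Hypothetical helper mirroring the pattern above, not kernel code. */
static struct page *alloc_bounce_page(unsigned long vaddr)
{
	/* An already 16-byte-aligned buffer needs no intermediate page. */
	if (IS_ALIGNED(vaddr, 16))
		return NULL;

	/*
	 * __GFP_ACCOUNT charges the allocation to the current task's
	 * memcg, so a VM owner hammering the debug ioctls pays for these
	 * pages under its own limits.
	 */
	return alloc_page(GFP_KERNEL_ACCOUNT);
}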
@@ -1665,19 +1665,24 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
 	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
 	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
+	struct kvm_vcpu *dst_vcpu, *src_vcpu;
+	struct vcpu_svm *dst_svm, *src_svm;
 	struct kvm_sev_info *mirror;
+	unsigned long i;
 
 	dst->active = true;
 	dst->asid = src->asid;
 	dst->handle = src->handle;
 	dst->pages_locked = src->pages_locked;
 	dst->enc_context_owner = src->enc_context_owner;
+	dst->es_active = src->es_active;
 
 	src->asid = 0;
 	src->active = false;
 	src->handle = 0;
 	src->pages_locked = 0;
 	src->enc_context_owner = NULL;
+	src->es_active = false;
 
 	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
@@ -1704,26 +1709,21 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 		list_del(&src->mirror_entry);
 		list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
 	}
-}
 
-static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
-{
-	unsigned long i;
-	struct kvm_vcpu *dst_vcpu, *src_vcpu;
-	struct vcpu_svm *dst_svm, *src_svm;
-
-	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
-		return -EINVAL;
-
-	kvm_for_each_vcpu(i, src_vcpu, src) {
-		if (!src_vcpu->arch.guest_state_protected)
-			return -EINVAL;
-	}
+	kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
+		dst_svm = to_svm(dst_vcpu);
 
-	kvm_for_each_vcpu(i, src_vcpu, src) {
-		src_svm = to_svm(src_vcpu);
+		sev_init_vmcb(dst_svm);
 
-		dst_vcpu = kvm_get_vcpu(dst, i);
-		dst_svm = to_svm(dst_vcpu);
+		if (!dst->es_active)
+			continue;
+
+		/*
+		 * Note, the source is not required to have the same number of
+		 * vCPUs as the destination when migrating a vanilla SEV VM.
+		 */
+		src_vcpu = kvm_get_vcpu(dst_kvm, i);
+		src_svm = to_svm(src_vcpu);
 
 		/*
 		 * Transfer VMSA and GHCB state to the destination. Nullify and
@@ -1740,8 +1740,23 @@ static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
 		src_vcpu->arch.guest_state_protected = false;
 	}
-	to_kvm_svm(src)->sev_info.es_active = false;
-	to_kvm_svm(dst)->sev_info.es_active = true;
+}
+
+static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
+{
+	struct kvm_vcpu *src_vcpu;
+	unsigned long i;
+
+	if (!sev_es_guest(src))
+		return 0;
+
+	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
+		return -EINVAL;
+
+	kvm_for_each_vcpu(i, src_vcpu, src) {
+		if (!src_vcpu->arch.guest_state_protected)
+			return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1789,11 +1804,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
 	if (ret)
 		goto out_dst_vcpu;
 
-	if (sev_es_guest(source_kvm)) {
-		ret = sev_es_migrate_from(kvm, source_kvm);
-		if (ret)
-			goto out_source_vcpu;
-	}
+	ret = sev_check_source_vcpus(kvm, source_kvm);
+	if (ret)
+		goto out_source_vcpu;
 
 	sev_migrate_from(kvm, source_kvm);
 	kvm_vm_dead(source_kvm);
@@ -2914,7 +2927,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
 				    count, in);
 }
 
-void sev_es_init_vmcb(struct vcpu_svm *svm)
+static void sev_es_init_vmcb(struct vcpu_svm *svm)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 
@@ -2967,6 +2980,15 @@ void sev_es_init_vmcb(struct vcpu_svm *svm)
 	}
 }
 
+void sev_init_vmcb(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
+	clr_exception_intercept(svm, UD_VECTOR);
+
+	if (sev_es_guest(svm->vcpu.kvm))
+		sev_es_init_vmcb(svm);
+}
+
 void sev_es_vcpu_reset(struct vcpu_svm *svm)
 {
 	/*
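The SEV migration fix consolidates VMCB initialization: previously only vCPU creation applied the SEV(-ES) VMCB bits, so destination vCPUs of an intra-host migration were left with uninitialized VMCBs. sev_init_vmcb() now carries the common bits and dispatches to the (now static) sev_es_init_vmcb(), and sev_migrate_from() invokes it for every destination vCPU. One caveat: the merged hunk fetches the paired source vCPU with kvm_get_vcpu(dst_kvm, i), but the pairing logically wants the source VM; a later upstream fix changed this to src_kvm, as sketched below. This is a simplified, hypothetical rendering of the loop, not the committed code:

/* Simplified sketch of the per-vCPU init/migrate loop, not verbatim. */
static void migrate_vcpu_state(struct kvm *dst_kvm, struct kvm *src_kvm,
			       struct kvm_sev_info *dst)
{
	struct kvm_vcpu *dst_vcpu, *src_vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
		/* Every destination vCPU gets the SEV VMCB bits, ES or not. */
		sev_init_vmcb(to_svm(dst_vcpu));

		if (!dst->es_active)
			continue;

		/*
		 * SEV-ES: pair each destination vCPU with the source vCPU
		 * at the same index; sev_check_source_vcpus() has already
		 * guaranteed matching vCPU counts for the ES case.
		 */
		src_vcpu = kvm_get_vcpu(src_kvm, i);

		/* ... transfer VMSA/GHCB state, then unprotect the source ... */
		src_vcpu->arch.guest_state_protected = false;
	}
}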
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1212,15 +1212,8 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
 		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
 	}
 
-	if (sev_guest(vcpu->kvm)) {
-		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
-		clr_exception_intercept(svm, UD_VECTOR);
-
-		if (sev_es_guest(vcpu->kvm)) {
-			/* Perform SEV-ES specific VMCB updates */
-			sev_es_init_vmcb(svm);
-		}
-	}
+	if (sev_guest(vcpu->kvm))
+		sev_init_vmcb(svm);
 
 	svm_hv_init_vmcb(vmcb);
 	init_vmcb_after_set_cpuid(vcpu);
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -649,10 +649,10 @@ void __init sev_set_cpu_caps(void);
 void __init sev_hardware_setup(void);
 void sev_hardware_unsetup(void);
 int sev_cpu_init(struct svm_cpu_data *sd);
+void sev_init_vmcb(struct vcpu_svm *svm);
 void sev_free_vcpu(struct kvm_vcpu *vcpu);
 int sev_handle_vmgexit(struct kvm_vcpu *vcpu);
 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in);
-void sev_es_init_vmcb(struct vcpu_svm *svm);
 void sev_es_vcpu_reset(struct vcpu_svm *svm);
 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector);
 void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa);
--- a/tools/kvm_stat/kvm_stat
+++ b/tools/kvm_stat/kvm_stat
@@ -1646,7 +1646,8 @@ Press any other key to refresh statistics immediately.
                          .format(values))
             if len(pids) > 1:
                 sys.exit('Error: Multiple processes found (pids: {}). Use "-p"'
-                         ' to specify the desired pid'.format(" ".join(pids)))
+                         ' to specify the desired pid'
+                         .format(" ".join(map(str, pids))))
             namespace.pid = pids[0]
 
     argparser = argparse.ArgumentParser(description=description_text,
--- a/tools/testing/selftests/kvm/lib/aarch64/ucall.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c
@@ -73,20 +73,19 @@ void ucall_uninit(struct kvm_vm *vm)
 
 void ucall(uint64_t cmd, int nargs, ...)
 {
-	struct ucall uc = {
-		.cmd = cmd,
-	};
+	struct ucall uc = {};
 	va_list va;
 	int i;
 
+	WRITE_ONCE(uc.cmd, cmd);
 	nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
 
 	va_start(va, nargs);
 	for (i = 0; i < nargs; ++i)
-		uc.args[i] = va_arg(va, uint64_t);
+		WRITE_ONCE(uc.args[i], va_arg(va, uint64_t));
 	va_end(va);
 
-	*ucall_exit_mmio_addr = (vm_vaddr_t)&uc;
+	WRITE_ONCE(*ucall_exit_mmio_addr, (vm_vaddr_t)&uc);
 }
 
 uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
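The ucall fix addresses a genuine compiler hazard: uc is a stack object whose address is exposed to the host only through an MMIO store, so as far as the compiler can prove, nothing ever reads uc and its initializing stores are dead; an aggressive optimizer may legally elide or defer them past the exit to the hypervisor. WRITE_ONCE() forces volatile stores that must happen, and happen before the MMIO write. A standalone illustration of the idiom, with a minimal stand-in for the kernel's WRITE_ONCE() (the selftests get the real macro from their own headers; names below are hypothetical):

#include <stdint.h>

/* Minimal stand-in for the kernel macro; volatile stores can't be elided. */
#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))

struct msg { uint64_t cmd; uint64_t arg; };

static uint64_t *exit_mmio_addr;	/* mapped to trapping MMIO in the real test */

static void hypercall(uint64_t cmd, uint64_t arg)
{
	struct msg m = {};

	/* Stores the host must observe when the MMIO write below traps. */
	WRITE_ONCE(m.cmd, cmd);
	WRITE_ONCE(m.arg, arg);

	/*
	 * The host reads `m` out of guest memory on the trap, a reader the
	 * compiler cannot see, hence the volatile accesses above.
	 */
	WRITE_ONCE(*exit_mmio_addr, (uint64_t)(uintptr_t)&m);
}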