Commit 7fd55a02 authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvmarm-5.17' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 updates for Linux 5.17

- Simplification of the 'vcpu first run' handling by integrating it
  into KVM's 'pid change' flow

- Refactoring of the FP and SVE state tracking, also leading to
  a simpler state and less shared data between EL1 and EL2 in
  the nVHE case

- Tidy up the header file usage for the nvhe hyp object

- New HYP unsharing mechanism, finally allowing pages to be
  unmapped from the Stage-1 EL2 page-tables

- Various pKVM cleanups around refcounting and sharing

- A couple of vgic fixes for bugs that would trigger once
  the vcpu xarray rework is merged, but not sooner

- Add minimal support for ARMv8.7's PMU extension

- Rework kvm_pgtable initialisation ahead of the NV work

- New selftest for IRQ injection

- Teach selftests about the lack of default IPA space and
  page sizes

- Expand sysreg selftest to deal with Pointer Authentication

- The usual bunch of cleanups and doc updates
parents 5e4e84f1 1c53a1ae
...@@ -63,6 +63,7 @@ enum __kvm_host_smccc_func { ...@@ -63,6 +63,7 @@ enum __kvm_host_smccc_func {
/* Hypercalls available after pKVM finalisation */ /* Hypercalls available after pKVM finalisation */
__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp, __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc, __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run, __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context, __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
......
...@@ -388,7 +388,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) ...@@ -388,7 +388,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else { } else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
sctlr |= (1 << 25);
sctlr |= SCTLR_ELx_EE;
vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1); vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
} }
} }
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/kvm.h> #include <asm/kvm.h>
#include <asm/kvm_asm.h> #include <asm/kvm_asm.h>
#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED #define __KVM_HAVE_ARCH_INTC_INITIALIZED
...@@ -298,9 +297,6 @@ struct kvm_vcpu_arch { ...@@ -298,9 +297,6 @@ struct kvm_vcpu_arch {
/* Exception Information */ /* Exception Information */
struct kvm_vcpu_fault_info fault; struct kvm_vcpu_fault_info fault;
/* State of various workarounds, see kvm_asm.h for bit assignment */
u64 workaround_flags;
/* Miscellaneous vcpu state flags */ /* Miscellaneous vcpu state flags */
u64 flags; u64 flags;
...@@ -321,8 +317,8 @@ struct kvm_vcpu_arch { ...@@ -321,8 +317,8 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state; struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state; struct kvm_guest_debug_arch external_debug_state;
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */ struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
struct task_struct *parent_task;
struct { struct {
/* {Break,watch}point registers */ /* {Break,watch}point registers */
...@@ -367,9 +363,6 @@ struct kvm_vcpu_arch { ...@@ -367,9 +363,6 @@ struct kvm_vcpu_arch {
int target; int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Detect first run of a vcpu */
bool has_run_once;
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */ /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2; u64 vsesr_el2;
...@@ -411,20 +404,17 @@ struct kvm_vcpu_arch { ...@@ -411,20 +404,17 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_DEBUG_DIRTY (1 << 0) #define KVM_ARM64_DEBUG_DIRTY (1 << 0)
#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */ #define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */ #define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */ #define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */ #define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */ #define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */ #define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */ #define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */
/*
* Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
* set together with an exception...
*/
#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */ #define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
/* /*
* When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
* take the following values: * take the following values:
...@@ -442,11 +432,14 @@ struct kvm_vcpu_arch { ...@@ -442,11 +432,14 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11) #define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11) #define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11)
/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */
#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14)
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
KVM_GUESTDBG_SINGLESTEP)
#define vcpu_has_sve(vcpu) (system_supports_sve() && \ #define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE)) ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
...@@ -606,6 +599,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, ...@@ -606,6 +599,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
void kvm_arm_halt_guest(struct kvm *kvm); void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm); void kvm_arm_resume_guest(struct kvm *kvm);
#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)
#ifndef __KVM_NVHE_HYPERVISOR__ #ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...) \ #define kvm_call_hyp_nvhe(f, ...) \
({ \ ({ \
...@@ -736,8 +731,10 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, ...@@ -736,8 +731,10 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
/* Guest/host FPSIMD coordination helpers */ /* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{ {
...@@ -748,12 +745,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr) ...@@ -748,12 +745,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu); void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
#ifdef CONFIG_KVM
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
return kvm_arch_vcpu_run_map_fp(vcpu);
}
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr); void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr); void kvm_clr_pmu_events(u32 clr);
......
...@@ -90,7 +90,6 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu); ...@@ -90,7 +90,6 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr); void __sve_restore_state(void *sve_pffr, u32 *fpsr);
#ifndef __KVM_NVHE_HYPERVISOR__ #ifndef __KVM_NVHE_HYPERVISOR__
......
...@@ -150,6 +150,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) ...@@ -150,6 +150,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#include <asm/kvm_pgtable.h> #include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h> #include <asm/stage2_pgtable.h>
int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot); int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr, void __iomem **kaddr,
......
...@@ -251,6 +251,27 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt); ...@@ -251,6 +251,27 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt);
int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
enum kvm_pgtable_prot prot); enum kvm_pgtable_prot prot);
/**
* kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
* @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
* @addr: Virtual address from which to remove the mapping.
* @size: Size of the mapping.
*
* The offset of @addr within a page is ignored, @size is rounded-up to
* the next page boundary and @phys is rounded-down to the previous page
* boundary.
*
* TLB invalidation is performed for each page-table entry cleared during the
* unmapping operation and the reference count for the page-table page
* containing the cleared entry is decremented, with unreferenced pages being
* freed. The unmapping operation will stop early if it encounters either an
* invalid page-table entry or a valid block mapping which maps beyond the range
* being unmapped.
*
* Return: Number of bytes unmapped, which may be 0.
*/
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
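For illustration, here is a minimal sketch of how a caller might drive this unmap helper when tearing down a shared range, modelled on the hyp_complete_unshare() path added later in this series; the helper name and its error handling are assumptions for this sketch, and pkvm_pgtable is the EL2 stage-1 page-table instance set up elsewhere:

/*
 * Sketch only: unmap nr_pages previously mapped at addr in the hyp
 * stage-1 and treat a partial unmap as a failure, mirroring the pKVM
 * unshare path.
 */
static int sketch_hyp_unmap_range(u64 addr, u64 nr_pages)
{
	u64 size = nr_pages * PAGE_SIZE;
	u64 unmapped = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);

	/* The walk stops early on an invalid entry or an oversized block */
	return (unmapped != size) ? -EFAULT : 0;
}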
/** /**
* kvm_get_vtcr() - Helper to construct VTCR_EL2 * kvm_get_vtcr() - Helper to construct VTCR_EL2
* @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register. * @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
...@@ -270,8 +291,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); ...@@ -270,8 +291,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
/** /**
* __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table. * __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
* @pgt: Uninitialised page-table structure to initialise. * @pgt: Uninitialised page-table structure to initialise.
 * @arch: Arch-specific KVM structure representing the guest virtual
 * machine.
 * @mmu: S2 MMU context for this S2 translation
* @mm_ops: Memory management callbacks. * @mm_ops: Memory management callbacks.
* @flags: Stage-2 configuration flags. * @flags: Stage-2 configuration flags.
* @force_pte_cb: Function that returns true if page level mappings must * @force_pte_cb: Function that returns true if page level mappings must
...@@ -279,13 +299,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift); ...@@ -279,13 +299,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
* *
* Return: 0 on success, negative error code on failure. * Return: 0 on success, negative error code on failure.
*/ */
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops, struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags, enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb); kvm_pgtable_force_pte_cb_t force_pte_cb);
#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
__kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL)
#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
__kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
/** /**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table. * kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 - Google LLC
* Author: Quentin Perret <qperret@google.com>
*/
#ifndef __ARM64_KVM_PKVM_H__
#define __ARM64_KVM_PKVM_H__
#include <linux/memblock.h>
#include <asm/kvm_pgtable.h>
#define HYP_MEMBLOCK_REGIONS 128
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
unsigned long total = 0, i;
/* Provision the worst case scenario */
for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
total += nr_pages;
}
return total;
}
static inline unsigned long __hyp_pgtable_total_pages(void)
{
unsigned long res = 0, i;
/* Cover all of memory with page-granularity */
for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
}
return res;
}
static inline unsigned long hyp_s1_pgtable_pages(void)
{
unsigned long res;
res = __hyp_pgtable_total_pages();
/* Allow 1 GiB for private mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
static inline unsigned long host_s2_pgtable_pages(void)
{
unsigned long res;
/*
* Include an extra 16 pages to safely upper-bound the worst case of
* concatenated pgds.
*/
res = __hyp_pgtable_total_pages() + 16;
/* Allow 1 GiB for MMIO mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
#endif /* __ARM64_KVM_PKVM_H__ */
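To make the provisioning arithmetic above concrete, here is a worked example assuming 4KiB pages (PTRS_PER_PTE == 512) and KVM_PGTABLE_MAX_LEVELS == 4; both values are assumptions for this illustration:

/*
 * Worst-case page-table pages needed to map 1GiB at page granularity:
 *
 *   nr_pages = SZ_1G >> PAGE_SHIFT        = 262144
 *   level 0:  DIV_ROUND_UP(262144, 512)   = 512
 *   level 1:  DIV_ROUND_UP(512, 512)      = 1
 *   level 2:  DIV_ROUND_UP(1, 512)        = 1
 *   level 3:  DIV_ROUND_UP(1, 512)        = 1
 *
 *   __hyp_pgtable_max_pages(262144)       = 515 pages
 *
 * hyp_s1_pgtable_pages() sums this bound over every hyp memblock and
 * adds the same worst-case allowance for 1GiB of private hypervisor
 * mappings; host_s2_pgtable_pages() additionally reserves 16 pages for
 * concatenated stage-2 pgds plus a 1GiB allowance for MMIO mappings.
 */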
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <linux/refcount.h> #include <linux/refcount.h>
#include <asm/cpufeature.h>
typedef struct { typedef struct {
atomic64_t id; atomic64_t id;
......
...@@ -937,6 +937,7 @@ ...@@ -937,6 +937,7 @@
#define ID_AA64DFR0_PMUVER_8_1 0x4 #define ID_AA64DFR0_PMUVER_8_1 0x4
#define ID_AA64DFR0_PMUVER_8_4 0x5 #define ID_AA64DFR0_PMUVER_8_4 0x5
#define ID_AA64DFR0_PMUVER_8_5 0x6 #define ID_AA64DFR0_PMUVER_8_5 0x6
#define ID_AA64DFR0_PMUVER_8_7 0x7
#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf #define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
#define ID_AA64DFR0_PMSVER_8_2 0x1 #define ID_AA64DFR0_PMSVER_8_2 0x1
......
...@@ -111,7 +111,6 @@ int main(void) ...@@ -111,7 +111,6 @@ int main(void)
#ifdef CONFIG_KVM #ifdef CONFIG_KVM
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1)); DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2)); DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs)); DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1])); DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
......
...@@ -78,7 +78,11 @@ ...@@ -78,7 +78,11 @@
* indicate whether or not the userland FPSIMD state of the current task is * indicate whether or not the userland FPSIMD state of the current task is
* present in the registers. The flag is set unless the FPSIMD registers of this * present in the registers. The flag is set unless the FPSIMD registers of this
* CPU currently contain the most recent userland FPSIMD state of the current * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 * task. If the task is behaving as a VMM, then this will be managed by
* KVM which will clear it to indicate that the vcpu FPSIMD state is currently
* loaded on the CPU, allowing the state to be saved if a FPSIMD-aware
* softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
* flag the register state as invalid.
* *
* In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
* save the task's FPSIMD context back to task_struct from softirq context. * save the task's FPSIMD context back to task_struct from softirq context.
......
# SPDX-License-Identifier: GPL-2.0-only
hyp_constants.h
...@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/ ...@@ -13,7 +13,7 @@ obj-$(CONFIG_KVM) += hyp/
kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \ kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o \ inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o \ guest.o debug.o reset.o sys_regs.o \
vgic-sys-reg-v3.o fpsimd.o pmu.o \
vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
arch_timer.o trng.o\ arch_timer.o trng.o\
vgic/vgic.o vgic/vgic-init.o \ vgic/vgic.o vgic/vgic-init.o \
vgic/vgic-irqfd.o vgic/vgic-v2.o \ vgic/vgic-irqfd.o vgic/vgic-v2.o \
...@@ -23,3 +23,19 @@ kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \ ...@@ -23,3 +23,19 @@ kvm-y += arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
vgic/vgic-its.o vgic/vgic-debug.o vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
always-y := hyp_constants.h hyp-constants.s
define rule_gen_hyp_constants
$(call filechk,offsets,__HYP_CONSTANTS_H__)
endef
CFLAGS_hyp-constants.o = -I $(srctree)/$(src)/hyp/include
$(obj)/hyp-constants.s: $(src)/hyp/hyp-constants.c FORCE
$(call if_changed_dep,cc_s_c)
$(obj)/hyp_constants.h: $(obj)/hyp-constants.s FORCE
$(call if_changed_rule,gen_hyp_constants)
obj-kvm := $(addprefix $(obj)/, $(kvm-y))
$(obj-kvm): $(obj)/hyp_constants.h
...@@ -146,7 +146,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) ...@@ -146,7 +146,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret) if (ret)
return ret; return ret;
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
ret = kvm_share_hyp(kvm, kvm + 1);
if (ret) if (ret)
goto out_free_stage2_pgd; goto out_free_stage2_pgd;
...@@ -180,6 +180,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) ...@@ -180,6 +180,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kvm_vgic_destroy(kvm); kvm_vgic_destroy(kvm);
kvm_destroy_vcpus(kvm); kvm_destroy_vcpus(kvm);
kvm_unshare_hyp(kvm, kvm + 1);
} }
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
...@@ -334,7 +336,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) ...@@ -334,7 +336,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err) if (err)
return err; return err;
return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
return kvm_share_hyp(vcpu, vcpu + 1);
} }
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
...@@ -343,7 +345,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) ...@@ -343,7 +345,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{ {
if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
static_branch_dec(&userspace_irqchip_in_use); static_branch_dec(&userspace_irqchip_in_use);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
...@@ -561,18 +563,33 @@ static void update_vmid(struct kvm_vmid *vmid) ...@@ -561,18 +563,33 @@ static void update_vmid(struct kvm_vmid *vmid)
spin_unlock(&kvm_vmid_lock); spin_unlock(&kvm_vmid_lock);
} }
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
return vcpu->arch.target >= 0;
}
/*
* Handle both the initialisation that is being done when the vcpu is
* run for the first time, as well as the updates that must be
* performed each time we get a new thread dealing with this vcpu.
*/
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{ {
struct kvm *kvm = vcpu->kvm; struct kvm *kvm = vcpu->kvm;
int ret = 0;
if (likely(vcpu->arch.has_run_once))
return 0;
int ret;
if (!kvm_vcpu_initialized(vcpu))
return -ENOEXEC;
if (!kvm_arm_vcpu_is_finalized(vcpu)) if (!kvm_arm_vcpu_is_finalized(vcpu))
return -EPERM; return -EPERM;
vcpu->arch.has_run_once = true;
ret = kvm_arch_vcpu_run_map_fp(vcpu);
if (ret)
return ret;
if (likely(vcpu_has_run_once(vcpu)))
return 0;
kvm_arm_vcpu_init_debug(vcpu); kvm_arm_vcpu_init_debug(vcpu);
...@@ -584,12 +601,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ...@@ -584,12 +601,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_resources(kvm); ret = kvm_vgic_map_resources(kvm);
if (ret) if (ret)
return ret; return ret;
} else {
/*
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
static_branch_inc(&userspace_irqchip_in_use);
} }
ret = kvm_timer_enable(vcpu); ret = kvm_timer_enable(vcpu);
...@@ -597,6 +608,16 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) ...@@ -597,6 +608,16 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return ret; return ret;
ret = kvm_arm_pmu_v3_enable(vcpu); ret = kvm_arm_pmu_v3_enable(vcpu);
if (ret)
return ret;
if (!irqchip_in_kernel(kvm)) {
/*
* Tell the rest of the code that there are userspace irqchip
* VMs in the wild.
*/
static_branch_inc(&userspace_irqchip_in_use);
}
/* /*
* Initialize traps for protected VMs. * Initialize traps for protected VMs.
...@@ -689,11 +710,6 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu) ...@@ -689,11 +710,6 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
preempt_enable(); preempt_enable();
} }
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
return vcpu->arch.target >= 0;
}
static void check_vcpu_requests(struct kvm_vcpu *vcpu) static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{ {
if (kvm_request_pending(vcpu)) { if (kvm_request_pending(vcpu)) {
...@@ -789,13 +805,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -789,13 +805,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run; struct kvm_run *run = vcpu->run;
int ret; int ret;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
return ret;
if (run->exit_reason == KVM_EXIT_MMIO) { if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu); ret = kvm_handle_mmio_return(vcpu);
if (ret) if (ret)
...@@ -859,6 +868,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -859,6 +868,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
} }
kvm_arm_setup_debug(vcpu); kvm_arm_setup_debug(vcpu);
kvm_arch_vcpu_ctxflush_fp(vcpu);
/************************************************************** /**************************************************************
* Enter the guest * Enter the guest
...@@ -1133,7 +1143,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, ...@@ -1133,7 +1143,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* need to invalidate the I-cache though, as FWB does *not* * need to invalidate the I-cache though, as FWB does *not*
* imply CTR_EL0.DIC. * imply CTR_EL0.DIC.
*/ */
if (vcpu->arch.has_run_once) {
if (vcpu_has_run_once(vcpu)) {
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
stage2_unmap_vm(vcpu->kvm); stage2_unmap_vm(vcpu->kvm);
else else
......
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
*/ */
#include <linux/irqflags.h> #include <linux/irqflags.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h> #include <linux/kvm_host.h>
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/kvm_asm.h> #include <asm/kvm_asm.h>
...@@ -15,6 +14,19 @@ ...@@ -15,6 +14,19 @@
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
#include <asm/sysreg.h> #include <asm/sysreg.h>
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
{
struct task_struct *p = vcpu->arch.parent_task;
struct user_fpsimd_state *fpsimd;
if (!is_protected_kvm_enabled() || !p)
return;
fpsimd = &p->thread.uw.fpsimd_state;
kvm_unshare_hyp(fpsimd, fpsimd + 1);
put_task_struct(p);
}
/* /*
* Called on entry to KVM_RUN unless this vcpu previously ran at least * Called on entry to KVM_RUN unless this vcpu previously ran at least
* once and the most recent prior KVM_RUN for this vcpu was called from * once and the most recent prior KVM_RUN for this vcpu was called from
...@@ -28,36 +40,29 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) ...@@ -28,36 +40,29 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{ {
int ret; int ret;
struct thread_info *ti = &current->thread_info;
struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state; struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
	/*
	 * Make sure the host task thread flags and fpsimd state are
	 * visible to hyp:
	 */
	ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
	if (ret)
		goto error;

	ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
	if (ret)
		goto error;

	if (vcpu->arch.sve_state) {
		void *sve_end;

		sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);

		ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,
					  PAGE_HYP);
		if (ret)
			goto error;
	}

	vcpu->arch.host_thread_info = kern_hyp_va(ti);
	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
error:
	return ret;

	kvm_vcpu_unshare_task_fp(vcpu);

	/* Make sure the host task fpsimd state is visible to hyp: */
	ret = kvm_share_hyp(fpsimd, fpsimd + 1);
	if (ret)
		return ret;

	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);

	/*
	 * We need to keep current's task_struct pinned until its data has been
	 * unshared with the hypervisor to make sure it is not re-used by the
	 * kernel and donated to someone else while already shared -- see
	 * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
	 */
	if (is_protected_kvm_enabled()) {
		get_task_struct(current);
		vcpu->arch.parent_task = current;
	}

	return 0;
} }
/* /*
...@@ -66,26 +71,27 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu) ...@@ -66,26 +71,27 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
* *
* Here, we just set the correct metadata to indicate that the FPSIMD * Here, we just set the correct metadata to indicate that the FPSIMD
* state in the cpu regs (if any) belongs to current on the host. * state in the cpu regs (if any) belongs to current on the host.
*
* TIF_SVE is backed up here, since it may get clobbered with guest state.
* This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
*/ */
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{ {
BUG_ON(!current->mm); BUG_ON(!current->mm);
BUG_ON(test_thread_flag(TIF_SVE));
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_HOST_SVE_IN_USE |
KVM_ARM64_HOST_SVE_ENABLED);
vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
vcpu->arch.flags |= KVM_ARM64_FP_HOST; vcpu->arch.flags |= KVM_ARM64_FP_HOST;
if (test_thread_flag(TIF_SVE))
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN) if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED; vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
} }
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
{
if (test_thread_flag(TIF_FOREIGN_FPSTATE))
vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
else
vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
}
/* /*
* If the guest FPSIMD state was loaded, update the host's context * If the guest FPSIMD state was loaded, update the host's context
* tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu * tracking data mark the CPU FPSIMD regs as dirty and belonging to vcpu
...@@ -115,13 +121,11 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) ...@@ -115,13 +121,11 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{ {
unsigned long flags; unsigned long flags;
bool host_has_sve = system_supports_sve();
bool guest_has_sve = vcpu_has_sve(vcpu);
local_irq_save(flags); local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) { if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
if (guest_has_sve) {
if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR); __vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
/* Restore the VL that was saved when bound to the CPU */ /* Restore the VL that was saved when bound to the CPU */
...@@ -131,7 +135,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) ...@@ -131,7 +135,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
} }
fpsimd_save_and_flush_cpu_state(); fpsimd_save_and_flush_cpu_state();
} else if (has_vhe() && host_has_sve) {
} else if (has_vhe() && system_supports_sve()) {
/* /*
* The FPSIMD/SVE state in the CPU has not been touched, and we * The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
...@@ -145,8 +149,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) ...@@ -145,8 +149,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0); sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
} }
update_thread_flag(TIF_SVE,
vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
update_thread_flag(TIF_SVE, 0);
local_irq_restore(flags); local_irq_restore(flags);
} }
...@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \ ...@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \ -DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o
obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
...@@ -25,9 +25,3 @@ SYM_FUNC_START(__sve_restore_state) ...@@ -25,9 +25,3 @@ SYM_FUNC_START(__sve_restore_state)
sve_load 0, x1, x2, 3 sve_load 0, x1, x2, 3
ret ret
SYM_FUNC_END(__sve_restore_state) SYM_FUNC_END(__sve_restore_state)
SYM_FUNC_START(__sve_save_state)
mov x2, #1
sve_save 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_save_state)
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kbuild.h>
#include <nvhe/memory.h>
int main(void)
{
DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page));
return 0;
}
...@@ -29,7 +29,6 @@ ...@@ -29,7 +29,6 @@
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/thread_info.h>
struct kvm_exception_table_entry { struct kvm_exception_table_entry {
int insn, fixup; int insn, fixup;
...@@ -49,7 +48,7 @@ static inline bool update_fp_enabled(struct kvm_vcpu *vcpu) ...@@ -49,7 +48,7 @@ static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
* trap the accesses. * trap the accesses.
*/ */
if (!system_supports_fpsimd() || if (!system_supports_fpsimd() ||
vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_FP_HOST); KVM_ARM64_FP_HOST);
...@@ -143,16 +142,6 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) ...@@ -143,16 +142,6 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault); return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
} }
static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
{
struct thread_struct *thread;
thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
uw.fpsimd_state);
__sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
}
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{ {
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
...@@ -169,21 +158,14 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) ...@@ -169,21 +158,14 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
*/ */
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{ {
bool sve_guest, sve_host;
bool sve_guest;
u8 esr_ec; u8 esr_ec;
u64 reg; u64 reg;
if (!system_supports_fpsimd()) if (!system_supports_fpsimd())
return false; return false;
if (system_supports_sve()) {
sve_guest = vcpu_has_sve(vcpu); sve_guest = vcpu_has_sve(vcpu);
sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
} else {
sve_guest = false;
sve_host = false;
}
esr_ec = kvm_vcpu_trap_get_class(vcpu); esr_ec = kvm_vcpu_trap_get_class(vcpu);
/* Don't handle SVE traps for non-SVE vcpus here: */ /* Don't handle SVE traps for non-SVE vcpus here: */
...@@ -207,11 +189,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) ...@@ -207,11 +189,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb(); isb();
if (vcpu->arch.flags & KVM_ARM64_FP_HOST) { if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
if (sve_host)
__hyp_sve_save_host(vcpu);
else
__fpsimd_save_state(vcpu->arch.host_fpsimd_state); __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST; vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
} }
......
...@@ -24,6 +24,11 @@ enum pkvm_page_state { ...@@ -24,6 +24,11 @@ enum pkvm_page_state {
PKVM_PAGE_OWNED = 0ULL, PKVM_PAGE_OWNED = 0ULL,
PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0, PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0,
PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1, PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1,
__PKVM_PAGE_RESERVED = KVM_PGTABLE_PROT_SW0 |
KVM_PGTABLE_PROT_SW1,
/* Meta-states which aren't encoded directly in the PTE's SW bits */
PKVM_NOPAGE,
}; };
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1) #define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
...@@ -50,6 +55,7 @@ extern const u8 pkvm_hyp_id; ...@@ -50,6 +55,7 @@ extern const u8 pkvm_hyp_id;
int __pkvm_prot_finalize(void); int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn); int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
bool addr_is_memory(phys_addr_t phys); bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot); int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
......
...@@ -10,13 +10,8 @@ ...@@ -10,13 +10,8 @@
#include <nvhe/memory.h> #include <nvhe/memory.h>
#include <nvhe/spinlock.h> #include <nvhe/spinlock.h>
#define HYP_MEMBLOCK_REGIONS 128
extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
extern struct kvm_pgtable pkvm_pgtable; extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock; extern hyp_spinlock_t pkvm_pgd_lock;
extern struct hyp_pool hpool;
extern u64 __io_map_base;
int hyp_create_idmap(u32 hyp_va_bits); int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void); int hyp_map_vectors(void);
...@@ -39,58 +34,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size, ...@@ -39,58 +34,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
*end = ALIGN(*end, PAGE_SIZE); *end = ALIGN(*end, PAGE_SIZE);
} }
static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
{
unsigned long total = 0, i;
/* Provision the worst case scenario */
for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
total += nr_pages;
}
return total;
}
static inline unsigned long __hyp_pgtable_total_pages(void)
{
unsigned long res = 0, i;
/* Cover all of memory with page-granularity */
for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
}
return res;
}
static inline unsigned long hyp_s1_pgtable_pages(void)
{
unsigned long res;
res = __hyp_pgtable_total_pages();
/* Allow 1 GiB for private mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
static inline unsigned long host_s2_pgtable_pages(void)
{
unsigned long res;
/*
* Include an extra 16 pages to safely upper-bound the worst case of
* concatenated pgds.
*/
res = __hyp_pgtable_total_pages() + 16;
/* Allow 1 GiB for MMIO mappings */
res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
return res;
}
#endif /* __KVM_HYP_MM_H */ #endif /* __KVM_HYP_MM_H */
...@@ -43,6 +43,9 @@ void *hyp_early_alloc_page(void *arg) ...@@ -43,6 +43,9 @@ void *hyp_early_alloc_page(void *arg)
return hyp_early_alloc_contig(1); return hyp_early_alloc_contig(1);
} }
static void hyp_early_alloc_get_page(void *addr) { }
static void hyp_early_alloc_put_page(void *addr) { }
void hyp_early_alloc_init(void *virt, unsigned long size) void hyp_early_alloc_init(void *virt, unsigned long size)
{ {
base = cur = (unsigned long)virt; base = cur = (unsigned long)virt;
...@@ -51,4 +54,6 @@ void hyp_early_alloc_init(void *virt, unsigned long size) ...@@ -51,4 +54,6 @@ void hyp_early_alloc_init(void *virt, unsigned long size)
hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page; hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page;
hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt; hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt;
hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys; hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys;
hyp_early_alloc_mm_ops.get_page = hyp_early_alloc_get_page;
hyp_early_alloc_mm_ops.put_page = hyp_early_alloc_put_page;
} }
...@@ -147,6 +147,13 @@ static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt) ...@@ -147,6 +147,13 @@ static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn); cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
} }
static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, pfn, host_ctxt, 1);
cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
}
static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt) static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{ {
DECLARE_REG(phys_addr_t, phys, host_ctxt, 1); DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
...@@ -184,6 +191,7 @@ static const hcall_t host_hcall[] = { ...@@ -184,6 +191,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_prot_finalize), HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_host_share_hyp), HANDLE_FUNC(__pkvm_host_share_hyp),
HANDLE_FUNC(__pkvm_host_unshare_hyp),
HANDLE_FUNC(__kvm_adjust_pc), HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_vcpu_run), HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context), HANDLE_FUNC(__kvm_flush_vm_context),
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h> #include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h> #include <asm/stage2_pgtable.h>
#include <hyp/fault.h> #include <hyp/fault.h>
...@@ -27,6 +28,26 @@ static struct hyp_pool host_s2_pool; ...@@ -27,6 +28,26 @@ static struct hyp_pool host_s2_pool;
const u8 pkvm_hyp_id = 1; const u8 pkvm_hyp_id = 1;
static void host_lock_component(void)
{
hyp_spin_lock(&host_kvm.lock);
}
static void host_unlock_component(void)
{
hyp_spin_unlock(&host_kvm.lock);
}
static void hyp_lock_component(void)
{
hyp_spin_lock(&pkvm_pgd_lock);
}
static void hyp_unlock_component(void)
{
hyp_spin_unlock(&pkvm_pgd_lock);
}
static void *host_s2_zalloc_pages_exact(size_t size) static void *host_s2_zalloc_pages_exact(size_t size)
{ {
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size)); void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
...@@ -103,19 +124,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base) ...@@ -103,19 +124,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
prepare_host_vtcr(); prepare_host_vtcr();
hyp_spin_lock_init(&host_kvm.lock); hyp_spin_lock_init(&host_kvm.lock);
mmu->arch = &host_kvm.arch;
ret = prepare_s2_pool(pgt_pool_base); ret = prepare_s2_pool(pgt_pool_base);
if (ret) if (ret)
return ret; return ret;
ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch,
ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
&host_kvm.mm_ops, KVM_HOST_S2_FLAGS, &host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
host_stage2_force_pte_cb); host_stage2_force_pte_cb);
if (ret) if (ret)
return ret; return ret;
mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd); mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
mmu->arch = &host_kvm.arch;
mmu->pgt = &host_kvm.pgt; mmu->pgt = &host_kvm.pgt;
WRITE_ONCE(mmu->vmid.vmid_gen, 0); WRITE_ONCE(mmu->vmid.vmid_gen, 0);
WRITE_ONCE(mmu->vmid.vmid, 0); WRITE_ONCE(mmu->vmid.vmid, 0);
...@@ -338,116 +359,446 @@ static int host_stage2_idmap(u64 addr) ...@@ -338,116 +359,446 @@ static int host_stage2_idmap(u64 addr)
prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT; prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
hyp_spin_lock(&host_kvm.lock);
host_lock_component();
ret = host_stage2_adjust_range(addr, &range); ret = host_stage2_adjust_range(addr, &range);
if (ret) if (ret)
goto unlock; goto unlock;
ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot); ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock: unlock:
hyp_spin_unlock(&host_kvm.lock);
host_unlock_component();
return ret; return ret;
} }
static inline bool check_prot(enum kvm_pgtable_prot prot, void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
enum kvm_pgtable_prot required,
enum kvm_pgtable_prot denied)
{ {
return (prot & (required | denied)) == required; struct kvm_vcpu_fault_info fault;
u64 esr, addr;
int ret = 0;
esr = read_sysreg_el2(SYS_ESR);
BUG_ON(!__get_fault_info(esr, &fault));
addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
ret = host_stage2_idmap(addr);
BUG_ON(ret && ret != -EAGAIN);
} }
int __pkvm_host_share_hyp(u64 pfn) /* This corresponds to locking order */
enum pkvm_component_id {
PKVM_ID_HOST,
PKVM_ID_HYP,
};
struct pkvm_mem_transition {
u64 nr_pages;
struct {
enum pkvm_component_id id;
/* Address in the initiator's address space */
u64 addr;
union {
struct {
/* Address in the completer's address space */
u64 completer_addr;
} host;
};
} initiator;
struct {
enum pkvm_component_id id;
} completer;
};
struct pkvm_mem_share {
const struct pkvm_mem_transition tx;
const enum kvm_pgtable_prot completer_prot;
};
struct check_walk_data {
enum pkvm_page_state desired;
enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
};
static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{ {
phys_addr_t addr = hyp_pfn_to_phys(pfn); struct check_walk_data *d = arg;
enum kvm_pgtable_prot prot, cur; kvm_pte_t pte = *ptep;
void *virt = __hyp_va(addr);
enum pkvm_page_state state;
kvm_pte_t pte;
int ret;
if (!addr_is_memory(addr)) if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
return -EINVAL; return -EINVAL;
hyp_spin_lock(&host_kvm.lock); return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
hyp_spin_lock(&pkvm_pgd_lock); }
static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
struct check_walk_data *data)
{
struct kvm_pgtable_walker walker = {
.cb = __check_page_state_visitor,
.arg = data,
.flags = KVM_PGTABLE_WALK_LEAF,
};
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
{
if (!kvm_pte_valid(pte) && pte)
return PKVM_NOPAGE;
return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}
static int __host_check_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
struct check_walk_data d = {
.desired = state,
.get_page_state = host_get_page_state,
};
hyp_assert_lock_held(&host_kvm.lock);
return check_page_state_range(&host_kvm.pgt, addr, size, &d);
}
static int __host_set_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
return host_stage2_idmap_locked(addr, size, prot);
}
static int host_request_owned_transition(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
static int host_request_unshare(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
static int host_initiate_share(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
static int host_initiate_unshare(u64 *completer_addr,
const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
*completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
{
if (!kvm_pte_valid(pte))
return PKVM_NOPAGE;
return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}
static int __hyp_check_page_state_range(u64 addr, u64 size,
enum pkvm_page_state state)
{
struct check_walk_data d = {
.desired = state,
.get_page_state = hyp_get_page_state,
};
hyp_assert_lock_held(&pkvm_pgd_lock);
return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
tx->initiator.id != PKVM_ID_HOST);
}
static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (perms != PAGE_HYP)
return -EPERM;
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
}
static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
return __hyp_check_page_state_range(addr, size,
PKVM_PAGE_SHARED_BORROWED);
}
static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot;
prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
return pkvm_create_mappings_locked(start, end, prot);
}
static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
return (ret != size) ? -EFAULT : 0;
}
static int check_share(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_request_owned_transition(&completer_addr, tx);
break;
default:
ret = -EINVAL;
}
ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL);
if (ret) if (ret)
goto unlock; return ret;
if (!pte)
goto map_shared;
/* switch (tx->completer.id) {
* Check attributes in the host stage-2 PTE. We need the page to be: case PKVM_ID_HYP:
* - mapped RWX as we're sharing memory; ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
* - not borrowed, as that implies absence of ownership. break;
* Otherwise, we can't let it got through default:
*/ ret = -EINVAL;
cur = kvm_pgtable_stage2_pte_prot(pte);
prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
if (!check_prot(cur, PKVM_HOST_MEM_PROT, prot)) {
ret = -EPERM;
goto unlock;
} }
state = pkvm_getstate(cur); return ret;
if (state == PKVM_PAGE_OWNED) }
goto map_shared;
/* static int __do_share(struct pkvm_mem_share *share)
* Tolerate double-sharing the same page, but this requires {
* cross-checking the hypervisor stage-1. const struct pkvm_mem_transition *tx = &share->tx;
*/ u64 completer_addr;
if (state != PKVM_PAGE_SHARED_OWNED) { int ret;
ret = -EPERM;
goto unlock; switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_initiate_share(&completer_addr, tx);
break;
default:
ret = -EINVAL;
} }
ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, NULL);
if (ret) if (ret)
goto unlock; return ret;
/* switch (tx->completer.id) {
* If the page has been shared with the hypervisor, it must be case PKVM_ID_HYP:
* already mapped as SHARED_BORROWED in its stage-1. ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
break;
default:
ret = -EINVAL;
}
return ret;
}
/*
* do_share():
*
* The page owner grants access to another component with a given set
* of permissions.
*
* Initiator: OWNED => SHARED_OWNED
* Completer: NOPAGE => SHARED_BORROWED
*/ */
cur = kvm_pgtable_hyp_pte_prot(pte); static int do_share(struct pkvm_mem_share *share)
prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED); {
if (!check_prot(cur, prot, ~prot)) int ret;
ret = -EPERM;
goto unlock;
map_shared: ret = check_share(share);
/* if (ret)
* If the page is not yet shared, adjust mappings in both page-tables return ret;
* while both locks are held.
return WARN_ON(__do_share(share));
}
static int check_unshare(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_request_unshare(&completer_addr, tx);
break;
default:
ret = -EINVAL;
}
if (ret)
return ret;
switch (tx->completer.id) {
case PKVM_ID_HYP:
ret = hyp_ack_unshare(completer_addr, tx);
break;
default:
ret = -EINVAL;
}
return ret;
}
static int __do_unshare(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
ret = host_initiate_unshare(&completer_addr, tx);
break;
default:
ret = -EINVAL;
}
if (ret)
return ret;
switch (tx->completer.id) {
case PKVM_ID_HYP:
ret = hyp_complete_unshare(completer_addr, tx);
break;
default:
ret = -EINVAL;
}
return ret;
}
/*
* do_unshare():
*
* The page owner revokes access from another component for a range of
* pages which were previously shared using do_share().
*
* Initiator: SHARED_OWNED => OWNED
* Completer: SHARED_BORROWED => NOPAGE
*/ */
prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED); static int do_unshare(struct pkvm_mem_share *share)
ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot); {
BUG_ON(ret); int ret;
prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED); ret = check_unshare(share);
ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot); if (ret)
BUG_ON(ret); return ret;
unlock: return WARN_ON(__do_unshare(share));
hyp_spin_unlock(&pkvm_pgd_lock); }
hyp_spin_unlock(&host_kvm.lock);
int __pkvm_host_share_hyp(u64 pfn)
{
int ret;
u64 host_addr = hyp_pfn_to_phys(pfn);
u64 hyp_addr = (u64)__hyp_va(host_addr);
struct pkvm_mem_share share = {
.tx = {
.nr_pages = 1,
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
.host = {
.completer_addr = hyp_addr,
},
},
.completer = {
.id = PKVM_ID_HYP,
},
},
.completer_prot = PAGE_HYP,
};
host_lock_component();
hyp_lock_component();
ret = do_share(&share);
hyp_unlock_component();
host_unlock_component();
return ret; return ret;
} }
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt) int __pkvm_host_unshare_hyp(u64 pfn)
{ {
struct kvm_vcpu_fault_info fault; int ret;
u64 esr, addr; u64 host_addr = hyp_pfn_to_phys(pfn);
int ret = 0; u64 hyp_addr = (u64)__hyp_va(host_addr);
struct pkvm_mem_share share = {
.tx = {
.nr_pages = 1,
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
.host = {
.completer_addr = hyp_addr,
},
},
.completer = {
.id = PKVM_ID_HYP,
},
},
.completer_prot = PAGE_HYP,
};
esr = read_sysreg_el2(SYS_ESR); host_lock_component();
BUG_ON(!__get_fault_info(esr, &fault)); hyp_lock_component();
addr = (fault.hpfar_el2 & HPFAR_MASK) << 8; ret = do_unshare(&share);
ret = host_stage2_idmap(addr);
BUG_ON(ret && ret != -EAGAIN); hyp_unlock_component();
host_unlock_component();
return ret;
} }
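For context, a hedged sketch of how the host kernel side is expected to drive __pkvm_host_share_hyp()/__pkvm_host_unshare_hyp() implemented above; kvm_share_hyp()/kvm_unshare_hyp() are only declared in this diff, so the loop below (its name included) is an assumption for illustration rather than the in-tree implementation:

/*
 * Illustrative sketch only: with pKVM finalised, share or unshare a
 * kernel VA range with EL2 one pfn at a time via the hypercalls
 * implemented above.
 */
static int sketch_share_hyp_range(void *from, void *to, bool share)
{
	phys_addr_t start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
	phys_addr_t end = PAGE_ALIGN(__pa(to));
	phys_addr_t cur;
	int ret;

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		u64 pfn = cur >> PAGE_SHIFT;

		ret = share ? kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn)
			    : kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn);
		if (ret)
			return ret;
	}

	return 0;
}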
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h> #include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/spectre.h> #include <asm/spectre.h>
#include <nvhe/early_alloc.h> #include <nvhe/early_alloc.h>
...@@ -18,11 +19,12 @@ ...@@ -18,11 +19,12 @@
struct kvm_pgtable pkvm_pgtable; struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock; hyp_spinlock_t pkvm_pgd_lock;
u64 __io_map_base;
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS]; struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr; unsigned int hyp_memblock_nr;
static u64 __io_map_base;
static int __pkvm_create_mappings(unsigned long start, unsigned long size, static int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot) unsigned long phys, enum kvm_pgtable_prot prot)
{ {
......
...@@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages, ...@@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
int i; int i;
hyp_spin_lock_init(&pool->lock); hyp_spin_lock_init(&pool->lock);
pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
for (i = 0; i < pool->max_order; i++) for (i = 0; i < pool->max_order; i++)
INIT_LIST_HEAD(&pool->free_area[i]); INIT_LIST_HEAD(&pool->free_area[i]);
pool->range_start = phys; pool->range_start = phys;
......
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <asm/kvm_hyp.h> #include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h> #include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h> #include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <nvhe/early_alloc.h> #include <nvhe/early_alloc.h>
#include <nvhe/fixed_config.h> #include <nvhe/fixed_config.h>
...@@ -17,7 +18,6 @@ ...@@ -17,7 +18,6 @@
#include <nvhe/mm.h> #include <nvhe/mm.h>
#include <nvhe/trap_handler.h> #include <nvhe/trap_handler.h>
struct hyp_pool hpool;
unsigned long hyp_nr_cpus; unsigned long hyp_nr_cpus;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \ #define hyp_percpu_size ((unsigned long)__per_cpu_end - \
...@@ -27,6 +27,7 @@ static void *vmemmap_base; ...@@ -27,6 +27,7 @@ static void *vmemmap_base;
static void *hyp_pgt_base; static void *hyp_pgt_base;
static void *host_s2_pgt_base; static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops; static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
static int divide_memory_pool(void *virt, unsigned long size) static int divide_memory_pool(void *virt, unsigned long size)
{ {
...@@ -165,6 +166,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, ...@@ -165,6 +166,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
enum kvm_pgtable_walk_flags flag, enum kvm_pgtable_walk_flags flag,
void * const arg) void * const arg)
{ {
struct kvm_pgtable_mm_ops *mm_ops = arg;
enum kvm_pgtable_prot prot; enum kvm_pgtable_prot prot;
enum pkvm_page_state state; enum pkvm_page_state state;
kvm_pte_t pte = *ptep; kvm_pte_t pte = *ptep;
...@@ -173,6 +175,15 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level, ...@@ -173,6 +175,15 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
if (!kvm_pte_valid(pte)) if (!kvm_pte_valid(pte))
return 0; return 0;
/*
* Fix-up the refcount for the page-table pages as the early allocator
* was unable to access the hyp_vmemmap and so the buddy allocator has
* initialised the refcount to '1'.
*/
mm_ops->get_page(ptep);
if (flag != KVM_PGTABLE_WALK_LEAF)
return 0;
if (level != (KVM_PGTABLE_MAX_LEVELS - 1)) if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL; return -EINVAL;
...@@ -205,7 +216,8 @@ static int finalize_host_mappings(void) ...@@ -205,7 +216,8 @@ static int finalize_host_mappings(void)
{ {
struct kvm_pgtable_walker walker = { struct kvm_pgtable_walker walker = {
.cb = finalize_host_mappings_walker, .cb = finalize_host_mappings_walker,
.flags = KVM_PGTABLE_WALK_LEAF,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
.arg = pkvm_pgtable.mm_ops,
}; };
int i, ret; int i, ret;
...@@ -240,19 +252,20 @@ void __noreturn __pkvm_init_finalise(void) ...@@ -240,19 +252,20 @@ void __noreturn __pkvm_init_finalise(void)
if (ret) if (ret)
goto out; goto out;
ret = finalize_host_mappings();
if (ret)
goto out;
pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) { pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_page = hyp_zalloc_hyp_page, .zalloc_page = hyp_zalloc_hyp_page,
.phys_to_virt = hyp_phys_to_virt, .phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys, .virt_to_phys = hyp_virt_to_phys,
.get_page = hpool_get_page, .get_page = hpool_get_page,
.put_page = hpool_put_page, .put_page = hpool_put_page,
.page_count = hyp_page_count,
}; };
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops; pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
ret = finalize_host_mappings();
if (ret)
goto out;
out: out:
/* /*
* We tail-called to here from handle___pkvm_init() and will not return, * We tail-called to here from handle___pkvm_init() and will not return,
......
...@@ -25,7 +25,6 @@ ...@@ -25,7 +25,6 @@
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/thread_info.h>
#include <nvhe/fixed_config.h> #include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h> #include <nvhe/mem_protect.h>
......
...@@ -383,21 +383,6 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte) ...@@ -383,21 +383,6 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
return prot; return prot;
} }
static bool hyp_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
/*
* Tolerate KVM recreating the exact same mapping, or changing software
* bits if the existing mapping was valid.
*/
if (old == new)
return false;
if (!kvm_pte_valid(old))
return true;
return !WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW);
}
static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep, struct hyp_map_data *data) kvm_pte_t *ptep, struct hyp_map_data *data)
{ {
...@@ -407,11 +392,16 @@ static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level, ...@@ -407,11 +392,16 @@ static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
if (!kvm_block_mapping_supported(addr, end, phys, level)) if (!kvm_block_mapping_supported(addr, end, phys, level))
return false; return false;
new = kvm_init_valid_leaf_pte(phys, data->attr, level);
if (hyp_pte_needs_update(old, new))
smp_store_release(ptep, new);

data->phys += granule;
return true;

data->phys += granule;
new = kvm_init_valid_leaf_pte(phys, data->attr, level);
if (old == new)
return true;

if (!kvm_pte_valid(old))
data->mm_ops->get_page(ptep);
else if (WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
return false;

smp_store_release(ptep, new);
return true;
} }
...@@ -433,6 +423,7 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, ...@@ -433,6 +423,7 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
return -ENOMEM; return -ENOMEM;
kvm_set_table_pte(ptep, childp, mm_ops); kvm_set_table_pte(ptep, childp, mm_ops);
mm_ops->get_page(ptep);
return 0; return 0;
} }
...@@ -460,6 +451,69 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, ...@@ -460,6 +451,69 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
return ret; return ret;
} }
struct hyp_unmap_data {
u64 unmapped;
struct kvm_pgtable_mm_ops *mm_ops;
};
static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
kvm_pte_t pte = *ptep, *childp = NULL;
u64 granule = kvm_granule_size(level);
struct hyp_unmap_data *data = arg;
struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
if (!kvm_pte_valid(pte))
return -EINVAL;
if (kvm_pte_table(pte, level)) {
childp = kvm_pte_follow(pte, mm_ops);
if (mm_ops->page_count(childp) != 1)
return 0;
kvm_clear_pte(ptep);
dsb(ishst);
__tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level);
} else {
if (end - addr < granule)
return -EINVAL;
kvm_clear_pte(ptep);
dsb(ishst);
__tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
data->unmapped += granule;
}
dsb(ish);
isb();
mm_ops->put_page(ptep);
if (childp)
mm_ops->put_page(childp);
return 0;
}
u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
struct hyp_unmap_data unmap_data = {
.mm_ops = pgt->mm_ops,
};
struct kvm_pgtable_walker walker = {
.cb = hyp_unmap_walker,
.arg = &unmap_data,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
};
if (!pgt->mm_ops->page_count)
return 0;
kvm_pgtable_walk(pgt, addr, size, &walker);
return unmap_data.unmapped;
}
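
For orientation, a hypothetical caller of the new unmap helper might look like the sketch below. example_unshare_hyp_page() is invented for illustration; the real callers in this series run under pkvm_pgd_lock and feed the result back into the unshare accounting:

/* Hypothetical example only: drop one page from the hyp stage-1 mapping. */
static int example_unshare_hyp_page(u64 hyp_va)
{
u64 unmapped;

unmapped = kvm_pgtable_hyp_unmap(&pkvm_pgtable, hyp_va, PAGE_SIZE);

/* kvm_pgtable_hyp_unmap() returns the number of bytes it tore down. */
return unmapped == PAGE_SIZE ? 0 : -EINVAL;
}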
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits, int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
struct kvm_pgtable_mm_ops *mm_ops) struct kvm_pgtable_mm_ops *mm_ops)
{ {
...@@ -482,8 +536,16 @@ static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep, ...@@ -482,8 +536,16 @@ static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg) enum kvm_pgtable_walk_flags flag, void * const arg)
{ {
struct kvm_pgtable_mm_ops *mm_ops = arg; struct kvm_pgtable_mm_ops *mm_ops = arg;
kvm_pte_t pte = *ptep;
if (!kvm_pte_valid(pte))
return 0;
mm_ops->put_page(ptep);
if (kvm_pte_table(pte, level))
mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
return 0; return 0;
} }
...@@ -491,7 +553,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) ...@@ -491,7 +553,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{ {
struct kvm_pgtable_walker walker = { struct kvm_pgtable_walker walker = {
.cb = hyp_free_walker, .cb = hyp_free_walker,
.flags = KVM_PGTABLE_WALK_TABLE_POST,
.flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
.arg = pgt->mm_ops, .arg = pgt->mm_ops,
}; };
...@@ -1116,13 +1178,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) ...@@ -1116,13 +1178,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
} }
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops, struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags, enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb) kvm_pgtable_force_pte_cb_t force_pte_cb)
{ {
size_t pgd_sz; size_t pgd_sz;
u64 vtcr = arch->vtcr;
u64 vtcr = mmu->arch->vtcr;
u32 ia_bits = VTCR_EL2_IPA(vtcr); u32 ia_bits = VTCR_EL2_IPA(vtcr);
u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
...@@ -1135,7 +1197,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch, ...@@ -1135,7 +1197,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
pgt->ia_bits = ia_bits; pgt->ia_bits = ia_bits;
pgt->start_level = start_level; pgt->start_level = start_level;
pgt->mm_ops = mm_ops; pgt->mm_ops = mm_ops;
pgt->mmu = &arch->mmu;
pgt->mmu = mmu;
pgt->flags = flags; pgt->flags = flags;
pgt->force_pte_cb = force_pte_cb; pgt->force_pte_cb = force_pte_cb;
......
...@@ -24,7 +24,6 @@ ...@@ -24,7 +24,6 @@
#include <asm/fpsimd.h> #include <asm/fpsimd.h>
#include <asm/debug-monitors.h> #include <asm/debug-monitors.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/thread_info.h>
/* VHE specific context */ /* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data); DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
......
...@@ -239,6 +239,9 @@ void free_hyp_pgds(void) ...@@ -239,6 +239,9 @@ void free_hyp_pgds(void)
static bool kvm_host_owns_hyp_mappings(void) static bool kvm_host_owns_hyp_mappings(void)
{ {
if (is_kernel_in_hyp_mode())
return false;
if (static_branch_likely(&kvm_protected_mode_initialized)) if (static_branch_likely(&kvm_protected_mode_initialized))
return false; return false;
...@@ -281,14 +284,117 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr) ...@@ -281,14 +284,117 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
} }
} }
static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
{
phys_addr_t addr;

struct hyp_shared_pfn {
u64 pfn;
int count;
struct rb_node node;
};

static DEFINE_MUTEX(hyp_shared_pfns_lock);
static struct rb_root hyp_shared_pfns = RB_ROOT;

static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
struct rb_node **parent)
{
struct hyp_shared_pfn *this;

*node = &hyp_shared_pfns.rb_node;
*parent = NULL;
while (**node) {
this = container_of(**node, struct hyp_shared_pfn, node);
*parent = **node;
if (this->pfn < pfn)
*node = &((**node)->rb_left);
else if (this->pfn > pfn)
*node = &((**node)->rb_right);
else
return this;
}
return NULL;
}
static int share_pfn_hyp(u64 pfn)
{
struct rb_node **node, *parent;
struct hyp_shared_pfn *this;
int ret = 0;
mutex_lock(&hyp_shared_pfns_lock);
this = find_shared_pfn(pfn, &node, &parent);
if (this) {
this->count++;
goto unlock;
}
this = kzalloc(sizeof(*this), GFP_KERNEL);
if (!this) {
ret = -ENOMEM;
goto unlock;
}
this->pfn = pfn;
this->count = 1;
rb_link_node(&this->node, parent, node);
rb_insert_color(&this->node, &hyp_shared_pfns);
ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
unlock:
mutex_unlock(&hyp_shared_pfns_lock);
return ret;
}
static int unshare_pfn_hyp(u64 pfn)
{
struct rb_node **node, *parent;
struct hyp_shared_pfn *this;
int ret = 0;
mutex_lock(&hyp_shared_pfns_lock);
this = find_shared_pfn(pfn, &node, &parent);
if (WARN_ON(!this)) {
ret = -ENOENT;
goto unlock;
}
this->count--;
if (this->count)
goto unlock;
rb_erase(&this->node, &hyp_shared_pfns);
kfree(this);
ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
unlock:
mutex_unlock(&hyp_shared_pfns_lock);
return ret;
}
int kvm_share_hyp(void *from, void *to)
{
phys_addr_t start, end, cur;
u64 pfn;
int ret;

if (is_kernel_in_hyp_mode())
return 0;

/*
* The share hcall maps things in the 'fixed-offset' region of the hyp
* VA space, so we can only share physically contiguous data-structures
* for now.
*/
if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
return -EINVAL;

if (kvm_host_owns_hyp_mappings())
return create_hyp_mappings(from, to, PAGE_HYP);

start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
end = PAGE_ALIGN(__pa(to));
for (cur = start; cur < end; cur += PAGE_SIZE) {
pfn = __phys_to_pfn(cur);
ret = share_pfn_hyp(pfn);
if (ret)
return ret;
}

for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
__phys_to_pfn(addr));
if (ret)
return ret;
}
...@@ -296,6 +402,22 @@ static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end) ...@@ -296,6 +402,22 @@ static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
return 0; return 0;
} }
void kvm_unshare_hyp(void *from, void *to)
{
phys_addr_t start, end, cur;
u64 pfn;
if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
return;
start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
end = PAGE_ALIGN(__pa(to));
for (cur = start; cur < end; cur += PAGE_SIZE) {
pfn = __phys_to_pfn(cur);
WARN_ON(unshare_pfn_hyp(pfn));
}
}
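
The two host-side helpers above are reference counted per pfn, so nested shares of overlapping objects are fine as long as every kvm_share_hyp() is eventually balanced. A minimal usage sketch follows; example_share_vcpu() is illustrative only, while the real call sites are in the vcpu teardown and SVE finalisation hunks later in this diff:

/* Illustrative pairing of kvm_share_hyp()/kvm_unshare_hyp(). */
static int example_share_vcpu(struct kvm_vcpu *vcpu)
{
int ret;

ret = kvm_share_hyp(vcpu, vcpu + 1);
if (ret)
return ret;

/* ... the vcpu can now be accessed from EL2 ... */

kvm_unshare_hyp(vcpu, vcpu + 1);
return 0;
}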
/** /**
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range * @from: The virtual kernel start address of the range
...@@ -316,12 +438,8 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot) ...@@ -316,12 +438,8 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
if (is_kernel_in_hyp_mode()) if (is_kernel_in_hyp_mode())
return 0; return 0;
if (!kvm_host_owns_hyp_mappings()) {
if (WARN_ON(prot != PAGE_HYP))
return -EPERM;
return pkvm_share_hyp(kvm_kaddr_to_phys(from),
kvm_kaddr_to_phys(to));
}

if (!kvm_host_owns_hyp_mappings())
return -EPERM;
start = start & PAGE_MASK; start = start & PAGE_MASK;
end = PAGE_ALIGN(end); end = PAGE_ALIGN(end);
...@@ -407,6 +525,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, ...@@ -407,6 +525,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
unsigned long addr; unsigned long addr;
int ret; int ret;
if (is_protected_kvm_enabled())
return -EPERM;
*kaddr = ioremap(phys_addr, size); *kaddr = ioremap(phys_addr, size);
if (!*kaddr) if (!*kaddr)
return -ENOMEM; return -ENOMEM;
...@@ -516,7 +637,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) ...@@ -516,7 +637,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
if (!pgt) if (!pgt)
return -ENOMEM; return -ENOMEM;
err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);

mmu->arch = &kvm->arch;
err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
if (err) if (err)
goto out_free_pgtable; goto out_free_pgtable;
...@@ -529,7 +651,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu) ...@@ -529,7 +651,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
mmu->arch = &kvm->arch;
mmu->pgt = pgt; mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd); mmu->pgd_phys = __pa(pgt->pgd);
WRITE_ONCE(mmu->vmid.vmid_gen, 0); WRITE_ONCE(mmu->vmid.vmid_gen, 0);
...@@ -650,6 +771,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, ...@@ -650,6 +771,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_R |
(writable ? KVM_PGTABLE_PROT_W : 0); (writable ? KVM_PGTABLE_PROT_W : 0);
if (is_protected_kvm_enabled())
return -EPERM;
size += offset_in_page(guest_ipa); size += offset_in_page(guest_ipa);
guest_ipa &= PAGE_MASK; guest_ipa &= PAGE_MASK;
......
...@@ -8,10 +8,9 @@ ...@@ -8,10 +8,9 @@
#include <linux/memblock.h> #include <linux/memblock.h>
#include <linux/sort.h> #include <linux/sort.h>
#include <asm/kvm_host.h>
#include <nvhe/memory.h>
#include <nvhe/mm.h>

#include <asm/kvm_pkvm.h>
#include "hyp_constants.h"
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory); static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr); static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
...@@ -82,7 +81,8 @@ void __init kvm_hyp_reserve(void) ...@@ -82,7 +81,8 @@ void __init kvm_hyp_reserve(void)
do { do {
prev = nr_pages; prev = nr_pages;
nr_pages = hyp_mem_pages + prev; nr_pages = hyp_mem_pages + prev;
nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
PAGE_SIZE);
nr_pages += __hyp_pgtable_max_pages(nr_pages); nr_pages += __hyp_pgtable_max_pages(nr_pages);
} while (nr_pages != prev); } while (nr_pages != prev);
hyp_mem_pages += nr_pages; hyp_mem_pages += nr_pages;
......
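
The reservation loop above is a fixed-point iteration: the pages set aside for the hyp vmemmap and page-tables themselves need vmemmap and page-table coverage, so the estimate is recomputed until nr_pages stops growing. Switching from sizeof(struct hyp_page) to the STRUCT_HYP_PAGE_SIZE constant appears to be what lets this file drop the nvhe-private headers in favour of asm/kvm_pkvm.h and hyp_constants.h; that last part is an inference from the include changes, not something the diff states.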
...@@ -28,6 +28,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) ...@@ -28,6 +28,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
case ID_AA64DFR0_PMUVER_8_1: case ID_AA64DFR0_PMUVER_8_1:
case ID_AA64DFR0_PMUVER_8_4: case ID_AA64DFR0_PMUVER_8_4:
case ID_AA64DFR0_PMUVER_8_5: case ID_AA64DFR0_PMUVER_8_5:
case ID_AA64DFR0_PMUVER_8_7:
return GENMASK(15, 0); return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */ default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver); WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
......
...@@ -109,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) ...@@ -109,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
/* /*
* Make sure the reset request is observed if the change to * Make sure the reset request is observed if the change to
* power_state is observed.
* power_off is observed.
*/ */
smp_wmb(); smp_wmb();
......
...@@ -94,22 +94,31 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) ...@@ -94,22 +94,31 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{ {
void *buf; void *buf;
unsigned int vl; unsigned int vl;
size_t reg_sz;
int ret;
vl = vcpu->arch.sve_max_vl; vl = vcpu->arch.sve_max_vl;
/* /*
* Responsibility for these properties is shared between * Responsibility for these properties is shared between
* kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
* kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
* set_sve_vls(). Double-check here just to be sure: * set_sve_vls(). Double-check here just to be sure:
*/ */
if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() || if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
vl > SVE_VL_ARCH_MAX)) vl > SVE_VL_ARCH_MAX))
return -EIO; return -EIO;
buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);

reg_sz = vcpu_sve_state_size(vcpu);
buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
ret = kvm_share_hyp(buf, buf + reg_sz);
if (ret) {
kfree(buf);
return ret;
}
vcpu->arch.sve_state = buf; vcpu->arch.sve_state = buf;
vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED; vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
return 0; return 0;
...@@ -141,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu) ...@@ -141,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{ {
kfree(vcpu->arch.sve_state);
void *sve_state = vcpu->arch.sve_state;
kvm_vcpu_unshare_task_fp(vcpu);
kvm_unshare_hyp(vcpu, vcpu + 1);
if (sve_state)
kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
kfree(sve_state);
} }
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
...@@ -193,10 +208,9 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu) ...@@ -193,10 +208,9 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
* kvm_reset_vcpu - sets core registers and sys_regs to reset value * kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer * @vcpu: The VCPU pointer
* *
* This function finds the right table above and sets the registers on
* the virtual CPU struct to their architecturally defined reset
* values, except for registers whose reset is deferred until
* kvm_arm_vcpu_finalize().
* This function sets the registers on the virtual CPU struct to their
* architecturally defined reset values, except for registers whose reset is
* deferred until kvm_arm_vcpu_finalize().
* *
* Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
* ioctl or as part of handling a request issued by another VCPU in the PSCI * ioctl or as part of handling a request issued by another VCPU in the PSCI
......
...@@ -92,7 +92,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type) ...@@ -92,7 +92,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
return ret; return ret;
kvm_for_each_vcpu(i, vcpu, kvm) { kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.has_run_once)
if (vcpu_has_run_once(vcpu))
goto out_unlock; goto out_unlock;
} }
ret = 0; ret = 0;
......
...@@ -764,10 +764,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm) ...@@ -764,10 +764,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
} }
if (ret) { if (ret) {
/* The current c failed, so we start with the previous one. */
/* The current c failed, so iterate over the previous ones. */
int i;

mutex_lock(&kvm->slots_lock);
for (c--; c >= 0; c--) {
vcpu = kvm_get_vcpu(kvm, c);
for (i = 0; i < c; i++) {
vcpu = kvm_get_vcpu(kvm, i);
vgic_unregister_redist_iodev(vcpu); vgic_unregister_redist_iodev(vcpu);
} }
mutex_unlock(&kvm->slots_lock); mutex_unlock(&kvm->slots_lock);
......
...@@ -1050,7 +1050,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, ...@@ -1050,7 +1050,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
return 0; return 0;
} }
struct kvm_io_device_ops kvm_io_gic_ops = {
const struct kvm_io_device_ops kvm_io_gic_ops = {
.read = dispatch_mmio_read, .read = dispatch_mmio_read,
.write = dispatch_mmio_write, .write = dispatch_mmio_write,
}; };
......
...@@ -34,7 +34,7 @@ struct vgic_register_region { ...@@ -34,7 +34,7 @@ struct vgic_register_region {
}; };
}; };
extern struct kvm_io_device_ops kvm_io_gic_ops;
extern const struct kvm_io_device_ops kvm_io_gic_ops;
#define VGIC_ACCESS_8bit 1 #define VGIC_ACCESS_8bit 1
#define VGIC_ACCESS_32bit 2 #define VGIC_ACCESS_32bit 2
......
...@@ -293,12 +293,12 @@ int vgic_v2_map_resources(struct kvm *kvm) ...@@ -293,12 +293,12 @@ int vgic_v2_map_resources(struct kvm *kvm)
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) || if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) { IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
kvm_err("Need to set vgic cpu and dist addresses first\n"); kvm_debug("Need to set vgic cpu and dist addresses first\n");
return -ENXIO; return -ENXIO;
} }
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) { if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
kvm_err("VGIC CPU and dist frames overlap\n"); kvm_debug("VGIC CPU and dist frames overlap\n");
return -EINVAL; return -EINVAL;
} }
...@@ -345,6 +345,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info) ...@@ -345,6 +345,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
int ret; int ret;
u32 vtr; u32 vtr;
if (is_protected_kvm_enabled()) {
kvm_err("GICv2 not supported in protected mode\n");
return -ENXIO;
}
if (!info->vctrl.start) { if (!info->vctrl.start) {
kvm_err("GICH not present in the firmware table\n"); kvm_err("GICH not present in the firmware table\n");
return -ENXIO; return -ENXIO;
......
...@@ -554,12 +554,12 @@ int vgic_v3_map_resources(struct kvm *kvm) ...@@ -554,12 +554,12 @@ int vgic_v3_map_resources(struct kvm *kvm)
} }
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) { if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
kvm_err("Need to set vgic distributor addresses first\n"); kvm_debug("Need to set vgic distributor addresses first\n");
return -ENXIO; return -ENXIO;
} }
if (!vgic_v3_check_base(kvm)) { if (!vgic_v3_check_base(kvm)) {
kvm_err("VGIC redist and dist frames overlap\n"); kvm_debug("VGIC redist and dist frames overlap\n");
return -EINVAL; return -EINVAL;
} }
...@@ -651,7 +651,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info) ...@@ -651,7 +651,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
} else if (!PAGE_ALIGNED(info->vcpu.start)) { } else if (!PAGE_ALIGNED(info->vcpu.start)) {
pr_warn("GICV physical address 0x%llx not page aligned\n", pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start); (unsigned long long)info->vcpu.start);
} else {
} else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
kvm_vgic_global_state.vcpu_base = info->vcpu.start; kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true; kvm_vgic_global_state.can_emulate_gicv2 = true;
ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2); ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
......
...@@ -5,9 +5,11 @@ ...@@ -5,9 +5,11 @@
#ifndef __KVM_ARM_VGIC_H #ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H #define __KVM_ARM_VGIC_H
#include <linux/kernel.h>
#include <linux/bits.h>
#include <linux/kvm.h> #include <linux/kvm.h>
#include <linux/irqreturn.h> #include <linux/irqreturn.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/static_key.h> #include <linux/static_key.h>
#include <linux/types.h> #include <linux/types.h>
......
...@@ -1322,7 +1322,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); ...@@ -1322,7 +1322,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn); bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn); bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier { struct kvm_irq_ack_notifier {
struct hlist_node link; struct hlist_node link;
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
/aarch64/get-reg-list /aarch64/get-reg-list
/aarch64/psci_cpu_on_test /aarch64/psci_cpu_on_test
/aarch64/vgic_init /aarch64/vgic_init
/aarch64/vgic_irq
/s390x/memop /s390x/memop
/s390x/resets /s390x/resets
/s390x/sync_regs_test /s390x/sync_regs_test
......
...@@ -96,6 +96,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions ...@@ -96,6 +96,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
TEST_GEN_PROGS_aarch64 += aarch64/vgic_init TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += demand_paging_test
TEST_GEN_PROGS_aarch64 += dirty_log_test TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
......
...@@ -382,7 +382,7 @@ static struct kvm_vm *test_vm_create(void) ...@@ -382,7 +382,7 @@ static struct kvm_vm *test_vm_create(void)
ucall_init(vm, NULL); ucall_init(vm, NULL);
test_init_timer_irq(vm); test_init_timer_irq(vm);
vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, GICR_BASE_GPA);
vgic_v3_setup(vm, nr_vcpus, 64, GICD_BASE_GPA, GICR_BASE_GPA);
/* Make all the test's cmdline args visible to the guest */ /* Make all the test's cmdline args visible to the guest */
sync_global_to_guest(vm, test_args); sync_global_to_guest(vm, test_args);
......
...@@ -1014,6 +1014,22 @@ static __u64 sve_rejects_set[] = { ...@@ -1014,6 +1014,22 @@ static __u64 sve_rejects_set[] = {
KVM_REG_ARM64_SVE_VLS, KVM_REG_ARM64_SVE_VLS,
}; };
static __u64 pauth_addr_regs[] = {
ARM64_SYS_REG(3, 0, 2, 1, 0), /* APIAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 1), /* APIAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 2), /* APIBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 1, 3), /* APIBKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 0), /* APDAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 1), /* APDAKEYHI_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 2), /* APDBKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 2, 3) /* APDBKEYHI_EL1 */
};
static __u64 pauth_generic_regs[] = {
ARM64_SYS_REG(3, 0, 2, 3, 0), /* APGAKEYLO_EL1 */
ARM64_SYS_REG(3, 0, 2, 3, 1), /* APGAKEYHI_EL1 */
};
#define BASE_SUBLIST \ #define BASE_SUBLIST \
{ "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), } { "base", .regs = base_regs, .regs_n = ARRAY_SIZE(base_regs), }
#define VREGS_SUBLIST \ #define VREGS_SUBLIST \
...@@ -1025,6 +1041,21 @@ static __u64 sve_rejects_set[] = { ...@@ -1025,6 +1041,21 @@ static __u64 sve_rejects_set[] = {
{ "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \ { "sve", .capability = KVM_CAP_ARM_SVE, .feature = KVM_ARM_VCPU_SVE, .finalize = true, \
.regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \ .regs = sve_regs, .regs_n = ARRAY_SIZE(sve_regs), \
.rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), } .rejects_set = sve_rejects_set, .rejects_set_n = ARRAY_SIZE(sve_rejects_set), }
#define PAUTH_SUBLIST \
{ \
.name = "pauth_address", \
.capability = KVM_CAP_ARM_PTRAUTH_ADDRESS, \
.feature = KVM_ARM_VCPU_PTRAUTH_ADDRESS, \
.regs = pauth_addr_regs, \
.regs_n = ARRAY_SIZE(pauth_addr_regs), \
}, \
{ \
.name = "pauth_generic", \
.capability = KVM_CAP_ARM_PTRAUTH_GENERIC, \
.feature = KVM_ARM_VCPU_PTRAUTH_GENERIC, \
.regs = pauth_generic_regs, \
.regs_n = ARRAY_SIZE(pauth_generic_regs), \
}
static struct vcpu_config vregs_config = { static struct vcpu_config vregs_config = {
.sublists = { .sublists = {
...@@ -1056,11 +1087,30 @@ static struct vcpu_config sve_pmu_config = { ...@@ -1056,11 +1087,30 @@ static struct vcpu_config sve_pmu_config = {
{0}, {0},
}, },
}; };
static struct vcpu_config pauth_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
{0},
},
};
static struct vcpu_config pauth_pmu_config = {
.sublists = {
BASE_SUBLIST,
VREGS_SUBLIST,
PAUTH_SUBLIST,
PMU_SUBLIST,
{0},
},
};
static struct vcpu_config *vcpu_configs[] = { static struct vcpu_config *vcpu_configs[] = {
&vregs_config, &vregs_config,
&vregs_pmu_config, &vregs_pmu_config,
&sve_config, &sve_config,
&sve_pmu_config, &sve_pmu_config,
&pauth_config,
&pauth_pmu_config,
}; };
static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs); static int vcpu_configs_n = ARRAY_SIZE(vcpu_configs);
// SPDX-License-Identifier: GPL-2.0
/*
* vgic_irq.c - Test userspace injection of IRQs
*
* This test validates the injection of IRQs from userspace using various
* methods (e.g., KVM_IRQ_LINE) and modes (e.g., EOI). The guest "asks" the
* host to inject a specific intid via a GUEST_SYNC call, and then checks that
* it received it.
*/
#include <asm/kvm.h>
#include <asm/kvm_para.h>
#include <sys/eventfd.h>
#include <linux/sizes.h>
#include "processor.h"
#include "test_util.h"
#include "kvm_util.h"
#include "gic.h"
#include "gic_v3.h"
#include "vgic.h"
#define GICD_BASE_GPA 0x08000000ULL
#define GICR_BASE_GPA 0x080A0000ULL
#define VCPU_ID 0
/*
* Stores the user specified args; it's passed to the guest and to every test
* function.
*/
struct test_args {
uint32_t nr_irqs; /* number of KVM supported IRQs. */
bool eoi_split; /* 1 is eoir+dir, 0 is eoir only */
bool level_sensitive; /* 1 is level, 0 is edge */
int kvm_max_routes; /* output of KVM_CAP_IRQ_ROUTING */
bool kvm_supports_irqfd; /* output of KVM_CAP_IRQFD */
};
/*
* KVM implements 32 priority levels:
* 0x00 (highest priority) - 0xF8 (lowest priority), in steps of 8
*
* Note that these macros will still be correct in the case that KVM implements
* more priority levels. Also note that 32 is the minimum for GICv3 and GICv2.
*/
#define KVM_NUM_PRIOS 32
#define KVM_PRIO_SHIFT 3 /* steps of 8 = 1 << 3 */
#define KVM_PRIO_STEPS (1 << KVM_PRIO_SHIFT) /* 8 */
#define LOWEST_PRIO (KVM_NUM_PRIOS - 1)
#define CPU_PRIO_MASK (LOWEST_PRIO << KVM_PRIO_SHIFT) /* 0xf8 */
#define IRQ_DEFAULT_PRIO (LOWEST_PRIO - 1)
#define IRQ_DEFAULT_PRIO_REG (IRQ_DEFAULT_PRIO << KVM_PRIO_SHIFT) /* 0xf0 */
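/*
 * Editor's note, not part of the original test: plugging the numbers in,
 * LOWEST_PRIO = 31, CPU_PRIO_MASK = 31 << 3 = 0xf8 and
 * IRQ_DEFAULT_PRIO_REG = 30 << 3 = 0xf0, which is where the 0xf8/0xf0
 * values quoted in the comments above come from.
 */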
static void *dist = (void *)GICD_BASE_GPA;
static void *redist = (void *)GICR_BASE_GPA;
/*
* The kvm_inject_* utilities are used by the guest to ask the host to inject
* interrupts (e.g., using the KVM_IRQ_LINE ioctl).
*/
typedef enum {
KVM_INJECT_EDGE_IRQ_LINE = 1,
KVM_SET_IRQ_LINE,
KVM_SET_IRQ_LINE_HIGH,
KVM_SET_LEVEL_INFO_HIGH,
KVM_INJECT_IRQFD,
KVM_WRITE_ISPENDR,
KVM_WRITE_ISACTIVER,
} kvm_inject_cmd;
struct kvm_inject_args {
kvm_inject_cmd cmd;
uint32_t first_intid;
uint32_t num;
int level;
bool expect_failure;
};
/* Used on the guest side to perform the hypercall. */
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
uint32_t num, int level, bool expect_failure);
/* Used on the host side to get the hypercall info. */
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
struct kvm_inject_args *args);
#define _KVM_INJECT_MULTI(cmd, intid, num, expect_failure) \
kvm_inject_call(cmd, intid, num, -1 /* not used */, expect_failure)
#define KVM_INJECT_MULTI(cmd, intid, num) \
_KVM_INJECT_MULTI(cmd, intid, num, false)
#define _KVM_INJECT(cmd, intid, expect_failure) \
_KVM_INJECT_MULTI(cmd, intid, 1, expect_failure)
#define KVM_INJECT(cmd, intid) \
_KVM_INJECT_MULTI(cmd, intid, 1, false)
#define KVM_ACTIVATE(cmd, intid) \
kvm_inject_call(cmd, intid, 1, 1, false);
struct kvm_inject_desc {
kvm_inject_cmd cmd;
/* can inject SGIs, PPIs, and/or SPIs. */
bool sgi, ppi, spi;
};
static struct kvm_inject_desc inject_edge_fns[] = {
/* sgi ppi spi */
{ KVM_INJECT_EDGE_IRQ_LINE, false, false, true },
{ KVM_INJECT_IRQFD, false, false, true },
{ KVM_WRITE_ISPENDR, true, false, true },
{ 0, },
};
static struct kvm_inject_desc inject_level_fns[] = {
/* sgi ppi spi */
{ KVM_SET_IRQ_LINE_HIGH, false, true, true },
{ KVM_SET_LEVEL_INFO_HIGH, false, true, true },
{ KVM_INJECT_IRQFD, false, false, true },
{ KVM_WRITE_ISPENDR, false, true, true },
{ 0, },
};
static struct kvm_inject_desc set_active_fns[] = {
/* sgi ppi spi */
{ KVM_WRITE_ISACTIVER, true, true, true },
{ 0, },
};
#define for_each_inject_fn(t, f) \
for ((f) = (t); (f)->cmd; (f)++)
#define for_each_supported_inject_fn(args, t, f) \
for_each_inject_fn(t, f) \
if ((args)->kvm_supports_irqfd || (f)->cmd != KVM_INJECT_IRQFD)
#define for_each_supported_activate_fn(args, t, f) \
for_each_supported_inject_fn((args), (t), (f))
/* Shared between the guest main thread and the IRQ handlers. */
volatile uint64_t irq_handled;
volatile uint32_t irqnr_received[MAX_SPI + 1];
static void reset_stats(void)
{
int i;
irq_handled = 0;
for (i = 0; i <= MAX_SPI; i++)
irqnr_received[i] = 0;
}
static uint64_t gic_read_ap1r0(void)
{
uint64_t reg = read_sysreg_s(SYS_ICV_AP1R0_EL1);
dsb(sy);
return reg;
}
static void gic_write_ap1r0(uint64_t val)
{
write_sysreg_s(val, SYS_ICV_AP1R0_EL1);
isb();
}
static void guest_set_irq_line(uint32_t intid, uint32_t level);
static void guest_irq_generic_handler(bool eoi_split, bool level_sensitive)
{
uint32_t intid = gic_get_and_ack_irq();
if (intid == IAR_SPURIOUS)
return;
GUEST_ASSERT(gic_irq_get_active(intid));
if (!level_sensitive)
GUEST_ASSERT(!gic_irq_get_pending(intid));
if (level_sensitive)
guest_set_irq_line(intid, 0);
GUEST_ASSERT(intid < MAX_SPI);
irqnr_received[intid] += 1;
irq_handled += 1;
gic_set_eoi(intid);
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
if (eoi_split)
gic_set_dir(intid);
GUEST_ASSERT(!gic_irq_get_active(intid));
GUEST_ASSERT(!gic_irq_get_pending(intid));
}
static void kvm_inject_call(kvm_inject_cmd cmd, uint32_t first_intid,
uint32_t num, int level, bool expect_failure)
{
struct kvm_inject_args args = {
.cmd = cmd,
.first_intid = first_intid,
.num = num,
.level = level,
.expect_failure = expect_failure,
};
GUEST_SYNC(&args);
}
#define GUEST_ASSERT_IAR_EMPTY() \
do { \
uint32_t _intid; \
_intid = gic_get_and_ack_irq(); \
GUEST_ASSERT(_intid == 0 || _intid == IAR_SPURIOUS); \
} while (0)
#define CAT_HELPER(a, b) a ## b
#define CAT(a, b) CAT_HELPER(a, b)
#define PREFIX guest_irq_handler_
#define GUEST_IRQ_HANDLER_NAME(split, lev) CAT(PREFIX, CAT(split, lev))
#define GENERATE_GUEST_IRQ_HANDLER(split, lev) \
static void CAT(PREFIX, CAT(split, lev))(struct ex_regs *regs) \
{ \
guest_irq_generic_handler(split, lev); \
}
GENERATE_GUEST_IRQ_HANDLER(0, 0);
GENERATE_GUEST_IRQ_HANDLER(0, 1);
GENERATE_GUEST_IRQ_HANDLER(1, 0);
GENERATE_GUEST_IRQ_HANDLER(1, 1);
static void (*guest_irq_handlers[2][2])(struct ex_regs *) = {
{GUEST_IRQ_HANDLER_NAME(0, 0), GUEST_IRQ_HANDLER_NAME(0, 1),},
{GUEST_IRQ_HANDLER_NAME(1, 0), GUEST_IRQ_HANDLER_NAME(1, 1),},
};
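/*
 * Editor's note (illustrative): GENERATE_GUEST_IRQ_HANDLER(1, 0) expands via
 * CAT() into a function named guest_irq_handler_10 that simply calls
 * guest_irq_generic_handler(1, 0); the 2x2 table above is then indexed as
 * guest_irq_handlers[eoi_split][level_sensitive] when the handler is
 * installed on the host side.
 */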
static void reset_priorities(struct test_args *args)
{
int i;
for (i = 0; i < args->nr_irqs; i++)
gic_set_priority(i, IRQ_DEFAULT_PRIO_REG);
}
static void guest_set_irq_line(uint32_t intid, uint32_t level)
{
kvm_inject_call(KVM_SET_IRQ_LINE, intid, 1, level, false);
}
static void test_inject_fail(struct test_args *args,
uint32_t intid, kvm_inject_cmd cmd)
{
reset_stats();
_KVM_INJECT(cmd, intid, true);
/* no IRQ to handle on entry */
GUEST_ASSERT_EQ(irq_handled, 0);
GUEST_ASSERT_IAR_EMPTY();
}
static void guest_inject(struct test_args *args,
uint32_t first_intid, uint32_t num,
kvm_inject_cmd cmd)
{
uint32_t i;
reset_stats();
/* Cycle over all priorities to make things more interesting. */
for (i = first_intid; i < num + first_intid; i++)
gic_set_priority(i, (i % (KVM_NUM_PRIOS - 1)) << 3);
asm volatile("msr daifset, #2" : : : "memory");
KVM_INJECT_MULTI(cmd, first_intid, num);
while (irq_handled < num) {
asm volatile("wfi\n"
"msr daifclr, #2\n"
/* handle IRQ */
"msr daifset, #2\n"
: : : "memory");
}
asm volatile("msr daifclr, #2" : : : "memory");
GUEST_ASSERT_EQ(irq_handled, num);
for (i = first_intid; i < num + first_intid; i++)
GUEST_ASSERT_EQ(irqnr_received[i], 1);
GUEST_ASSERT_IAR_EMPTY();
reset_priorities(args);
}
/*
* Restore the active state of multiple concurrent IRQs (given by
* concurrent_irqs). This does what a live-migration would do on the
* destination side assuming there are some active IRQs that were not
* deactivated yet.
*/
static void guest_restore_active(struct test_args *args,
uint32_t first_intid, uint32_t num,
kvm_inject_cmd cmd)
{
uint32_t prio, intid, ap1r;
int i;
/* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
* in descending order, so intid+1 can preempt intid.
*/
for (i = 0, prio = (num - 1) * 8; i < num; i++, prio -= 8) {
GUEST_ASSERT(prio >= 0);
intid = i + first_intid;
gic_set_priority(intid, prio);
}
/* In a real migration, KVM would restore all GIC state before running
* guest code.
*/
for (i = 0; i < num; i++) {
intid = i + first_intid;
KVM_ACTIVATE(cmd, intid);
ap1r = gic_read_ap1r0();
ap1r |= 1U << i;
gic_write_ap1r0(ap1r);
}
/* This is where the "migration" would occur. */
/* finish handling the IRQs starting with the highest priority one. */
for (i = 0; i < num; i++) {
intid = num - i - 1 + first_intid;
gic_set_eoi(intid);
if (args->eoi_split)
gic_set_dir(intid);
}
for (i = 0; i < num; i++)
GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
GUEST_ASSERT_IAR_EMPTY();
}
/*
* Polls the IAR until it's not a spurious interrupt.
*
* This function should only be used in test_inject_preemption (with IRQs
* masked).
*/
static uint32_t wait_for_and_activate_irq(void)
{
uint32_t intid;
do {
asm volatile("wfi" : : : "memory");
intid = gic_get_and_ack_irq();
} while (intid == IAR_SPURIOUS);
return intid;
}
/*
* Inject multiple concurrent IRQs (num IRQs starting at first_intid) and
* handle them without handling the actual exceptions. This is done by masking
* interrupts for the whole test.
*/
static void test_inject_preemption(struct test_args *args,
uint32_t first_intid, int num,
kvm_inject_cmd cmd)
{
uint32_t intid, prio, step = KVM_PRIO_STEPS;
int i;
/* Set the priorities of the first (KVM_NUM_PRIOS - 1) IRQs
* in descending order, so intid+1 can preempt intid.
*/
for (i = 0, prio = (num - 1) * step; i < num; i++, prio -= step) {
GUEST_ASSERT(prio >= 0);
intid = i + first_intid;
gic_set_priority(intid, prio);
}
local_irq_disable();
for (i = 0; i < num; i++) {
uint32_t tmp;
intid = i + first_intid;
KVM_INJECT(cmd, intid);
/* Each successive IRQ will preempt the previous one. */
tmp = wait_for_and_activate_irq();
GUEST_ASSERT_EQ(tmp, intid);
if (args->level_sensitive)
guest_set_irq_line(intid, 0);
}
/* finish handling the IRQs starting with the highest priority one. */
for (i = 0; i < num; i++) {
intid = num - i - 1 + first_intid;
gic_set_eoi(intid);
if (args->eoi_split)
gic_set_dir(intid);
}
local_irq_enable();
for (i = 0; i < num; i++)
GUEST_ASSERT(!gic_irq_get_active(i + first_intid));
GUEST_ASSERT_EQ(gic_read_ap1r0(), 0);
GUEST_ASSERT_IAR_EMPTY();
reset_priorities(args);
}
static void test_injection(struct test_args *args, struct kvm_inject_desc *f)
{
uint32_t nr_irqs = args->nr_irqs;
if (f->sgi) {
guest_inject(args, MIN_SGI, 1, f->cmd);
guest_inject(args, 0, 16, f->cmd);
}
if (f->ppi)
guest_inject(args, MIN_PPI, 1, f->cmd);
if (f->spi) {
guest_inject(args, MIN_SPI, 1, f->cmd);
guest_inject(args, nr_irqs - 1, 1, f->cmd);
guest_inject(args, MIN_SPI, nr_irqs - MIN_SPI, f->cmd);
}
}
static void test_injection_failure(struct test_args *args,
struct kvm_inject_desc *f)
{
uint32_t bad_intid[] = { args->nr_irqs, 1020, 1024, 1120, 5120, ~0U, };
int i;
for (i = 0; i < ARRAY_SIZE(bad_intid); i++)
test_inject_fail(args, bad_intid[i], f->cmd);
}
static void test_preemption(struct test_args *args, struct kvm_inject_desc *f)
{
/*
* Test up to 4 levels of preemption. The reason is that KVM doesn't
* currently implement the ability to have more than the number-of-LRs
* number of concurrently active IRQs. The number of LRs implemented is
* IMPLEMENTATION DEFINED, however, it seems that most implement 4.
*/
if (f->sgi)
test_inject_preemption(args, MIN_SGI, 4, f->cmd);
if (f->ppi)
test_inject_preemption(args, MIN_PPI, 4, f->cmd);
if (f->spi)
test_inject_preemption(args, MIN_SPI, 4, f->cmd);
}
static void test_restore_active(struct test_args *args, struct kvm_inject_desc *f)
{
/* Test up to 4 active IRQs. Same reason as in test_preemption. */
if (f->sgi)
guest_restore_active(args, MIN_SGI, 4, f->cmd);
if (f->ppi)
guest_restore_active(args, MIN_PPI, 4, f->cmd);
if (f->spi)
guest_restore_active(args, MIN_SPI, 4, f->cmd);
}
static void guest_code(struct test_args args)
{
uint32_t i, nr_irqs = args.nr_irqs;
bool level_sensitive = args.level_sensitive;
struct kvm_inject_desc *f, *inject_fns;
gic_init(GIC_V3, 1, dist, redist);
for (i = 0; i < nr_irqs; i++)
gic_irq_enable(i);
for (i = MIN_SPI; i < nr_irqs; i++)
gic_irq_set_config(i, !args.level_sensitive);
gic_set_eoi_split(args.eoi_split);
reset_priorities(&args);
gic_set_priority_mask(CPU_PRIO_MASK);
inject_fns = level_sensitive ? inject_level_fns
: inject_edge_fns;
local_irq_enable();
/* Start the tests. */
for_each_supported_inject_fn(&args, inject_fns, f) {
test_injection(&args, f);
test_preemption(&args, f);
test_injection_failure(&args, f);
}
/* Restore the active state of IRQs. This would happen when live
* migrating IRQs in the middle of being handled.
*/
for_each_supported_activate_fn(&args, set_active_fns, f)
test_restore_active(&args, f);
GUEST_DONE();
}
static void kvm_irq_line_check(struct kvm_vm *vm, uint32_t intid, int level,
struct test_args *test_args, bool expect_failure)
{
int ret;
if (!expect_failure) {
kvm_arm_irq_line(vm, intid, level);
} else {
/* The interface doesn't allow larger intid's. */
if (intid > KVM_ARM_IRQ_NUM_MASK)
return;
ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %i did not cause KVM_IRQ_LINE "
"error: rc: %i errno: %i", intid, ret, errno);
}
}
void kvm_irq_set_level_info_check(int gic_fd, uint32_t intid, int level,
bool expect_failure)
{
if (!expect_failure) {
kvm_irq_set_level_info(gic_fd, intid, level);
} else {
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
/*
* The kernel silently fails for invalid SPIs and SGIs (which
* are not level-sensitive). It only checks for intid to not
* spill over 1U << 10 (the max reserved SPI). Also, callers
* are supposed to mask the intid with 0x3ff (1023).
*/
if (intid > VGIC_MAX_RESERVED)
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %i did not cause VGIC_GRP_LEVEL_INFO "
"error: rc: %i errno: %i", intid, ret, errno);
else
TEST_ASSERT(!ret, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO "
"for intid %i failed, rc: %i errno: %i",
intid, ret, errno);
}
}
static void kvm_set_gsi_routing_irqchip_check(struct kvm_vm *vm,
uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
bool expect_failure)
{
struct kvm_irq_routing *routing;
int ret;
uint64_t i;
assert(num <= kvm_max_routes && kvm_max_routes <= KVM_MAX_IRQ_ROUTES);
routing = kvm_gsi_routing_create();
for (i = intid; i < (uint64_t)intid + num; i++)
kvm_gsi_routing_irqchip_add(routing, i - MIN_SPI, i - MIN_SPI);
if (!expect_failure) {
kvm_gsi_routing_write(vm, routing);
} else {
ret = _kvm_gsi_routing_write(vm, routing);
/* The kernel only checks for KVM_IRQCHIP_NUM_PINS. */
if (intid >= KVM_IRQCHIP_NUM_PINS)
TEST_ASSERT(ret != 0 && errno == EINVAL,
"Bad intid %u did not cause KVM_SET_GSI_ROUTING "
"error: rc: %i errno: %i", intid, ret, errno);
else
TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING "
"for intid %i failed, rc: %i errno: %i",
intid, ret, errno);
}
}
static void kvm_irq_write_ispendr_check(int gic_fd, uint32_t intid,
uint32_t vcpu, bool expect_failure)
{
/*
* Ignore this when expecting failure as invalid intids will lead to
* either trying to inject SGIs when we configured the test to be
* level_sensitive (or the reverse), or inject large intids which
* will lead to writing above the ISPENDR register space (and we
* don't want to do that either).
*/
if (!expect_failure)
kvm_irq_write_ispendr(gic_fd, intid, vcpu);
}
static void kvm_routing_and_irqfd_check(struct kvm_vm *vm,
uint32_t intid, uint32_t num, uint32_t kvm_max_routes,
bool expect_failure)
{
int fd[MAX_SPI];
uint64_t val;
int ret, f;
uint64_t i;
/*
* There is no way to try injecting an SGI or PPI as the interface
* starts counting from the first SPI (above the private ones), so just
* exit.
*/
if (INTID_IS_SGI(intid) || INTID_IS_PPI(intid))
return;
kvm_set_gsi_routing_irqchip_check(vm, intid, num,
kvm_max_routes, expect_failure);
/*
* If expect_failure, then just inject anyway. These
* will silently fail. And in any case, the guest will check
* that no actual interrupt was injected for those cases.
*/
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
fd[f] = eventfd(0, 0);
TEST_ASSERT(fd[f] != -1,
"eventfd failed, errno: %i\n", errno);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
struct kvm_irqfd irqfd = {
.fd = fd[f],
.gsi = i - MIN_SPI,
};
assert(i <= (uint64_t)UINT_MAX);
vm_ioctl(vm, KVM_IRQFD, &irqfd);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++) {
val = 1;
ret = write(fd[f], &val, sizeof(uint64_t));
TEST_ASSERT(ret == sizeof(uint64_t),
"Write to KVM_IRQFD failed with ret: %d\n", ret);
}
for (f = 0, i = intid; i < (uint64_t)intid + num; i++, f++)
close(fd[f]);
}
/* handles the valid case: intid=0xffffffff num=1 */
#define for_each_intid(first, num, tmp, i) \
for ((tmp) = (i) = (first); \
(tmp) < (uint64_t)(first) + (uint64_t)(num); \
(tmp)++, (i)++)
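/*
 * Editor's note: the 64-bit cursor (tmp) exists so that (first) + (num) is
 * evaluated without 32-bit wrap-around; that is what makes the
 * intid=0xffffffff num=1 case mentioned above terminate after one pass.
 */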
static void run_guest_cmd(struct kvm_vm *vm, int gic_fd,
struct kvm_inject_args *inject_args,
struct test_args *test_args)
{
kvm_inject_cmd cmd = inject_args->cmd;
uint32_t intid = inject_args->first_intid;
uint32_t num = inject_args->num;
int level = inject_args->level;
bool expect_failure = inject_args->expect_failure;
uint64_t tmp;
uint32_t i;
/* handles the valid case: intid=0xffffffff num=1 */
assert(intid < UINT_MAX - num || num == 1);
switch (cmd) {
case KVM_INJECT_EDGE_IRQ_LINE:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 1, test_args,
expect_failure);
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 0, test_args,
expect_failure);
break;
case KVM_SET_IRQ_LINE:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, level, test_args,
expect_failure);
break;
case KVM_SET_IRQ_LINE_HIGH:
for_each_intid(intid, num, tmp, i)
kvm_irq_line_check(vm, i, 1, test_args,
expect_failure);
break;
case KVM_SET_LEVEL_INFO_HIGH:
for_each_intid(intid, num, tmp, i)
kvm_irq_set_level_info_check(gic_fd, i, 1,
expect_failure);
break;
case KVM_INJECT_IRQFD:
kvm_routing_and_irqfd_check(vm, intid, num,
test_args->kvm_max_routes,
expect_failure);
break;
case KVM_WRITE_ISPENDR:
for (i = intid; i < intid + num; i++)
kvm_irq_write_ispendr_check(gic_fd, i,
VCPU_ID, expect_failure);
break;
case KVM_WRITE_ISACTIVER:
for (i = intid; i < intid + num; i++)
kvm_irq_write_isactiver(gic_fd, i, VCPU_ID);
break;
default:
break;
}
}
static void kvm_inject_get_call(struct kvm_vm *vm, struct ucall *uc,
struct kvm_inject_args *args)
{
struct kvm_inject_args *kvm_args_hva;
vm_vaddr_t kvm_args_gva;
kvm_args_gva = uc->args[1];
kvm_args_hva = (struct kvm_inject_args *)addr_gva2hva(vm, kvm_args_gva);
memcpy(args, kvm_args_hva, sizeof(struct kvm_inject_args));
}
static void print_args(struct test_args *args)
{
printf("nr-irqs=%d level-sensitive=%d eoi-split=%d\n",
args->nr_irqs, args->level_sensitive,
args->eoi_split);
}
static void test_vgic(uint32_t nr_irqs, bool level_sensitive, bool eoi_split)
{
struct ucall uc;
int gic_fd;
struct kvm_vm *vm;
struct kvm_inject_args inject_args;
struct test_args args = {
.nr_irqs = nr_irqs,
.level_sensitive = level_sensitive,
.eoi_split = eoi_split,
.kvm_max_routes = kvm_check_cap(KVM_CAP_IRQ_ROUTING),
.kvm_supports_irqfd = kvm_check_cap(KVM_CAP_IRQFD),
};
print_args(&args);
vm = vm_create_default(VCPU_ID, 0, guest_code);
ucall_init(vm, NULL);
vm_init_descriptor_tables(vm);
vcpu_init_descriptor_tables(vm, VCPU_ID);
/* Setup the guest args page (so it gets the args). */
vcpu_args_set(vm, 0, 1, args);
gic_fd = vgic_v3_setup(vm, 1, nr_irqs,
GICD_BASE_GPA, GICR_BASE_GPA);
vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT,
guest_irq_handlers[args.eoi_split][args.level_sensitive]);
while (1) {
vcpu_run(vm, VCPU_ID);
switch (get_ucall(vm, VCPU_ID, &uc)) {
case UCALL_SYNC:
kvm_inject_get_call(vm, &uc, &inject_args);
run_guest_cmd(vm, gic_fd, &inject_args, &args);
break;
case UCALL_ABORT:
TEST_FAIL("%s at %s:%ld\n\tvalues: %#lx, %#lx",
(const char *)uc.args[0],
__FILE__, uc.args[1], uc.args[2], uc.args[3]);
break;
case UCALL_DONE:
goto done;
default:
TEST_FAIL("Unknown ucall %lu", uc.cmd);
}
}
done:
close(gic_fd);
kvm_vm_free(vm);
}
static void help(const char *name)
{
printf(
"\n"
"usage: %s [-n num_irqs] [-e eoi_split] [-l level_sensitive]\n", name);
printf(" -n: specify number of IRQs to setup the vgic with. "
"It has to be a multiple of 32 and between 64 and 1024.\n");
printf(" -e: if 1 then EOI is split into a write to DIR on top "
"of writing EOI.\n");
printf(" -l: specify whether the IRQs are level-sensitive (1) or not (0).");
puts("");
exit(1);
}
int main(int argc, char **argv)
{
uint32_t nr_irqs = 64;
bool default_args = true;
bool level_sensitive = false;
int opt;
bool eoi_split = false;
/* Tell stdout not to buffer its content */
setbuf(stdout, NULL);
while ((opt = getopt(argc, argv, "hn:e:l:")) != -1) {
switch (opt) {
case 'n':
nr_irqs = atoi(optarg);
if (nr_irqs > 1024 || nr_irqs % 32)
help(argv[0]);
break;
case 'e':
eoi_split = (bool)atoi(optarg);
default_args = false;
break;
case 'l':
level_sensitive = (bool)atoi(optarg);
default_args = false;
break;
case 'h':
default:
help(argv[0]);
break;
}
}
/* If the user just specified nr_irqs, then run all
* combinations.
*/
if (default_args) {
test_vgic(nr_irqs, false /* level */, false /* eoi_split */);
test_vgic(nr_irqs, false /* level */, true /* eoi_split */);
test_vgic(nr_irqs, true /* level */, false /* eoi_split */);
test_vgic(nr_irqs, true /* level */, true /* eoi_split */);
} else {
test_vgic(nr_irqs, level_sensitive, eoi_split);
}
return 0;
}
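
For reference (assuming the usual kvm selftests build flow), the new test can be run standalone: ./vgic_irq with no arguments covers all four eoi_split/level_sensitive combinations, while -n/-e/-l, as described in help() above, pin down a single configuration.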
...@@ -11,11 +11,37 @@ enum gic_type { ...@@ -11,11 +11,37 @@ enum gic_type {
GIC_TYPE_MAX, GIC_TYPE_MAX,
}; };
#define MIN_SGI 0
#define MIN_PPI 16
#define MIN_SPI 32
#define MAX_SPI 1019
#define IAR_SPURIOUS 1023
#define INTID_IS_SGI(intid) (0 <= (intid) && (intid) < MIN_PPI)
#define INTID_IS_PPI(intid) (MIN_PPI <= (intid) && (intid) < MIN_SPI)
#define INTID_IS_SPI(intid) (MIN_SPI <= (intid) && (intid) <= MAX_SPI)
void gic_init(enum gic_type type, unsigned int nr_cpus, void gic_init(enum gic_type type, unsigned int nr_cpus,
void *dist_base, void *redist_base); void *dist_base, void *redist_base);
void gic_irq_enable(unsigned int intid); void gic_irq_enable(unsigned int intid);
void gic_irq_disable(unsigned int intid); void gic_irq_disable(unsigned int intid);
unsigned int gic_get_and_ack_irq(void); unsigned int gic_get_and_ack_irq(void);
void gic_set_eoi(unsigned int intid); void gic_set_eoi(unsigned int intid);
void gic_set_dir(unsigned int intid);
/*
* Sets the EOI mode. When split is false, EOI just drops the priority. When
* split is true, EOI drops the priority and deactivates the interrupt.
*/
void gic_set_eoi_split(bool split);
void gic_set_priority_mask(uint64_t mask);
void gic_set_priority(uint32_t intid, uint32_t prio);
void gic_irq_set_active(unsigned int intid);
void gic_irq_clear_active(unsigned int intid);
bool gic_irq_get_active(unsigned int intid);
void gic_irq_set_pending(unsigned int intid);
void gic_irq_clear_pending(unsigned int intid);
bool gic_irq_get_pending(unsigned int intid);
void gic_irq_set_config(unsigned int intid, bool is_edge);
#endif /* SELFTEST_KVM_GIC_H */ #endif /* SELFTEST_KVM_GIC_H */
...@@ -16,8 +16,12 @@ ...@@ -16,8 +16,12 @@
#define GICD_IGROUPR 0x0080 #define GICD_IGROUPR 0x0080
#define GICD_ISENABLER 0x0100 #define GICD_ISENABLER 0x0100
#define GICD_ICENABLER 0x0180 #define GICD_ICENABLER 0x0180
#define GICD_ISPENDR 0x0200
#define GICD_ICPENDR 0x0280
#define GICD_ICACTIVER 0x0380 #define GICD_ICACTIVER 0x0380
#define GICD_ISACTIVER 0x0300
#define GICD_IPRIORITYR 0x0400 #define GICD_IPRIORITYR 0x0400
#define GICD_ICFGR 0x0C00
/* /*
* The assumption is that the guest runs in a non-secure mode. * The assumption is that the guest runs in a non-secure mode.
...@@ -49,16 +53,24 @@ ...@@ -49,16 +53,24 @@
#define GICR_IGROUPR0 GICD_IGROUPR #define GICR_IGROUPR0 GICD_IGROUPR
#define GICR_ISENABLER0 GICD_ISENABLER #define GICR_ISENABLER0 GICD_ISENABLER
#define GICR_ICENABLER0 GICD_ICENABLER #define GICR_ICENABLER0 GICD_ICENABLER
#define GICR_ISPENDR0 GICD_ISPENDR
#define GICR_ISACTIVER0 GICD_ISACTIVER
#define GICR_ICACTIVER0 GICD_ICACTIVER #define GICR_ICACTIVER0 GICD_ICACTIVER
#define GICR_ICENABLER GICD_ICENABLER
#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IPRIORITYR0 GICD_IPRIORITYR #define GICR_IPRIORITYR0 GICD_IPRIORITYR
/* CPU interface registers */ /* CPU interface registers */
#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) #define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) #define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) #define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) #define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) #define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
#define SYS_ICV_AP1R0_EL1 sys_reg(3, 0, 12, 9, 0)
#define ICC_PMR_DEF_PRIO 0xf0 #define ICC_PMR_DEF_PRIO 0xf0
#define ICC_SRE_EL1_SRE (1U << 0) #define ICC_SRE_EL1_SRE (1U << 0)
......
...@@ -113,6 +113,9 @@ enum { ...@@ -113,6 +113,9 @@ enum {
#define ESR_EC_WP_CURRENT 0x35 #define ESR_EC_WP_CURRENT 0x35
#define ESR_EC_BRK_INS 0x3c #define ESR_EC_BRK_INS 0x3c
void aarch64_get_supported_page_sizes(uint32_t ipa,
bool *ps4k, bool *ps16k, bool *ps64k);
void vm_init_descriptor_tables(struct kvm_vm *vm); void vm_init_descriptor_tables(struct kvm_vm *vm);
void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid); void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid);
......
...@@ -14,7 +14,21 @@ ...@@ -14,7 +14,21 @@
((uint64_t)(flags) << 12) | \ ((uint64_t)(flags) << 12) | \
index) index)
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
uint64_t gicd_base_gpa, uint64_t gicr_base_gpa);

#endif /* SELFTEST_KVM_VGIC_H */
#define VGIC_MAX_RESERVED 1023
void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level);
void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level);
/* The vcpu arg only applies to private interrupts. */
void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu);
void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu);
#define KVM_IRQCHIP_NUM_PINS (1020 - 32)
#endif // SELFTEST_KVM_VGIC_H
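A host-side sketch, not taken from the commit, of how the extended vgic_v3_setup() and the new injection helpers fit together; the base GPAs, nr_irqs and intid are made-up values, and the snippet assumes the usual kvm_util.h/vgic.h selftest includes.

#define EXAMPLE_GICD_BASE_GPA	0x08000000ULL	/* made-up guest addresses */
#define EXAMPLE_GICR_BASE_GPA	0x080a0000ULL

static void host_inject_example(struct kvm_vm *vm, unsigned int nr_vcpus)
{
	int gic_fd = vgic_v3_setup(vm, nr_vcpus, 64 /* nr_irqs */,
				   EXAMPLE_GICD_BASE_GPA, EXAMPLE_GICR_BASE_GPA);

	TEST_ASSERT(gic_fd >= 0, "Failed to create vgic-v3 device");

	/* Inject SPI 33 through KVM_IRQ_LINE (assert, then deassert)... */
	kvm_arm_irq_line(vm, 33, 1);
	kvm_arm_irq_line(vm, 33, 0);

	/* ...or poke the distributor state directly via the device fd. */
	kvm_irq_write_ispendr(gic_fd, 33, 0);	/* vcpu arg only matters for private intids */
	kvm_irq_write_isactiver(gic_fd, 33, 0);

	close(gic_fd);
}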
...@@ -42,18 +42,26 @@ enum vm_guest_mode { ...@@ -42,18 +42,26 @@ enum vm_guest_mode {
VM_MODE_P52V48_4K, VM_MODE_P52V48_4K,
VM_MODE_P52V48_64K, VM_MODE_P52V48_64K,
VM_MODE_P48V48_4K, VM_MODE_P48V48_4K,
VM_MODE_P48V48_16K,
VM_MODE_P48V48_64K, VM_MODE_P48V48_64K,
VM_MODE_P40V48_4K, VM_MODE_P40V48_4K,
VM_MODE_P40V48_16K,
VM_MODE_P40V48_64K, VM_MODE_P40V48_64K,
VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */ VM_MODE_PXXV48_4K, /* For 48bits VA but ANY bits PA */
VM_MODE_P47V64_4K, VM_MODE_P47V64_4K,
VM_MODE_P44V64_4K, VM_MODE_P44V64_4K,
VM_MODE_P36V48_4K,
VM_MODE_P36V48_16K,
VM_MODE_P36V48_64K,
VM_MODE_P36V47_16K,
NUM_VM_MODES, NUM_VM_MODES,
}; };
#if defined(__aarch64__) #if defined(__aarch64__)
#define VM_MODE_DEFAULT VM_MODE_P40V48_4K extern enum vm_guest_mode vm_mode_default;
#define VM_MODE_DEFAULT vm_mode_default
#define MIN_PAGE_SHIFT 12U #define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8) #define ptes_per_page(page_size) ((page_size) / 8)
...@@ -240,6 +248,8 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, ...@@ -240,6 +248,8 @@ int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write); void *val, bool write);
int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr, int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
void *val, bool write); void *val, bool write);
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
uint64_t attr); uint64_t attr);
...@@ -250,6 +260,14 @@ int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, ...@@ -250,6 +260,14 @@ int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
uint64_t attr, void *val, bool write); uint64_t attr, void *val, bool write);
#define KVM_MAX_IRQ_ROUTES 4096
struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
const char *exit_reason_str(unsigned int exit_reason); const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm); void virt_pgd_alloc(struct kvm_vm *vm);
......
...@@ -93,3 +93,69 @@ void gic_set_eoi(unsigned int intid) ...@@ -93,3 +93,69 @@ void gic_set_eoi(unsigned int intid)
GUEST_ASSERT(gic_common_ops); GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_eoir(intid); gic_common_ops->gic_write_eoir(intid);
} }
void gic_set_dir(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_write_dir(intid);
}
void gic_set_eoi_split(bool split)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_eoi_split(split);
}
void gic_set_priority_mask(uint64_t pmr)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority_mask(pmr);
}
void gic_set_priority(unsigned int intid, unsigned int prio)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_set_priority(intid, prio);
}
void gic_irq_set_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_active(intid);
}
void gic_irq_clear_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_active(intid);
}
bool gic_irq_get_active(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_active(intid);
}
void gic_irq_set_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_pending(intid);
}
void gic_irq_clear_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_clear_pending(intid);
}
bool gic_irq_get_pending(unsigned int intid)
{
GUEST_ASSERT(gic_common_ops);
return gic_common_ops->gic_irq_get_pending(intid);
}
void gic_irq_set_config(unsigned int intid, bool is_edge)
{
GUEST_ASSERT(gic_common_ops);
gic_common_ops->gic_irq_set_config(intid, is_edge);
}
...@@ -14,6 +14,17 @@ struct gic_common_ops { ...@@ -14,6 +14,17 @@ struct gic_common_ops {
void (*gic_irq_disable)(unsigned int intid); void (*gic_irq_disable)(unsigned int intid);
uint64_t (*gic_read_iar)(void); uint64_t (*gic_read_iar)(void);
void (*gic_write_eoir)(uint32_t irq); void (*gic_write_eoir)(uint32_t irq);
void (*gic_write_dir)(uint32_t irq);
void (*gic_set_eoi_split)(bool split);
void (*gic_set_priority_mask)(uint64_t mask);
void (*gic_set_priority)(uint32_t intid, uint32_t prio);
void (*gic_irq_set_active)(uint32_t intid);
void (*gic_irq_clear_active)(uint32_t intid);
bool (*gic_irq_get_active)(uint32_t intid);
void (*gic_irq_set_pending)(uint32_t intid);
void (*gic_irq_clear_pending)(uint32_t intid);
bool (*gic_irq_get_pending)(uint32_t intid);
void (*gic_irq_set_config)(uint32_t intid, bool is_edge);
}; };
extern const struct gic_common_ops gicv3_ops; extern const struct gic_common_ops gicv3_ops;
......
...@@ -20,6 +20,7 @@ struct gicv3_data { ...@@ -20,6 +20,7 @@ struct gicv3_data {
}; };
#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) #define sgi_base_from_redist(redist_base) (redist_base + SZ_64K)
#define DIST_BIT (1U << 31)
enum gicv3_intid_range { enum gicv3_intid_range {
SGI_RANGE, SGI_RANGE,
...@@ -50,6 +51,14 @@ static void gicv3_gicr_wait_for_rwp(void *redist_base) ...@@ -50,6 +51,14 @@ static void gicv3_gicr_wait_for_rwp(void *redist_base)
} }
} }
static void gicv3_wait_for_rwp(uint32_t cpu_or_dist)
{
if (cpu_or_dist & DIST_BIT)
gicv3_gicd_wait_for_rwp();
else
gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]);
}
static enum gicv3_intid_range get_intid_range(unsigned int intid) static enum gicv3_intid_range get_intid_range(unsigned int intid)
{ {
switch (intid) { switch (intid) {
...@@ -81,39 +90,175 @@ static void gicv3_write_eoir(uint32_t irq) ...@@ -81,39 +90,175 @@ static void gicv3_write_eoir(uint32_t irq)
isb(); isb();
} }
static void static void gicv3_write_dir(uint32_t irq)
gicv3_config_irq(unsigned int intid, unsigned int offset) {
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
}
static void gicv3_set_priority_mask(uint64_t mask)
{
write_sysreg_s(mask, SYS_ICC_PMR_EL1);
}
static void gicv3_set_eoi_split(bool split)
{
uint32_t val;
/* All other fields are read-only, so no need to read CTLR first. In
* fact, the kernel does the same.
*/
val = split ? (1U << 1) : 0;
write_sysreg_s(val, SYS_ICC_CTLR_EL1);
isb();
}
uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
return readl(base + offset);
}
void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val)
{
void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base
: sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]);
writel(reg_val, base + offset);
}
uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask)
{
return gicv3_reg_readl(cpu_or_dist, offset) & mask;
}
void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset,
uint32_t mask, uint32_t reg_val)
{
uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask;
tmp |= (reg_val & mask);
gicv3_reg_writel(cpu_or_dist, offset, tmp);
}
/*
* We use a single offset for the distributor and redistributor maps as they
* have the same value in both. The only exceptions are registers that only
* exist in one and not the other, like GICR_WAKER that doesn't exist in the
* distributor map. Such registers are conveniently marked as reserved in the
* map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being
* marked as "Reserved" in the Distributor map.
*/
static void gicv3_access_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field,
bool write, uint32_t *val)
{ {
uint32_t cpu = guest_get_vcpuid(); uint32_t cpu = guest_get_vcpuid();
uint32_t mask = 1 << (intid % 32);
enum gicv3_intid_range intid_range = get_intid_range(intid); enum gicv3_intid_range intid_range = get_intid_range(intid);
void *reg; uint32_t fields_per_reg, index, mask, shift;
uint32_t cpu_or_dist;
/* We care about 'cpu' only for SGIs or PPIs */ GUEST_ASSERT(bits_per_field <= reg_bits);
if (intid_range == SGI_RANGE || intid_range == PPI_RANGE) { GUEST_ASSERT(*val < (1U << bits_per_field));
GUEST_ASSERT(cpu < gicv3_data.nr_cpus); /* Some registers like IROUTER are 64 bit long. Those are currently not
* supported by readl nor writel, so just asserting here until then.
*/
GUEST_ASSERT(reg_bits == 32);
reg = sgi_base_from_redist(gicv3_data.redist_base[cpu]) + fields_per_reg = reg_bits / bits_per_field;
offset; index = intid % fields_per_reg;
writel(mask, reg); shift = index * bits_per_field;
gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu]); mask = ((1U << bits_per_field) - 1) << shift;
} else if (intid_range == SPI_RANGE) {
reg = gicv3_data.dist_base + offset + (intid / 32) * 4; /* Set offset to the actual register holding intid's config. */
writel(mask, reg); offset += (intid / fields_per_reg) * (reg_bits / 8);
gicv3_gicd_wait_for_rwp();
} else { cpu_or_dist = (intid_range == SPI_RANGE) ? DIST_BIT : cpu;
GUEST_ASSERT(0);
} if (write)
gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift);
*val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift;
}
static void gicv3_write_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field, uint32_t val)
{
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, true, &val);
}
static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset,
uint32_t reg_bits, uint32_t bits_per_field)
{
uint32_t val;
gicv3_access_reg(intid, offset, reg_bits,
bits_per_field, false, &val);
return val;
}
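To make the index arithmetic in gicv3_access_reg() concrete, the small standalone program below (illustration only, not part of the commit) reproduces the offset/shift/mask computation for two cases: the 8-bit priority field and the 2-bit config field of intid 42.

#include <stdio.h>
#include <stdint.h>

/* Same math as gicv3_access_reg(); reg_bits is 32 for all registers used here. */
static void locate_field(const char *name, uint64_t base_offset,
			 uint32_t intid, uint32_t bits_per_field)
{
	uint32_t reg_bits = 32;
	uint32_t fields_per_reg = reg_bits / bits_per_field;
	uint32_t index = intid % fields_per_reg;
	uint32_t shift = index * bits_per_field;
	uint32_t mask = ((1U << bits_per_field) - 1) << shift;
	uint64_t offset = base_offset + (intid / fields_per_reg) * (reg_bits / 8);

	printf("%s for intid %u: offset 0x%llx, shift %u, mask 0x%08x\n",
	       name, intid, (unsigned long long)offset, shift, mask);
}

int main(void)
{
	locate_field("GICD_IPRIORITYR", 0x0400, 42, 8);	/* -> 0x428, shift 16 */
	locate_field("GICD_ICFGR",      0x0c00, 42, 2);	/* -> 0xc08, shift 20 */
	return 0;
}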
static void gicv3_set_priority(uint32_t intid, uint32_t prio)
{
gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio);
}
/* Sets the intid to be level-sensitive or edge-triggered. */
static void gicv3_irq_set_config(uint32_t intid, bool is_edge)
{
uint32_t val;
/* N/A for private interrupts. */
GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE);
val = is_edge ? 2 : 0;
gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val);
}
static void gicv3_irq_enable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_disable(uint32_t intid)
{
bool is_spi = get_intid_range(intid) == SPI_RANGE;
uint32_t cpu = guest_get_vcpuid();
gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1);
gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu);
}
static void gicv3_irq_set_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1);
}
static void gicv3_irq_clear_active(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1);
}
static bool gicv3_irq_get_active(uint32_t intid)
{
return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1);
}
static void gicv3_irq_set_pending(uint32_t intid)
{
gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1);
} }
static void gicv3_irq_enable(unsigned int intid) static void gicv3_irq_clear_pending(uint32_t intid)
{ {
gicv3_config_irq(intid, GICD_ISENABLER); gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1);
} }
static void gicv3_irq_disable(unsigned int intid) static bool gicv3_irq_get_pending(uint32_t intid)
{ {
gicv3_config_irq(intid, GICD_ICENABLER); return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1);
} }
static void gicv3_enable_redist(void *redist_base) static void gicv3_enable_redist(void *redist_base)
...@@ -237,4 +382,15 @@ const struct gic_common_ops gicv3_ops = { ...@@ -237,4 +382,15 @@ const struct gic_common_ops gicv3_ops = {
.gic_irq_disable = gicv3_irq_disable, .gic_irq_disable = gicv3_irq_disable,
.gic_read_iar = gicv3_read_iar, .gic_read_iar = gicv3_read_iar,
.gic_write_eoir = gicv3_write_eoir, .gic_write_eoir = gicv3_write_eoir,
.gic_write_dir = gicv3_write_dir,
.gic_set_priority_mask = gicv3_set_priority_mask,
.gic_set_eoi_split = gicv3_set_eoi_split,
.gic_set_priority = gicv3_set_priority,
.gic_irq_set_active = gicv3_irq_set_active,
.gic_irq_clear_active = gicv3_irq_clear_active,
.gic_irq_get_active = gicv3_irq_get_active,
.gic_irq_set_pending = gicv3_irq_set_pending,
.gic_irq_clear_pending = gicv3_irq_clear_pending,
.gic_irq_get_pending = gicv3_irq_get_pending,
.gic_irq_set_config = gicv3_irq_set_config,
}; };
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/compiler.h> #include <linux/compiler.h>
#include <assert.h> #include <assert.h>
#include "guest_modes.h"
#include "kvm_util.h" #include "kvm_util.h"
#include "../kvm_util_internal.h" #include "../kvm_util_internal.h"
#include "processor.h" #include "processor.h"
...@@ -237,6 +238,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init ...@@ -237,6 +238,7 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
/* Configure base granule size */
switch (vm->mode) { switch (vm->mode) {
case VM_MODE_P52V48_4K: case VM_MODE_P52V48_4K:
TEST_FAIL("AArch64 does not support 4K sized pages " TEST_FAIL("AArch64 does not support 4K sized pages "
...@@ -245,25 +247,47 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init ...@@ -245,25 +247,47 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init
TEST_FAIL("AArch64 does not support 4K sized pages " TEST_FAIL("AArch64 does not support 4K sized pages "
"with ANY-bit physical address ranges"); "with ANY-bit physical address ranges");
case VM_MODE_P52V48_64K: case VM_MODE_P52V48_64K:
case VM_MODE_P48V48_64K:
case VM_MODE_P40V48_64K:
case VM_MODE_P36V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ break;
case VM_MODE_P48V48_16K:
case VM_MODE_P40V48_16K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V47_16K:
tcr_el1 |= 2ul << 14; /* TG0 = 16KB */
break; break;
case VM_MODE_P48V48_4K: case VM_MODE_P48V48_4K:
case VM_MODE_P40V48_4K:
case VM_MODE_P36V48_4K:
tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
break; break;
default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
}
/* Configure output size */
switch (vm->mode) {
case VM_MODE_P52V48_64K:
tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
break;
case VM_MODE_P48V48_4K:
case VM_MODE_P48V48_16K:
case VM_MODE_P48V48_64K: case VM_MODE_P48V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 5ul << 32; /* IPS = 48 bits */ tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
break; break;
case VM_MODE_P40V48_4K: case VM_MODE_P40V48_4K:
tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ case VM_MODE_P40V48_16K:
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
break;
case VM_MODE_P40V48_64K: case VM_MODE_P40V48_64K:
tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
break; break;
case VM_MODE_P36V48_4K:
case VM_MODE_P36V48_16K:
case VM_MODE_P36V48_64K:
case VM_MODE_P36V47_16K:
tcr_el1 |= 1ul << 32; /* IPS = 36 bits */
break;
default: default:
TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
} }
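The two switches above only select architectural TCR_EL1 encodings; as a reference, here is a compact standalone restatement (not from the commit) of the TG0 and IPS values they use.

#include <stdio.h>
#include <stdint.h>

/* TCR_EL1.TG0 (bits [15:14]) granule encodings used above. */
enum { TG0_4K = 0, TG0_64K = 1, TG0_16K = 2 };

/* TCR_EL1.IPS (bits [34:32]) encodings for the output (PA) size. */
static uint64_t tcr_ips_for_pa_bits(unsigned int pa_bits)
{
	switch (pa_bits) {
	case 32: return 0;
	case 36: return 1;
	case 40: return 2;
	case 42: return 3;
	case 44: return 4;
	case 48: return 5;
	case 52: return 6;
	default: return 0;	/* callers should reject unknown sizes */
	}
}

static uint64_t tcr_granule_and_ips(unsigned int page_shift, unsigned int pa_bits)
{
	uint64_t tg0 = (page_shift == 16) ? TG0_64K :
		       (page_shift == 14) ? TG0_16K : TG0_4K;

	return (tg0 << 14) | (tcr_ips_for_pa_bits(pa_bits) << 32);
}

int main(void)
{
	/* P36V48_16K: expect TG0 = 2 (16K) and IPS = 1 (36 bits). */
	printf("tcr bits: 0x%llx\n",
	       (unsigned long long)tcr_granule_and_ips(14, 36));
	return 0;
}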
...@@ -432,3 +456,47 @@ uint32_t guest_get_vcpuid(void) ...@@ -432,3 +456,47 @@ uint32_t guest_get_vcpuid(void)
{ {
return read_sysreg(tpidr_el1); return read_sysreg(tpidr_el1);
} }
void aarch64_get_supported_page_sizes(uint32_t ipa,
bool *ps4k, bool *ps16k, bool *ps64k)
{
struct kvm_vcpu_init preferred_init;
int kvm_fd, vm_fd, vcpu_fd, err;
uint64_t val;
struct kvm_one_reg reg = {
.id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1),
.addr = (uint64_t)&val,
};
kvm_fd = open_kvm_dev_path_or_exit();
vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, ipa);
TEST_ASSERT(vm_fd >= 0, "Can't create VM");
vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
TEST_ASSERT(vcpu_fd >= 0, "Can't create vcpu");
err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init);
TEST_ASSERT(err == 0, "Can't get target");
err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init);
TEST_ASSERT(err == 0, "Can't get init vcpu");
err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
TEST_ASSERT(err == 0, "Can't get MMFR0");
*ps4k = ((val >> 28) & 0xf) != 0xf;
*ps64k = ((val >> 24) & 0xf) == 0;
*ps16k = ((val >> 20) & 0xf) != 0;
close(vcpu_fd);
close(vm_fd);
close(kvm_fd);
}
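The three bit tests above decode the TGran4 [31:28], TGran64 [27:24] and TGran16 [23:20] fields of ID_AA64MMFR0_EL1; a tiny standalone decoder (illustration only, using a made-up register value) makes the differing polarity of each field explicit.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static void decode_mmfr0_granules(uint64_t mmfr0)
{
	/* Same tests as aarch64_get_supported_page_sizes(). */
	bool ps4k  = ((mmfr0 >> 28) & 0xf) != 0xf;	/* TGran4:  0xf means not supported */
	bool ps64k = ((mmfr0 >> 24) & 0xf) == 0;	/* TGran64: 0x0 means supported */
	bool ps16k = ((mmfr0 >> 20) & 0xf) != 0;	/* TGran16: 0x0 means not supported */

	printf("4K: %d, 16K: %d, 64K: %d\n", ps4k, ps16k, ps64k);
}

int main(void)
{
	/* Hypothetical value: 4K and 16K supported, 64K not (TGran64 = 0xf). */
	decode_mmfr0_granules(0x0f100000ULL);
	return 0;
}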
/*
* arm64 doesn't have a true default mode, so start by computing the
* available IPA space and page sizes early.
*/
void __attribute__((constructor)) init_guest_modes(void)
{
guest_modes_append_default();
}
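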
...@@ -5,11 +5,14 @@ ...@@ -5,11 +5,14 @@
#include <linux/kvm.h> #include <linux/kvm.h>
#include <linux/sizes.h> #include <linux/sizes.h>
#include <asm/kvm_para.h>
#include <asm/kvm.h> #include <asm/kvm.h>
#include "kvm_util.h" #include "kvm_util.h"
#include "../kvm_util_internal.h" #include "../kvm_util_internal.h"
#include "vgic.h" #include "vgic.h"
#include "gic.h"
#include "gic_v3.h"
/* /*
* vGIC-v3 default host setup * vGIC-v3 default host setup
...@@ -28,7 +31,7 @@ ...@@ -28,7 +31,7 @@
* redistributor regions of the guest. Since it depends on the number of * redistributor regions of the guest. Since it depends on the number of
* vCPUs for the VM, it must be called after all the vCPUs have been created. * vCPUs for the VM, it must be called after all the vCPUs have been created.
*/ */
int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs,
uint64_t gicd_base_gpa, uint64_t gicr_base_gpa) uint64_t gicd_base_gpa, uint64_t gicr_base_gpa)
{ {
int gic_fd; int gic_fd;
...@@ -50,6 +53,13 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, ...@@ -50,6 +53,13 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
/* Distributor setup */ /* Distributor setup */
gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false); gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
0, &nr_irqs, true);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true); KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true);
nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE); nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
...@@ -68,3 +78,94 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, ...@@ -68,3 +78,94 @@ int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
return gic_fd; return gic_fd;
} }
/* Only works for level-sensitive interrupts. */
int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
uint64_t attr = 32 * (intid / 32);
uint64_t index = intid % 32;
uint64_t val;
int ret;
ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val, false);
if (ret != 0)
return ret;
val |= 1U << index;
ret = _kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
attr, &val, true);
return ret;
}
void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level)
{
int ret = _kvm_irq_set_level_info(gic_fd, intid, level);
TEST_ASSERT(ret == 0, "KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO failed, "
"rc: %i errno: %i", ret, errno);
}
int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;
TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself "
"doesn't allow injecting SGIs. There's no mask for it.");
if (INTID_IS_PPI(intid))
irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT;
else
irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT;
return _kvm_irq_line(vm, irq, level);
}
void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level)
{
int ret = _kvm_arm_irq_line(vm, intid, level);
TEST_ASSERT(ret == 0, "KVM_IRQ_LINE failed, rc: %i errno: %i",
ret, errno);
}
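For context, the 32-bit irq word handed to KVM_IRQ_LINE on arm64 packs an interrupt type, a vcpu index and the interrupt number; the sketch below (not part of the commit, relying on the standard arm64 uapi field macros) shows how a per-vcpu PPI line would be encoded, which is the part _kvm_arm_irq_line() leaves at vcpu 0.

/* Illustrative only; assumes <linux/kvm.h> and the arm64 uapi kvm.h macros. */
static uint32_t example_irq_line_word(uint32_t intid, uint32_t vcpu)
{
	uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK;

	if (intid >= 16 && intid < 32)		/* PPI range */
		irq |= (KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT) |
		       ((vcpu & KVM_ARM_IRQ_VCPU_MASK) << KVM_ARM_IRQ_VCPU_SHIFT);
	else					/* SPI: vcpu index unused */
		irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT;

	return irq;
}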
static void vgic_poke_irq(int gic_fd, uint32_t intid,
uint32_t vcpu, uint64_t reg_off)
{
uint64_t reg = intid / 32;
uint64_t index = intid % 32;
uint64_t attr = reg_off + reg * 4;
uint64_t val;
bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid);
/* Check that the addr part of the attr is within 32 bits. */
assert(attr <= KVM_DEV_ARM_VGIC_OFFSET_MASK);
uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
: KVM_DEV_ARM_VGIC_GRP_DIST_REGS;
if (intid_is_private) {
/* TODO: only vcpu 0 implemented for now. */
assert(vcpu == 0);
attr += SZ_64K;
}
/* All calls will succeed, even with invalid intids, as long as the
 * addr part of the attr is within 32 bits (checked above). An invalid
 * intid will just make the reads/writes land above the intended
 * register space (i.e., ICPENDR after ISPENDR).
 */
kvm_device_access(gic_fd, group, attr, &val, false);
val |= 1ULL << index;
kvm_device_access(gic_fd, group, attr, &val, true);
}
void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, uint32_t vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR);
}
void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, uint32_t vcpu)
{
vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER);
}
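Two concrete attr values as computed by vgic_poke_irq(), reproduced in a standalone check (illustration only): an SPI lands in the distributor register map, while a private intid lands in the redistributor's SGI_base frame 64KiB above the RD_base registers.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define EX_GICD_ISPENDR	0x0200
#define EX_SZ_64K	0x10000

/* Mirrors the attr computation in vgic_poke_irq() for ISPENDR. */
static uint64_t ispendr_attr(uint32_t intid)
{
	bool is_private = intid < 32;		/* SGIs and PPIs */
	uint64_t attr = EX_GICD_ISPENDR + (intid / 32) * 4;

	if (is_private)
		attr += EX_SZ_64K;		/* SGI_base frame of the redistributor */
	return attr;
}

int main(void)
{
	printf("SPI 33 -> attr 0x%llx (DIST_REGS)\n",
	       (unsigned long long)ispendr_attr(33));	/* 0x204 */
	printf("PPI 27 -> attr 0x%llx (REDIST_REGS)\n",
	       (unsigned long long)ispendr_attr(27));	/* 0x10200 */
	return 0;
}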
...@@ -4,22 +4,59 @@ ...@@ -4,22 +4,59 @@
*/ */
#include "guest_modes.h" #include "guest_modes.h"
#ifdef __aarch64__
#include "processor.h"
enum vm_guest_mode vm_mode_default;
#endif
struct guest_mode guest_modes[NUM_VM_MODES]; struct guest_mode guest_modes[NUM_VM_MODES];
void guest_modes_append_default(void) void guest_modes_append_default(void)
{ {
#ifndef __aarch64__
guest_mode_append(VM_MODE_DEFAULT, true, true); guest_mode_append(VM_MODE_DEFAULT, true, true);
#else
#ifdef __aarch64__
guest_mode_append(VM_MODE_P40V48_64K, true, true);
{ {
unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
bool ps4k, ps16k, ps64k;
int i;
aarch64_get_supported_page_sizes(limit, &ps4k, &ps16k, &ps64k);
vm_mode_default = NUM_VM_MODES;
if (limit >= 52) if (limit >= 52)
guest_mode_append(VM_MODE_P52V48_64K, true, true); guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k);
if (limit >= 48) { if (limit >= 48) {
guest_mode_append(VM_MODE_P48V48_4K, true, true); guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P48V48_64K, true, true); guest_mode_append(VM_MODE_P48V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k);
}
if (limit >= 40) {
guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P40V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k);
if (ps4k)
vm_mode_default = VM_MODE_P40V48_4K;
} }
if (limit >= 36) {
guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k);
guest_mode_append(VM_MODE_P36V48_16K, ps16k, ps16k);
guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k);
guest_mode_append(VM_MODE_P36V47_16K, ps16k, ps16k);
}
/*
* Pick the first supported IPA size if the default
* isn't available.
*/
for (i = 0; vm_mode_default == NUM_VM_MODES && i < NUM_VM_MODES; i++) {
if (guest_modes[i].supported && guest_modes[i].enabled)
vm_mode_default = i;
}
TEST_ASSERT(vm_mode_default != NUM_VM_MODES,
"No supported mode!");
} }
#endif #endif
#ifdef __s390x__ #ifdef __s390x__
......
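With the default mode now computed at runtime, tests generally iterate the mode table instead of hard-coding one. Below is a sketch of the usual pattern, assuming the existing for_each_guest_mode() and vm_create() helpers and DEFAULT_GUEST_PHY_PAGES from the selftest library (those names are taken on trust here, not introduced by this commit).

/* Illustrative only; assumes guest_modes.h and kvm_util.h from the selftest lib. */
static void run_one_mode(enum vm_guest_mode mode, void *arg)
{
	struct kvm_vm *vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES, O_RDWR);

	/* ... set up vcpus and run the actual test body here ... */

	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	guest_modes_append_default();
	for_each_guest_mode(run_one_mode, NULL);
	return 0;
}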
...@@ -166,12 +166,18 @@ const char *vm_guest_mode_string(uint32_t i) ...@@ -166,12 +166,18 @@ const char *vm_guest_mode_string(uint32_t i)
[VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
[VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
[VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages",
[VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages",
[VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
[VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
[VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
[VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
[VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
[VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
[VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
[VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
[VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages",
[VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages",
[VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages",
}; };
_Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
"Missing new mode strings?"); "Missing new mode strings?");
...@@ -185,12 +191,18 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = { ...@@ -185,12 +191,18 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
[VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 }, [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
[VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 }, [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
[VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 }, [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
[VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 },
[VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 }, [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
[VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 }, [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
[VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
[VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 }, [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
[VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 }, [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
[VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 }, [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
[VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 }, [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
[VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
[VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 },
[VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 },
[VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 },
}; };
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
"Missing new mode params?"); "Missing new mode params?");
...@@ -252,9 +264,19 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) ...@@ -252,9 +264,19 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
vm->pgtable_levels = 3; vm->pgtable_levels = 3;
break; break;
case VM_MODE_P40V48_4K: case VM_MODE_P40V48_4K:
case VM_MODE_P36V48_4K:
vm->pgtable_levels = 4; vm->pgtable_levels = 4;
break; break;
case VM_MODE_P40V48_64K: case VM_MODE_P40V48_64K:
case VM_MODE_P36V48_64K:
vm->pgtable_levels = 3;
break;
case VM_MODE_P48V48_16K:
case VM_MODE_P40V48_16K:
case VM_MODE_P36V48_16K:
vm->pgtable_levels = 4;
break;
case VM_MODE_P36V47_16K:
vm->pgtable_levels = 3; vm->pgtable_levels = 3;
break; break;
case VM_MODE_PXXV48_4K: case VM_MODE_PXXV48_4K:
...@@ -2086,6 +2108,78 @@ int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group, ...@@ -2086,6 +2108,78 @@ int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
return ret; return ret;
} }
/*
* IRQ related functions.
*/
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
struct kvm_irq_level irq_level = {
.irq = irq,
.level = level,
};
return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
}
void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
{
int ret = _kvm_irq_line(vm, irq, level);
TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno);
}
struct kvm_irq_routing *kvm_gsi_routing_create(void)
{
struct kvm_irq_routing *routing;
size_t size;
size = sizeof(struct kvm_irq_routing);
/* Allocate space for the max number of entries: this wastes 196 KBs. */
size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
routing = calloc(1, size);
assert(routing);
return routing;
}
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
uint32_t gsi, uint32_t pin)
{
int i;
assert(routing);
assert(routing->nr < KVM_MAX_IRQ_ROUTES);
i = routing->nr;
routing->entries[i].gsi = gsi;
routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
routing->entries[i].flags = 0;
routing->entries[i].u.irqchip.irqchip = 0;
routing->entries[i].u.irqchip.pin = pin;
routing->nr++;
}
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
assert(routing);
ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing);
free(routing);
return ret;
}
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
{
int ret;
ret = _kvm_gsi_routing_write(vm, routing);
TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i",
ret, errno);
}
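A short usage sketch for the new GSI routing helpers (not from the commit; the GSI and pin numbers are arbitrary): build a routing table, add irqchip entries, then write it, keeping in mind that the write helper frees the table.

static void example_gsi_routing(struct kvm_vm *vm)
{
	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
	uint32_t i;

	/* Route GSIs 32..47 onto irqchip pins 0..15 (SPIs 32..47 on arm64). */
	for (i = 0; i < 16; i++)
		kvm_gsi_routing_irqchip_add(routing, 32 + i, i);

	/* Consumes (frees) the routing table and asserts on failure. */
	kvm_gsi_routing_write(vm, routing);
}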
/* /*
* VM Dump * VM Dump
* *
......