Commit 699116c4 authored by Paolo Bonzini's avatar Paolo Bonzini

Merge tag 'kvmarm-fixes-5.10-1' of...

Merge tag 'kvmarm-fixes-5.10-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm64 fixes for 5.10, take #1

- Force PTE mapping on device pages provided via VFIO
- Fix detection of cacheable mapping at S2
- Fallback to PMD/PTE mappings for composite huge pages
- Fix accounting of Stage-2 PGD allocation
- Fix AArch32 handling of some of the debug registers
- Simplify host HYP entry
- Fix stray pointer conversion on nVHE TLB invalidation
- Fix initialization of the nVHE code
- Simplify handling of capabilities exposed to HYP
- Nuke VCPUs caught using a forbidden AArch32 EL0
parents 5a169bf0 22f55384
...@@ -375,6 +375,23 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, ...@@ -375,6 +375,23 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry,
return false; return false;
} }
/*
 * Returns true iff this translation unit is being built as part of the VHE
 * hyp code: __KVM_VHE_HYPERVISOR__ is defined only for those objects, so
 * the result is a compile-time constant in every context.
 */
static __always_inline bool is_vhe_hyp_code(void)
{
	/* Only defined for code run in VHE hyp context */
	return __is_defined(__KVM_VHE_HYPERVISOR__);
}
/*
 * Returns true iff this translation unit is being built as part of the nVHE
 * hyp code: __KVM_NVHE_HYPERVISOR__ is defined only for those objects, so
 * the result is a compile-time constant in every context.
 */
static __always_inline bool is_nvhe_hyp_code(void)
{
	/* Only defined for code run in NVHE hyp context */
	return __is_defined(__KVM_NVHE_HYPERVISOR__);
}
/*
 * True when compiled for either hyp context (VHE or nVHE); false for
 * regular kernel code. Constant-folded at compile time.
 */
static __always_inline bool is_hyp_code(void)
{
	if (is_vhe_hyp_code())
		return true;

	return is_nvhe_hyp_code();
}
extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
extern struct static_key_false arm64_const_caps_ready; extern struct static_key_false arm64_const_caps_ready;
...@@ -428,35 +445,40 @@ static __always_inline bool __cpus_have_const_cap(int num) ...@@ -428,35 +445,40 @@ static __always_inline bool __cpus_have_const_cap(int num)
} }
/* /*
* Test for a capability, possibly with a runtime check. * Test for a capability without a runtime check.
* *
* Before capabilities are finalized, this behaves as cpus_have_cap(). * Before capabilities are finalized, this will BUG().
* After capabilities are finalized, this is patched to avoid a runtime check. * After capabilities are finalized, this is patched to avoid a runtime check.
* *
* @num must be a compile-time constant. * @num must be a compile-time constant.
*/ */
static __always_inline bool cpus_have_const_cap(int num) static __always_inline bool cpus_have_final_cap(int num)
{ {
if (system_capabilities_finalized()) if (system_capabilities_finalized())
return __cpus_have_const_cap(num); return __cpus_have_const_cap(num);
else else
return cpus_have_cap(num); BUG();
} }
/* /*
* Test for a capability without a runtime check. * Test for a capability, possibly with a runtime check for non-hyp code.
* *
* Before capabilities are finalized, this will BUG(). * For hyp code, this behaves the same as cpus_have_final_cap().
*
* For non-hyp code:
* Before capabilities are finalized, this behaves as cpus_have_cap().
* After capabilities are finalized, this is patched to avoid a runtime check. * After capabilities are finalized, this is patched to avoid a runtime check.
* *
* @num must be a compile-time constant. * @num must be a compile-time constant.
*/ */
static __always_inline bool cpus_have_final_cap(int num) static __always_inline bool cpus_have_const_cap(int num)
{ {
if (system_capabilities_finalized()) if (is_hyp_code())
return cpus_have_final_cap(num);
else if (system_capabilities_finalized())
return __cpus_have_const_cap(num); return __cpus_have_const_cap(num);
else else
BUG(); return cpus_have_cap(num);
} }
static inline void cpus_set_cap(unsigned int num) static inline void cpus_set_cap(unsigned int num)
......
...@@ -239,6 +239,7 @@ enum vcpu_sysreg { ...@@ -239,6 +239,7 @@ enum vcpu_sysreg {
#define cp14_DBGWCR0 (DBGWCR0_EL1 * 2) #define cp14_DBGWCR0 (DBGWCR0_EL1 * 2)
#define cp14_DBGWVR0 (DBGWVR0_EL1 * 2) #define cp14_DBGWVR0 (DBGWVR0_EL1 * 2)
#define cp14_DBGDCCINT (MDCCINT_EL1 * 2) #define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
#define cp14_DBGVCR (DBGVCR32_EL2 * 2)
#define NR_COPRO_REGS (NR_SYS_REGS * 2) #define NR_COPRO_REGS (NR_SYS_REGS * 2)
......
...@@ -86,13 +86,12 @@ static inline bool is_kernel_in_hyp_mode(void) ...@@ -86,13 +86,12 @@ static inline bool is_kernel_in_hyp_mode(void)
/*
 * Report whether the kernel is running at EL2 with VHE.
 *
 * Code only run in VHE/NVHE hyp context can assume VHE is present or
 * absent statically (via is_vhe_hyp_code()/is_nvhe_hyp_code()).
 * Otherwise fall back to the finalized CPU capability.
 */
static __always_inline bool has_vhe(void)
{
	if (is_vhe_hyp_code())
		return true;
	else if (is_nvhe_hyp_code())
		return false;
	else
		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
}
......
...@@ -87,7 +87,6 @@ KVM_NVHE_ALIAS(__icache_flags); ...@@ -87,7 +87,6 @@ KVM_NVHE_ALIAS(__icache_flags);
/* Kernel symbols needed for cpus_have_final/const_caps checks. */ /* Kernel symbols needed for cpus_have_final/const_caps checks. */
KVM_NVHE_ALIAS(arm64_const_caps_ready); KVM_NVHE_ALIAS(arm64_const_caps_ready);
KVM_NVHE_ALIAS(cpu_hwcap_keys); KVM_NVHE_ALIAS(cpu_hwcap_keys);
KVM_NVHE_ALIAS(cpu_hwcaps);
/* Static keys which are set if a vGIC trap should be handled in hyp. */ /* Static keys which are set if a vGIC trap should be handled in hyp. */
KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v2_cpuif_trap);
......
...@@ -808,6 +808,25 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) ...@@ -808,6 +808,25 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
preempt_enable(); preempt_enable();
/*
* The ARMv8 architecture doesn't give the hypervisor
* a mechanism to prevent a guest from dropping to AArch32 EL0
* if implemented by the CPU. If we spot the guest in such
* state and that we decided it wasn't supposed to do so (like
* with the asymmetric AArch32 case), return to userspace with
* a fatal error.
*/
if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
/*
* As we have caught the guest red-handed, decide that
* it isn't fit for purpose anymore by making the vcpu
* invalid. The VMM can try and fix it by issuing a
* KVM_ARM_VCPU_INIT if it really wants to.
*/
vcpu->arch.target = -1;
ret = ARM_EXCEPTION_IL;
}
ret = handle_exit(vcpu, ret); ret = handle_exit(vcpu, ret);
} }
......
...@@ -17,8 +17,6 @@ SYM_FUNC_START(__host_exit) ...@@ -17,8 +17,6 @@ SYM_FUNC_START(__host_exit)
get_host_ctxt x0, x1 get_host_ctxt x0, x1
ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
/* Store the host regs x2 and x3 */ /* Store the host regs x2 and x3 */
stp x2, x3, [x0, #CPU_XREG_OFFSET(2)] stp x2, x3, [x0, #CPU_XREG_OFFSET(2)]
......
...@@ -57,16 +57,25 @@ __do_hyp_init: ...@@ -57,16 +57,25 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc b.lo __kvm_handle_stub_hvc
/* Set tpidr_el2 for use by HYP to free a register */ // We only actively check bits [24:31], and everything
msr tpidr_el2, x2 // else has to be zero, which we check at build time.
#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
mov x2, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) #error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
cmp x0, x2 #endif
b.eq 1f
ror x0, x0, #24
eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
ror x0, x0, #4
eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
cbz x0, 1f
mov x0, #SMCCC_RET_NOT_SUPPORTED mov x0, #SMCCC_RET_NOT_SUPPORTED
eret eret
1: phys_to_ttbr x0, x1 1:
/* Set tpidr_el2 for use by HYP to free a register */
msr tpidr_el2, x2
phys_to_ttbr x0, x1
alternative_if ARM64_HAS_CNP alternative_if ARM64_HAS_CNP
orr x0, x0, #TTBR_CNP_BIT orr x0, x0, #TTBR_CNP_BIT
alternative_else_nop_endif alternative_else_nop_endif
......
...@@ -128,7 +128,6 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu) ...@@ -128,7 +128,6 @@ void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu)
struct tlb_inv_context cxt; struct tlb_inv_context cxt;
/* Switch to requested VMID */ /* Switch to requested VMID */
mmu = kern_hyp_va(mmu);
__tlb_switch_to_guest(mmu, &cxt); __tlb_switch_to_guest(mmu, &cxt);
__tlbi(vmalle1); __tlbi(vmalle1);
......
...@@ -635,7 +635,7 @@ static void stage2_flush_dcache(void *addr, u64 size) ...@@ -635,7 +635,7 @@ static void stage2_flush_dcache(void *addr, u64 size)
/*
 * Check whether a Stage-2 PTE maps Normal (cacheable) memory.
 *
 * Compare the raw, in-place MemAttr bits against PAGE_S2_MEMATTR(NORMAL),
 * which is also expressed in-place; extracting the field with FIELD_GET()
 * would shift it down and make the comparison always fail.
 */
static bool stage2_pte_cacheable(kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;

	return memattr == PAGE_S2_MEMATTR(NORMAL);
}
...@@ -846,7 +846,7 @@ int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm) ...@@ -846,7 +846,7 @@ int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm)
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE; pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL | __GFP_ZERO); pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
if (!pgt->pgd) if (!pgt->pgd)
return -ENOMEM; return -ENOMEM;
......
...@@ -787,14 +787,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -787,14 +787,26 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_shift = PAGE_SHIFT; vma_shift = PAGE_SHIFT;
} }
if (vma_shift == PUD_SHIFT && switch (vma_shift) {
!fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) case PUD_SHIFT:
if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE))
break;
fallthrough;
case CONT_PMD_SHIFT:
vma_shift = PMD_SHIFT; vma_shift = PMD_SHIFT;
fallthrough;
if (vma_shift == PMD_SHIFT && case PMD_SHIFT:
!fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE))
force_pte = true; break;
fallthrough;
case CONT_PTE_SHIFT:
vma_shift = PAGE_SHIFT; vma_shift = PAGE_SHIFT;
force_pte = true;
fallthrough;
case PAGE_SHIFT:
break;
default:
WARN_ONCE(1, "Unknown vma_shift %d", vma_shift);
} }
vma_pagesize = 1UL << vma_shift; vma_pagesize = 1UL << vma_shift;
...@@ -839,6 +851,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, ...@@ -839,6 +851,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (kvm_is_device_pfn(pfn)) { if (kvm_is_device_pfn(pfn)) {
device = true; device = true;
force_pte = true;
} else if (logging_active && !write_fault) { } else if (logging_active && !write_fault) {
/* /*
* Only actually map the page as writable if this was a write * Only actually map the page as writable if this was a write
......
...@@ -1897,9 +1897,9 @@ static const struct sys_reg_desc cp14_regs[] = { ...@@ -1897,9 +1897,9 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(1), DBG_BCR_BVR_WCR_WVR(1),
/* DBGDCCINT */ /* DBGDCCINT */
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
/* DBGDSCRext */ /* DBGDSCRext */
{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
DBG_BCR_BVR_WCR_WVR(2), DBG_BCR_BVR_WCR_WVR(2),
/* DBGDTR[RT]Xint */ /* DBGDTR[RT]Xint */
{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
...@@ -1914,7 +1914,7 @@ static const struct sys_reg_desc cp14_regs[] = { ...@@ -1914,7 +1914,7 @@ static const struct sys_reg_desc cp14_regs[] = {
{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
DBG_BCR_BVR_WCR_WVR(6), DBG_BCR_BVR_WCR_WVR(6),
/* DBGVCR */ /* DBGVCR */
{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
DBG_BCR_BVR_WCR_WVR(7), DBG_BCR_BVR_WCR_WVR(7),
DBG_BCR_BVR_WCR_WVR(8), DBG_BCR_BVR_WCR_WVR(8),
DBG_BCR_BVR_WCR_WVR(9), DBG_BCR_BVR_WCR_WVR(9),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment