Commit a040adfb authored by Oliver Upton

Merge branch kvm-arm64/misc into kvmarm/next

* kvm-arm64/misc:
  : Miscellaneous updates
  :
  :  - Fix handling of features w/ nonzero safe values in set_id_regs
  :    selftest
  :
  :  - Cleanup the unused kern_hyp_va() asm macro
  :
  :  - Differentiate nVHE and hVHE in boot-time message
  :
  :  - Several selftests cleanups
  :
  :  - Drop bogus return value from kvm_arch_create_vm_debugfs()
  :
  :  - Make save/restore of SPE and TRBE control registers affect EL1 state
  :    in hVHE mode
  :
  :  - Typos
  KVM: arm64: Fix TRFCR_EL1/PMSCR_EL1 access in hVHE mode
  KVM: selftests: aarch64: Remove unused functions from vpmu test
  KVM: arm64: Fix typos
  KVM: Get rid of return value from kvm_arch_create_vm_debugfs()
  KVM: selftests: Print timer ctl register in ISTATUS assertion
  KVM: selftests: Fix GUEST_PRINTF() format warnings in ARM code
  KVM: arm64: removed unused kern_hyp_va asm macro
  KVM: arm64: add comments to __kern_hyp_va
  KVM: arm64: print Hyp mode
  KVM: arm64: selftests: Handle feature fields with nonzero minimum value correctly
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
parents 262cd16e 9a3bfb27
@@ -70,7 +70,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
/*
* Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
* static inline can allow the compiler to out-of-line this. KVM always wants
- * the macro version as its always inlined.
+ * the macro version as it's always inlined.
*/
#define __kvm_swab32(x) ___constant_swab32(x)
@@ -53,27 +53,6 @@
#include <asm/alternative.h>
-/*
- * Convert a kernel VA into a HYP VA.
- * reg: VA to be converted.
- *
- * The actual code generation takes place in kvm_update_va_mask, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_va_mask uses the
- * specific registers encoded in the instructions).
- */
-.macro kern_hyp_va reg
-#ifndef __KVM_VHE_HYPERVISOR__
-alternative_cb ARM64_ALWAYS_SYSTEM, kvm_update_va_mask
-and \reg, \reg, #1 /* mask with va_mask */
-ror \reg, \reg, #1 /* rotate to the first tag bit */
-add \reg, \reg, #0 /* insert the low 12 bits of the tag */
-add \reg, \reg, #0, lsl 12 /* insert the top 12 bits of the tag */
-ror \reg, \reg, #63 /* rotate back */
-alternative_cb_end
-#endif
-.endm
/*
* Convert a hypervisor VA to a PA
* reg: hypervisor address to be converted in place
@@ -127,14 +106,29 @@ void kvm_apply_hyp_relocations(void);
#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
/*
* Convert a kernel VA into a HYP VA.
+ *
+ * Can be called from hyp or non-hyp context.
+ *
+ * The actual code generation takes place in kvm_update_va_mask(), and
+ * the instructions below are only there to reserve the space and
+ * perform the register allocation (kvm_update_va_mask() uses the
+ * specific registers encoded in the instructions).
*/
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
+/*
+ * This #ifndef is an optimisation for when this is called from VHE hyp
+ * context. When called from a VHE non-hyp context, kvm_update_va_mask() will
+ * replace the instructions with `nop`s.
+ */
#ifndef __KVM_VHE_HYPERVISOR__
-asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
-"ror %0, %0, #1\n"
-"add %0, %0, #0\n"
-"add %0, %0, #0, lsl 12\n"
-"ror %0, %0, #63\n",
+asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n" /* mask with va_mask */
+"ror %0, %0, #1\n" /* rotate to the first tag bit */
+"add %0, %0, #0\n" /* insert the low 12 bits of the tag */
+"add %0, %0, #0, lsl 12\n" /* insert the top 12 bits of the tag */
+"ror %0, %0, #63\n", /* rotate back */
ARM64_ALWAYS_SYSTEM,
kvm_update_va_mask)
: "+r" (v));
@@ -745,7 +745,7 @@ static void kvm_timer_vcpu_load_nested_switch(struct kvm_vcpu *vcpu,
WARN_ON_ONCE(ret);
/*
- * The virtual offset behaviour is "interresting", as it
+ * The virtual offset behaviour is "interesting", as it
* always applies when HCR_EL2.E2H==0, but only when
* accessed from EL1 when HCR_EL2.E2H==1. So make sure we
* track E2H when putting the HV timer in "direct" mode.
@@ -2591,7 +2591,8 @@ static __init int kvm_arm_init(void)
} else if (in_hyp_mode) {
kvm_info("VHE mode initialized successfully\n");
} else {
kvm_info("Hyp mode initialized successfully\n");
char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
}
/*
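The practical effect of the kvm_arm_init() hunk above: a non-VHE host now logs either "Hyp mode (nVHE) initialized successfully" or, when the ARM64_KVM_HVHE capability is set, "Hyp mode (hVHE) initialized successfully", where both configurations previously printed the same ambiguous banner.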
@@ -117,7 +117,7 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
}
/*
- * Called just before entering the guest once we are no longer preemptable
+ * Called just before entering the guest once we are no longer preemptible
* and interrupts are disabled. If we have managed to run anything using
* FP while we were preemptible (such as off the back of an interrupt),
* then neither the host nor the guest own the FP hardware (and it was the
@@ -31,8 +31,8 @@ static void __debug_save_spe(u64 *pmscr_el1)
return;
/* Yes; save the control register and disable data generation */
-*pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
-write_sysreg_s(0, SYS_PMSCR_EL1);
+*pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
+write_sysreg_el1(0, SYS_PMSCR);
isb();
/* Now drain all buffered data to memory */
@@ -48,7 +48,7 @@ static void __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
-write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
+write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
static void __debug_save_trace(u64 *trfcr_el1)
@@ -63,8 +63,8 @@ static void __debug_save_trace(u64 *trfcr_el1)
* Since access to TRFCR_EL1 is trapped, the guest can't
* modify the filtering set by the host.
*/
-*trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
-write_sysreg_s(0, SYS_TRFCR_EL1);
+*trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
+write_sysreg_el1(0, SYS_TRFCR);
isb();
/* Drain the trace buffer to memory */
tsb_csync();
@@ -76,7 +76,7 @@ static void __debug_restore_trace(u64 trfcr_el1)
return;
/* Restore trace filter controls */
-write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+write_sysreg_el1(trfcr_el1, SYS_TRFCR);
}
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
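The substance of the four hunks above: when the hypervisor itself runs with HCR_EL2.E2H set (hVHE mode), an unadorned PMSCR_EL1 or TRFCR_EL1 access from EL2 is redirected to the EL2 register, so the old code saved and restored hypervisor state instead of the intended EL1 state. The read_sysreg_el1()/write_sysreg_el1() helpers use the _EL12 encoding in that configuration. A simplified model of the idea, illustrative only (the real helpers select the encoding at runtime via alternatives; HYP_E2H_IS_SET is a made-up condition):

/* Illustrative only, not the kernel's implementation. */
#ifdef HYP_E2H_IS_SET				/* hVHE: EL2 with E2H == 1 */
#define read_sysreg_el1(r)	read_sysreg_s(r##_EL12)	/* reaches EL1 state */
#define write_sysreg_el1(v, r)	write_sysreg_s(v, r##_EL12)
#else						/* nVHE: EL2 with E2H == 0 */
#define read_sysreg_el1(r)	read_sysreg_s(r##_EL1)
#define write_sysreg_el1(v, r)	write_sysreg_s(v, r##_EL1)
#endif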
@@ -110,7 +110,7 @@ SYM_FUNC_END(__host_enter)
* u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
-/* Prepare and exit to the host's panic funciton. */
+/* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
@@ -155,7 +155,7 @@ int hyp_back_vmemmap(phys_addr_t back)
start = hyp_memory[i].base;
start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
/*
- * The begining of the hyp_vmemmap region for the current
+ * The beginning of the hyp_vmemmap region for the current
* memblock may already be backed by the page backing the end
* the previous region, so avoid mapping it twice.
*/
@@ -408,7 +408,7 @@ static void *admit_host_page(void *arg)
return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
}
-/* Refill our local memcache by poping pages from the one provided by the host. */
+/* Refill our local memcache by popping pages from the one provided by the host. */
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
struct kvm_hyp_memcache *host_mc)
{
@@ -134,7 +134,7 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
if (vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE) {
fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
} else {
-/* no need to shuffle FS[4] into DFSR[10] as its 0 */
+/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
fsr = DFSR_FSC_EXTABT_nLPAE;
}
@@ -309,7 +309,7 @@ int vgic_init(struct kvm *kvm)
vgic_lpi_translation_cache_init(kvm);
/*
- * If we have GICv4.1 enabled, unconditionnaly request enable the
+ * If we have GICv4.1 enabled, unconditionally request enable the
* v4 support so that we get HW-accelerated vSGIs. Otherwise, only
* enable it if we present a virtual ITS to the guest.
*/
@@ -1342,8 +1342,8 @@ static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
}
/**
- * vgic_its_invall - invalidate all LPIs targetting a given vcpu
- * @vcpu: the vcpu for which the RD is targetted by an invalidation
+ * vgic_its_invall - invalidate all LPIs targeting a given vcpu
+ * @vcpu: the vcpu for which the RD is targeted by an invalidation
*
* Contrary to the INVALL command, this targets a RD instead of a
* collection, and we don't need to hold the its_lock, since no ITS is
@@ -2538,9 +2538,8 @@ void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_
vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}
-int kvm_arch_create_vm_debugfs(struct kvm *kvm)
+void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
if (kvm->arch.kvm_ops->create_vm_debugfs)
kvm->arch.kvm_ops->create_vm_debugfs(kvm);
-return 0;
}
@@ -189,9 +189,8 @@ static const struct file_operations mmu_rmaps_stat_fops = {
.release = kvm_mmu_rmaps_stat_release,
};
-int kvm_arch_create_vm_debugfs(struct kvm *kvm)
+void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
debugfs_create_file("mmu_rmaps_stat", 0644, kvm->debugfs_dentry, kvm,
&mmu_rmaps_stat_fops);
-return 0;
}
@@ -1507,7 +1507,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);
-int kvm_arch_create_vm_debugfs(struct kvm *kvm);
+void kvm_arch_create_vm_debugfs(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
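With the int return gone, the contract is explicit: VM debugfs creation is best-effort and can no longer fail VM creation. A hypothetical arch implementation under the new signature (the "my_stats" name and my_stats_fops are illustrative, not part of any real architecture):

void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	/* debugfs_create_file() degrades gracefully; nothing to propagate */
	debugfs_create_file("my_stats", 0444, kvm->debugfs_dentry,
			    kvm, &my_stats_fops);
}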
@@ -158,9 +158,9 @@ static void guest_validate_irq(unsigned int intid,
/* Basic 'timer condition met' check */
__GUEST_ASSERT(xcnt >= cval,
"xcnt = 0x%llx, cval = 0x%llx, xcnt_diff_us = 0x%llx",
"xcnt = 0x%lx, cval = 0x%lx, xcnt_diff_us = 0x%lx",
xcnt, cval, xcnt_diff_us);
-__GUEST_ASSERT(xctl & CTL_ISTATUS, "xcnt = 0x%llx", xcnt);
+__GUEST_ASSERT(xctl & CTL_ISTATUS, "xctl = 0x%lx", xctl);
WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
}
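The format-string changes in this and the following selftest hunks share one cause: GUEST_PRINTF()/__GUEST_ASSERT() formats are checked like printf()'s, and on arm64 (an LP64 target) uint64_t is unsigned long, so %llx mismatches a uint64_t argument while %lx is exact. The ISTATUS assertion is also fixed to print xctl, the value actually under test, rather than xcnt. A standalone illustration, assuming plain userspace C on an LP64 target:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t v = 0x1234;

	printf("v = 0x%lx\n", v);		/* exact match on LP64 */
	printf("v = 0x%" PRIx64 "\n", v);	/* portable spelling of the same */
	return 0;
}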
@@ -365,7 +365,7 @@ static void guest_wp_handler(struct ex_regs *regs)
static void guest_ss_handler(struct ex_regs *regs)
{
-__GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%u'", ss_idx);
+__GUEST_ASSERT(ss_idx < 4, "Expected index < 4, got '%lu'", ss_idx);
ss_addr[ss_idx++] = regs->pc;
regs->pstate |= SPSR_SS;
}
@@ -105,12 +105,12 @@ static void guest_test_hvc(const struct test_hvc_info *hc_info)
case TEST_STAGE_HVC_IFACE_FEAT_DISABLED:
case TEST_STAGE_HVC_IFACE_FALSE_INFO:
__GUEST_ASSERT(res.a0 == SMCCC_RET_NOT_SUPPORTED,
"a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
"a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%lx, stage = %u",
res.a0, hc_info->func_id, hc_info->arg1, stage);
break;
case TEST_STAGE_HVC_IFACE_FEAT_ENABLED:
__GUEST_ASSERT(res.a0 != SMCCC_RET_NOT_SUPPORTED,
"a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%llx, stage = %u",
"a0 = 0x%lx, func_id = 0x%x, arg1 = 0x%lx, stage = %u",
res.a0, hc_info->func_id, hc_info->arg1, stage);
break;
default:
@@ -292,7 +292,7 @@ static void guest_code(struct test_desc *test)
static void no_dabt_handler(struct ex_regs *regs)
{
GUEST_FAIL("Unexpected dabt, far_el1 = 0x%llx", read_sysreg(far_el1));
GUEST_FAIL("Unexpected dabt, far_el1 = 0x%lx", read_sysreg(far_el1));
}
static void no_iabt_handler(struct ex_regs *regs)
@@ -32,6 +32,10 @@ struct reg_ftr_bits {
enum ftr_type type;
uint8_t shift;
uint64_t mask;
+/*
+ * For FTR_EXACT, safe_val is used as the exact safe value.
+ * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
+ */
int64_t safe_val;
};
@@ -65,13 +69,13 @@
static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
-REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
+REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
REG_FTR_END,
};
static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
-S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, 0),
-REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, 0),
+S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
+REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
REG_FTR_END,
};
......@@ -224,13 +228,13 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
-if (ftr_bits->type == FTR_UNSIGNED) {
+if (ftr_bits->sign == FTR_UNSIGNED) {
switch (ftr_bits->type) {
case FTR_EXACT:
ftr = ftr_bits->safe_val;
break;
case FTR_LOWER_SAFE:
-if (ftr > 0)
+if (ftr > ftr_bits->safe_val)
ftr--;
break;
case FTR_HIGHER_SAFE:
......@@ -252,7 +256,7 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
ftr = ftr_bits->safe_val;
break;
case FTR_LOWER_SAFE:
-if (ftr > 0)
+if (ftr > ftr_bits->safe_val)
ftr--;
break;
case FTR_HIGHER_SAFE:
......@@ -276,7 +280,7 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
{
uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
-if (ftr_bits->type == FTR_UNSIGNED) {
+if (ftr_bits->sign == FTR_UNSIGNED) {
switch (ftr_bits->type) {
case FTR_EXACT:
ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
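The set_id_regs changes above condense to one rule: a FTR_LOWER_SAFE field may only be lowered while it stays at or above the field's minimum safe value, so fields such as DebugVer (minimum IMP, i.e. 0x6) are no longer driven down to values no CPU can legally report. A sketch of the lower-safe branch:

#include <stdint.h>

/* Sketch of get_safe_value()'s FTR_LOWER_SAFE handling after the fix. */
static uint64_t lower_safe_value(uint64_t ftr, int64_t safe_val)
{
	if (ftr > (uint64_t)safe_val)
		ftr--;		/* one step lower is still a valid value */
	return ftr;		/* already at the minimum, keep it */
}

/* e.g. lower_safe_value(0x8, 0x6) == 0x7; lower_safe_value(0x6, 0x6) == 0x6 */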
@@ -93,22 +93,6 @@
isb();
}
-static inline void enable_counter(int idx)
-{
-uint64_t v = read_sysreg(pmcntenset_el0);
-write_sysreg(BIT(idx) | v, pmcntenset_el0);
-isb();
-}
-static inline void disable_counter(int idx)
-{
-uint64_t v = read_sysreg(pmcntenset_el0);
-write_sysreg(BIT(idx) | v, pmcntenclr_el0);
-isb();
-}
static void pmu_disable_reset(void)
{
uint64_t pmcr = read_sysreg(pmcr_el0);
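A note on the two helpers removed above: besides being unused, disable_counter() was wrong. It read pmcntenset_el0 and wrote the ORed value to pmcntenclr_el0, and since writing 1 to a pmcntenclr bit disables that counter, it would have disabled every enabled counter rather than just idx.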
@@ -195,11 +179,11 @@
\
if (set_expected) \
__GUEST_ASSERT((_tval & mask), \
"tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
"tval: 0x%lx; mask: 0x%lx; set_expected: %u", \
_tval, mask, set_expected); \
else \
__GUEST_ASSERT(!(_tval & mask), \
"tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \
"tval: 0x%lx; mask: 0x%lx; set_expected: %u", \
_tval, mask, set_expected); \
}
@@ -286,7 +270,7 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
acc->write_typer(pmc_idx, write_data);
read_data = acc->read_typer(pmc_idx);
__GUEST_ASSERT(read_data == write_data,
"pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
"pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
/*
@@ -297,14 +281,14 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx)
/* The count value must be 0, as it is disabled and reset */
__GUEST_ASSERT(read_data == 0,
"pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx",
"pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx",
pmc_idx, PMC_ACC_TO_IDX(acc), read_data);
write_data = read_data + pmc_idx + 0x12345;
acc->write_cntr(pmc_idx, write_data);
read_data = acc->read_cntr(pmc_idx);
__GUEST_ASSERT(read_data == write_data,
"pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
"pmc_idx: 0x%x; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx",
pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data);
}
@@ -379,7 +363,7 @@ static void guest_code(uint64_t expected_pmcr_n)
int i, pmc;
__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%x",
expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
pmcr = read_sysreg(pmcr_el0);
@@ -1150,10 +1150,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
&stat_fops_per_vm);
}
-ret = kvm_arch_create_vm_debugfs(kvm);
-if (ret)
-goto out_err;
+kvm_arch_create_vm_debugfs(kvm);
return 0;
out_err:
kvm_destroy_vm_debugfs(kvm);
@@ -1183,9 +1180,8 @@ void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
* Cleanup should be automatic done in kvm_destroy_vm_debugfs() recursively, so
* a per-arch destroy interface is not needed.
*/
-int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
+void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
-return 0;
}
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)