Commit d3e1086c authored by David Brazdil, committed by Marc Zyngier

KVM: arm64: Init MAIR/TCR_EL2 from params struct

MAIR_EL2 and TCR_EL2 are currently initialized from their _EL1 values.
This will not work once KVM starts intercepting PSCI ON/SUSPEND SMCs
and initializing EL2 state before EL1 state.

Obtain the EL1 values during KVM init and store them in the init params
struct. The struct will stay in memory and can be used when booting new
cores.

Take the opportunity to copy the T0SZ value from idmap_t0sz during KVM
init rather than in .hyp.idmap.text. This avoids the need for the
idmap_t0sz symbol alias.
Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20201202184122.26046-12-dbrazdil@google.com
parent 63fec243
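In outline, the change below captures the EL1 values once per CPU on the host and stashes them in the per-CPU init params, so the EL2 init code never has to read EL1 system registers itself. A condensed sketch of that capture, using the patch's own field names and the kernel's read_sysreg()/GENMASK() helpers (the wrapper function itself is hypothetical; the authoritative version is in the cpu_init_hyp_mode() hunk further down):

    /* Sketch only: mirrors the logic added to cpu_init_hyp_mode() below. */
    static void capture_nvhe_init_regs(struct kvm_nvhe_init_params *params)
    {
            unsigned long tcr;

            /* Snapshot MAIR_EL1 once, at KVM init time. */
            params->mair_el2 = read_sysreg(mair_el1);

            /* Derive TCR_EL2 from TCR_EL1, but take T0SZ from the ID map. */
            tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
            tcr &= ~TCR_T0SZ_MASK;
            tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
            params->tcr_el2 = tcr;

            /*
             * hyp-init.S later consumes the struct by offset, e.g.
             *   ldr x0, [x1, #NVHE_INIT_MAIR_EL2]
             *   msr mair_el2, x0
             * so a (re)booting core never needs the EL1 values again.
             */
    }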
arch/arm64/include/asm/kvm_asm.h
@@ -151,6 +151,8 @@ extern void *__vhe_undefined_symbol;
 #endif
 
 struct kvm_nvhe_init_params {
+	unsigned long mair_el2;
+	unsigned long tcr_el2;
 	unsigned long tpidr_el2;
 	unsigned long stack_hyp_va;
 	phys_addr_t pgd_pa;
arch/arm64/kernel/asm-offsets.c
@@ -109,6 +109,8 @@ int main(void)
   DEFINE(CPU_APGAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
   DEFINE(HOST_CONTEXT_VCPU,	offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
   DEFINE(HOST_DATA_CONTEXT,	offsetof(struct kvm_host_data, host_ctxt));
+  DEFINE(NVHE_INIT_MAIR_EL2,	offsetof(struct kvm_nvhe_init_params, mair_el2));
+  DEFINE(NVHE_INIT_TCR_EL2,	offsetof(struct kvm_nvhe_init_params, tcr_el2));
   DEFINE(NVHE_INIT_TPIDR_EL2,	offsetof(struct kvm_nvhe_init_params, tpidr_el2));
   DEFINE(NVHE_INIT_STACK_HYP_VA,	offsetof(struct kvm_nvhe_init_params, stack_hyp_va));
   DEFINE(NVHE_INIT_PGD_PA,	offsetof(struct kvm_nvhe_init_params, pgd_pa));
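For context, the DEFINE() lines above are what produce the NVHE_INIT_* immediates used by the hyp-init assembly further down: asm-offsets.c is compiled at build time and its output is post-processed into the generated header include/generated/asm-offsets.h. A minimal stand-alone illustration of the idea, using a stand-in struct rather than the real kernel types:

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for struct kvm_nvhe_init_params (illustration only). */
    struct init_params {
            unsigned long mair_el2;
            unsigned long tcr_el2;
            unsigned long tpidr_el2;
    };

    int main(void)
    {
            /*
             * The kernel's DEFINE() macro emits markers into the compiler's
             * assembly output which the build scrapes into #define constants;
             * printing offsetof() here shows where values such as
             * NVHE_INIT_MAIR_EL2 ultimately come from.
             */
            printf("#define NVHE_INIT_MAIR_EL2 %zu\n",
                   offsetof(struct init_params, mair_el2));
            printf("#define NVHE_INIT_TCR_EL2 %zu\n",
                   offsetof(struct init_params, tcr_el2));
            return 0;
    }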
arch/arm64/kernel/image-vars.h
@@ -77,9 +77,6 @@ KVM_NVHE_ALIAS(panic);
 /* Vectors installed by hyp-init on reset HVC. */
 KVM_NVHE_ALIAS(__hyp_stub_vectors);
 
-/* IDMAP TCR_EL1.T0SZ as computed by the EL1 init code */
-KVM_NVHE_ALIAS(idmap_t0sz);
-
 /* Kernel symbol used by icache_is_vpipt(). */
 KVM_NVHE_ALIAS(__icache_flags);
arch/arm64/kvm/arm.c
@@ -1336,6 +1336,7 @@ static void cpu_init_hyp_mode(void)
 {
 	struct kvm_nvhe_init_params *params = this_cpu_ptr_nvhe_sym(kvm_init_params);
 	struct arm_smccc_res res;
+	unsigned long tcr;
 
 	/* Switch from the HYP stub to our own HYP init vector */
 	__hyp_set_vectors(kvm_get_idmap_vector());
@@ -1348,6 +1349,27 @@ static void cpu_init_hyp_mode(void)
 	params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
 			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
 
+	params->mair_el2 = read_sysreg(mair_el1);
+
+	/*
+	 * The ID map may be configured to use an extended virtual address
+	 * range. This is only the case if system RAM is out of range for the
+	 * currently configured page size and VA_BITS, in which case we will
+	 * also need the extended virtual range for the HYP ID map, or we won't
+	 * be able to enable the EL2 MMU.
+	 *
+	 * However, at EL2, there is only one TTBR register, and we can't switch
+	 * between translation tables *and* update TCR_EL2.T0SZ at the same
+	 * time. Bottom line: we need to use the extended range with *both* our
+	 * translation tables.
+	 *
+	 * So use the same T0SZ value we use for the ID map.
+	 */
+	tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
+	tcr &= ~TCR_T0SZ_MASK;
+	tcr |= (idmap_t0sz & GENMASK(TCR_TxSZ_WIDTH - 1, 0)) << TCR_T0SZ_OFFSET;
+	params->tcr_el2 = tcr;
+
 	params->stack_hyp_va = kern_hyp_va(__this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE);
 	params->pgd_pa = kvm_mmu_get_httbr();
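As a concrete illustration of the comment above (figures chosen for illustration, not taken from the patch): T0SZ encodes 64 minus the number of virtual address bits, so a kernel built with VA_BITS=39 would normally use T0SZ = 64 - 39 = 25 for its ID map, but if system RAM lies above what 39 bits can reach, the ID map is created with a wider range, e.g. 48 bits, i.e. T0SZ = 16. Copying idmap_t0sz into params->tcr_el2 makes the HYP translation regime follow whichever width the ID map actually picked.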
arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -71,48 +71,26 @@ __do_hyp_init:
 1:	ldr	x0, [x1, #NVHE_INIT_TPIDR_EL2]
 	msr	tpidr_el2, x0
 
+	ldr	x0, [x1, #NVHE_INIT_MAIR_EL2]
+	msr	mair_el2, x0
+
 	ldr	x0, [x1, #NVHE_INIT_STACK_HYP_VA]
 	mov	sp, x0
 
-	ldr	x1, [x1, #NVHE_INIT_PGD_PA]
-	phys_to_ttbr x0, x1
+	ldr	x0, [x1, #NVHE_INIT_PGD_PA]
+	phys_to_ttbr x2, x0
 alternative_if ARM64_HAS_CNP
-	orr	x0, x0, #TTBR_CNP_BIT
+	orr	x2, x2, #TTBR_CNP_BIT
 alternative_else_nop_endif
-	msr	ttbr0_el2, x0
+	msr	ttbr0_el2, x2
 
-	mrs	x0, tcr_el1
-	mov_q	x1, TCR_EL2_MASK
-	and	x0, x0, x1
-	mov	x1, #TCR_EL2_RES1
-	orr	x0, x0, x1
-
-	/*
-	 * The ID map may be configured to use an extended virtual address
-	 * range. This is only the case if system RAM is out of range for the
-	 * currently configured page size and VA_BITS, in which case we will
-	 * also need the extended virtual range for the HYP ID map, or we won't
-	 * be able to enable the EL2 MMU.
-	 *
-	 * However, at EL2, there is only one TTBR register, and we can't switch
-	 * between translation tables *and* update TCR_EL2.T0SZ at the same
-	 * time. Bottom line: we need to use the extended range with *both* our
-	 * translation tables.
-	 *
-	 * So use the same T0SZ value we use for the ID map.
-	 */
-	ldr_l	x1, idmap_t0sz
-	bfi	x0, x1, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
-
 	/*
 	 * Set the PS bits in TCR_EL2.
 	 */
+	ldr	x0, [x1, #NVHE_INIT_TCR_EL2]
 	tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
 	msr	tcr_el2, x0
 
-	mrs	x0, mair_el1
-	msr	mair_el2, x0
-
 	isb
 
 	/* Invalidate the stale TLBs from Bootloader */
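Note that the TTBR value is now built in x2 rather than x0/x1 so that x1 keeps pointing at the init params struct until the final NVHE_INIT_TCR_EL2 load; the old code could clobber x1 because it re-read TCR and MAIR from the EL1 registers instead.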