Commit d084ecc5 authored by David Brazdil's avatar David Brazdil Committed by Marc Zyngier

KVM: arm64: Add offset for hyp VA <-> PA conversion

Add a host-initialized constant to KVM nVHE hyp code for converting
between EL2 linear map virtual addresses and physical addresses.
Also add `__hyp_pa` macro that performs the conversion.
Signed-off-by: David Brazdil <dbrazdil@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20201202184122.26046-18-dbrazdil@google.com
parent eeeee719
...@@ -18,6 +18,9 @@ ...@@ -18,6 +18,9 @@
/* Config options set by the host. */ /* Config options set by the host. */
__ro_after_init u32 kvm_host_psci_version; __ro_after_init u32 kvm_host_psci_version;
__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids; __ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
__ro_after_init s64 hyp_physvirt_offset;
#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt) static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
{ {
......
...@@ -23,6 +23,30 @@ static u8 tag_lsb; ...@@ -23,6 +23,30 @@ static u8 tag_lsb;
static u64 tag_val; static u64 tag_val;
static u64 va_mask; static u64 va_mask;
/*
 * Apply the same VA transformation as kern_hyp_va(): keep the low bits
 * selected by va_mask and install the (possibly randomized) tag in the
 * upper bits.
 */
static u64 __early_kern_hyp_va(u64 addr)
{
	return (addr & va_mask) | (tag_val << tag_lsb);
}
/*
* Store a hyp VA <-> PA offset into a hyp-owned variable.
*/
static void init_hyp_physvirt_offset(void)
{
extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
u64 kern_va, hyp_va;
/* Compute the offset from the hyp VA and PA of a random symbol. */
kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
hyp_va = __early_kern_hyp_va(kern_va);
CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
}
/* /*
* We want to generate a hyp VA with the following format (with V == * We want to generate a hyp VA with the following format (with V ==
* vabits_actual): * vabits_actual):
...@@ -54,6 +78,8 @@ __init void kvm_compute_layout(void) ...@@ -54,6 +78,8 @@ __init void kvm_compute_layout(void)
tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb); tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
} }
tag_val >>= tag_lsb; tag_val >>= tag_lsb;
init_hyp_physvirt_offset();
} }
static u32 compute_instruction(int n, u32 rd, u32 rn) static u32 compute_instruction(int n, u32 rd, u32 rn)
...@@ -151,9 +177,7 @@ void kvm_patch_vector_branch(struct alt_instr *alt, ...@@ -151,9 +177,7 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
/* /*
* Compute HYP VA by using the same computation as kern_hyp_va() * Compute HYP VA by using the same computation as kern_hyp_va()
*/ */
addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector); addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));
addr &= va_mask;
addr |= tag_val << tag_lsb;
/* Use PC[10:7] to branch to the same vector in KVM */ /* Use PC[10:7] to branch to the same vector in KVM */
addr |= ((u64)origptr & GENMASK_ULL(10, 7)); addr |= ((u64)origptr & GENMASK_ULL(10, 7));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment