/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/virt.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

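/* Exit code -> name mapping, used by the KVM trace events via __print_symbolic() */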
#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR, 	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP, 		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
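/*
 * e.g. KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run) builds the SMCCC fast-call ID
 * (owner ARM_SMCCC_OWNER_VENDOR_HYP, function number 1 below) that the host
 * passes in x0 of the HVC when calling into the nVHE hypervisor.
 */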

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0
#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run			1
#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context		2
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa		3
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid		4
#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_local_vmid	5
#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff		6
#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs			7
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config		8
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr		9
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr		10
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs		11
#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2		12
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs		13
#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs		14

#ifndef __ASSEMBLY__

#include <linux/mm.h>

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name, one defined in the VHE
 * and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)
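/*
 * e.g. DECLARE_KVM_HYP_SYM(__kvm_hyp_vector) declares both __kvm_hyp_vector
 * (VHE) and kvm_nvhe_sym(__kvm_hyp_vector), i.e. the __kvm_nvhe_ prefixed
 * copy emitted by the nVHE build (see asm/hyp_image.h).
 */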

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute pointer to a symbol defined in nVHE percpu region.
 * Returns NULL if percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_arm_hyp_percpu_base[cpu];				\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
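/*
 * e.g. per_cpu_ptr_nvhe_sym(kvm_host_data, 2) returns the kernel VA of CPU 2's
 * copy of the nVHE kvm_host_data: the symbol's offset within the hyp percpu
 * section added to the base stashed in kvm_arm_hyp_percpu_base[2].
 */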

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet....
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif

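/*
 * Per-CPU EL2 state handed by the host to the __kvm_hyp_init hypercall:
 * hyp init installs mair_el2/tcr_el2/tpidr_el2, switches to the stack at
 * @stack_hyp_va and loads TTBR0_EL2 from @pgd_pa.
 */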
struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	phys_addr_t pgd_pa;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	 })
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_local_vmid(struct kvm_s2_mmu *mmu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"


#define __kvm_at(at_op, addr)						\
( { 									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	at	"at_op", %3\n"					\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
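/*
 * e.g. __kvm_at("s1e1r", far) performs a stage-1 EL1 read translation and
 * evaluates to 0 on success, or to -EFAULT if the AT instruction itself took
 * an unexpected exception (SPSR_EL2/ELR_EL2 are then restored by the fixup).
 */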


#else /* __ASSEMBLY__ */

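/*
 * The HOST_DATA_CONTEXT and HOST_CONTEXT_VCPU offsets used below are
 * generated by asm-offsets.c; kvm_host_data and kvm_hyp_ctxt are hyp per-CPU
 * variables, so these helpers are only used by code running at EL2.
 */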
.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Entries have the same format as _asm_extable, but are placed in a separate
 * section so that the table can be mapped to EL2. Unlike the kernel version,
 * the KVM table is not sorted. The caller must ensure that x18 holds the
 * hypervisor value (to allow any Shadow-Call-Stack instrumented code to write
 * to it), and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
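/*
 * These index the user_pt_regs embedded in struct kvm_cpu_context
 * (CPU_USER_PT_REGS is generated by asm-offsets.c); SP_EL0 is stashed in the
 * regs.sp slot, immediately after x30/lr.
 */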

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require that \ctxt is not in x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif

#endif /* __ARM_KVM_ASM_H__ */