Commit 0829a076 authored by Catalin Marinas

Merge branch 'for-next/asm-annotations' into for-next/core

* for-next/asm-annotations:
  : Modernise arm64 assembly annotations
  arm64: head: Convert install_el2_stub to SYM_INNER_LABEL
  arm64: Mark call_smc_arch_workaround_1 as __maybe_unused
  arm64: entry-ftrace.S: Fix missing argument for CONFIG_FUNCTION_GRAPH_TRACER=y
  arm64: vdso32: Convert to modern assembler annotations
  arm64: vdso: Convert to modern assembler annotations
  arm64: sdei: Annotate SDEI entry points using new style annotations
  arm64: kvm: Modernize __smccc_workaround_1_smc_start annotations
  arm64: kvm: Modernize annotation for __bp_harden_hyp_vecs
  arm64: kvm: Annotate assembly using modern annotations
  arm64: kernel: Convert to modern annotations for assembly data
  arm64: head: Annotate stext and preserve_boot_args as code
  arm64: head.S: Convert to modern annotations for assembly functions
  arm64: ftrace: Modernise annotation of return_to_handler
  arm64: ftrace: Correct annotation of ftrace_caller assembly
  arm64: entry-ftrace.S: Convert to modern annotations for assembly functions
  arm64: entry: Additional annotation conversions for entry.S
  arm64: entry: Annotate ret_from_fork as code
  arm64: entry: Annotate vector table and handlers as code
  arm64: crypto: Modernize names for AES function macros
  arm64: crypto: Modernize some extra assembly annotations
parents da12d273 d4abd29d
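
Background for the diff below: the series moves arm64 assembly from the old ENTRY()/ENDPROC()/GLOBAL() annotations to the SYM_*() macros from <linux/linkage.h>, which record what a symbol actually is: a C-callable function (SYM_FUNC_START/SYM_FUNC_END), non-function code such as vectors and trampolines (SYM_CODE_START/SYM_CODE_END), data (SYM_DATA_START/SYM_DATA_END), a file-local symbol (the *_LOCAL variants), or a label inside another symbol (SYM_INNER_LABEL). A minimal sketch of the conversion pattern, using a made-up symbol name:

	// Old style: every symbol was annotated the same way
	ENTRY(example_func)
		mov	x0, #0
		ret
	ENDPROC(example_func)

	// New style: the annotation states what the symbol is
	SYM_FUNC_START(example_func)		// C-callable function
		mov	x0, #0
		ret
	SYM_FUNC_END(example_func)
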
@@ -9,8 +9,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(ce_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(ce_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(ce_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(ce_ ## func)
 .arch armv8-a+crypto
...
@@ -51,7 +51,7 @@ SYM_FUNC_END(aes_decrypt_block5x)
 * int blocks)
 */
-AES_ENTRY(aes_ecb_encrypt)
+AES_FUNC_START(aes_ecb_encrypt)
 stp x29, x30, [sp, #-16]!
 mov x29, sp
@@ -79,10 +79,10 @@ ST5( st1 {v4.16b}, [x0], #16 )
 .Lecbencout:
 ldp x29, x30, [sp], #16
 ret
-AES_ENDPROC(aes_ecb_encrypt)
+AES_FUNC_END(aes_ecb_encrypt)
-AES_ENTRY(aes_ecb_decrypt)
+AES_FUNC_START(aes_ecb_decrypt)
 stp x29, x30, [sp, #-16]!
 mov x29, sp
@@ -110,7 +110,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
 .Lecbdecout:
 ldp x29, x30, [sp], #16
 ret
-AES_ENDPROC(aes_ecb_decrypt)
+AES_FUNC_END(aes_ecb_decrypt)
 /*
@@ -126,7 +126,7 @@ AES_ENDPROC(aes_ecb_decrypt)
 * u32 const rk2[]);
 */
-AES_ENTRY(aes_essiv_cbc_encrypt)
+AES_FUNC_START(aes_essiv_cbc_encrypt)
 ld1 {v4.16b}, [x5] /* get iv */
 mov w8, #14 /* AES-256: 14 rounds */
@@ -135,7 +135,7 @@ AES_ENTRY(aes_essiv_cbc_encrypt)
 enc_switch_key w3, x2, x6
 b .Lcbcencloop4x
-AES_ENTRY(aes_cbc_encrypt)
+AES_FUNC_START(aes_cbc_encrypt)
 ld1 {v4.16b}, [x5] /* get iv */
 enc_prepare w3, x2, x6
@@ -167,10 +167,10 @@ AES_ENTRY(aes_cbc_encrypt)
 .Lcbcencout:
 st1 {v4.16b}, [x5] /* return iv */
 ret
-AES_ENDPROC(aes_cbc_encrypt)
+AES_FUNC_END(aes_cbc_encrypt)
-AES_ENDPROC(aes_essiv_cbc_encrypt)
+AES_FUNC_END(aes_essiv_cbc_encrypt)
-AES_ENTRY(aes_essiv_cbc_decrypt)
+AES_FUNC_START(aes_essiv_cbc_decrypt)
 stp x29, x30, [sp, #-16]!
 mov x29, sp
@@ -181,7 +181,7 @@ AES_ENTRY(aes_essiv_cbc_decrypt)
 encrypt_block cbciv, w8, x6, x7, w9
 b .Lessivcbcdecstart
-AES_ENTRY(aes_cbc_decrypt)
+AES_FUNC_START(aes_cbc_decrypt)
 stp x29, x30, [sp, #-16]!
 mov x29, sp
@@ -238,8 +238,8 @@ ST5( st1 {v4.16b}, [x0], #16 )
 st1 {cbciv.16b}, [x5] /* return iv */
 ldp x29, x30, [sp], #16
 ret
-AES_ENDPROC(aes_cbc_decrypt)
+AES_FUNC_END(aes_cbc_decrypt)
-AES_ENDPROC(aes_essiv_cbc_decrypt)
+AES_FUNC_END(aes_essiv_cbc_decrypt)
 /*
@@ -249,7 +249,7 @@ AES_ENDPROC(aes_essiv_cbc_decrypt)
 * int rounds, int bytes, u8 const iv[])
 */
-AES_ENTRY(aes_cbc_cts_encrypt)
+AES_FUNC_START(aes_cbc_cts_encrypt)
 adr_l x8, .Lcts_permute_table
 sub x4, x4, #16
 add x9, x8, #32
@@ -276,9 +276,9 @@ AES_ENTRY(aes_cbc_cts_encrypt)
 st1 {v0.16b}, [x4] /* overlapping stores */
 st1 {v1.16b}, [x0]
 ret
-AES_ENDPROC(aes_cbc_cts_encrypt)
+AES_FUNC_END(aes_cbc_cts_encrypt)
-AES_ENTRY(aes_cbc_cts_decrypt)
+AES_FUNC_START(aes_cbc_cts_decrypt)
 adr_l x8, .Lcts_permute_table
 sub x4, x4, #16
 add x9, x8, #32
@@ -305,7 +305,7 @@ AES_ENTRY(aes_cbc_cts_decrypt)
 st1 {v2.16b}, [x4] /* overlapping stores */
 st1 {v0.16b}, [x0]
 ret
-AES_ENDPROC(aes_cbc_cts_decrypt)
+AES_FUNC_END(aes_cbc_cts_decrypt)
 .section ".rodata", "a"
 .align 6
@@ -324,7 +324,7 @@ AES_ENDPROC(aes_cbc_cts_decrypt)
 * int blocks, u8 ctr[])
 */
-AES_ENTRY(aes_ctr_encrypt)
+AES_FUNC_START(aes_ctr_encrypt)
 stp x29, x30, [sp, #-16]!
 mov x29, sp
@@ -409,7 +409,7 @@ ST5( st1 {v4.16b}, [x0], #16 )
 rev x7, x7
 ins vctr.d[0], x7
 b .Lctrcarrydone
-AES_ENDPROC(aes_ctr_encrypt)
+AES_FUNC_END(aes_ctr_encrypt)
 /*
@@ -433,7 +433,7 @@ AES_ENDPROC(aes_ctr_encrypt)
 uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s
 .endm
-AES_ENTRY(aes_xts_encrypt)
+AES_FUNC_START(aes_xts_encrypt)
 stp x29, x30, [sp, #-16]!
 mov x29, sp
@@ -518,9 +518,9 @@ AES_ENTRY(aes_xts_encrypt)
 st1 {v2.16b}, [x4] /* overlapping stores */
 mov w4, wzr
 b .Lxtsencctsout
-AES_ENDPROC(aes_xts_encrypt)
+AES_FUNC_END(aes_xts_encrypt)
-AES_ENTRY(aes_xts_decrypt)
+AES_FUNC_START(aes_xts_decrypt)
 stp x29, x30, [sp, #-16]!
 mov x29, sp
@@ -612,13 +612,13 @@ AES_ENTRY(aes_xts_decrypt)
 st1 {v2.16b}, [x4] /* overlapping stores */
 mov w4, wzr
 b .Lxtsdecctsout
-AES_ENDPROC(aes_xts_decrypt)
+AES_FUNC_END(aes_xts_decrypt)
 /*
 * aes_mac_update(u8 const in[], u32 const rk[], int rounds,
 * int blocks, u8 dg[], int enc_before, int enc_after)
 */
-AES_ENTRY(aes_mac_update)
+AES_FUNC_START(aes_mac_update)
 frame_push 6
 mov x19, x0
@@ -676,4 +676,4 @@ AES_ENTRY(aes_mac_update)
 ld1 {v0.16b}, [x23] /* get dg */
 enc_prepare w21, x20, x0
 b .Lmacloop4x
-AES_ENDPROC(aes_mac_update)
+AES_FUNC_END(aes_mac_update)
@@ -8,8 +8,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
-#define AES_ENTRY(func) SYM_FUNC_START(neon_ ## func)
-#define AES_ENDPROC(func) SYM_FUNC_END(neon_ ## func)
+#define AES_FUNC_START(func) SYM_FUNC_START(neon_ ## func)
+#define AES_FUNC_END(func) SYM_FUNC_END(neon_ ## func)
 xtsmask .req v7
 cbciv .req v7
...
@@ -587,20 +587,20 @@ CPU_LE( rev w8, w8 )
 * struct ghash_key const *k, u64 dg[], u8 ctr[],
 * int rounds, u8 tag)
 */
-ENTRY(pmull_gcm_encrypt)
+SYM_FUNC_START(pmull_gcm_encrypt)
 pmull_gcm_do_crypt 1
-ENDPROC(pmull_gcm_encrypt)
+SYM_FUNC_END(pmull_gcm_encrypt)
 /*
 * void pmull_gcm_decrypt(int blocks, u8 dst[], const u8 src[],
 * struct ghash_key const *k, u64 dg[], u8 ctr[],
 * int rounds, u8 tag)
 */
-ENTRY(pmull_gcm_decrypt)
+SYM_FUNC_START(pmull_gcm_decrypt)
 pmull_gcm_do_crypt 0
-ENDPROC(pmull_gcm_decrypt)
+SYM_FUNC_END(pmull_gcm_decrypt)
-pmull_gcm_ghash_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_ghash_4x)
 movi MASK.16b, #0xe1
 shl MASK.2d, MASK.2d, #57
@@ -681,9 +681,9 @@ pmull_gcm_ghash_4x:
 eor XL.16b, XL.16b, T2.16b
 ret
-ENDPROC(pmull_gcm_ghash_4x)
+SYM_FUNC_END(pmull_gcm_ghash_4x)
-pmull_gcm_enc_4x:
+SYM_FUNC_START_LOCAL(pmull_gcm_enc_4x)
 ld1 {KS0.16b}, [x5] // load upper counter
 sub w10, w8, #4
 sub w11, w8, #3
@@ -746,7 +746,7 @@ pmull_gcm_enc_4x:
 eor INP3.16b, INP3.16b, KS3.16b
 ret
-ENDPROC(pmull_gcm_enc_4x)
+SYM_FUNC_END(pmull_gcm_enc_4x)
 .section ".rodata", "a"
 .align 6
...
@@ -36,6 +36,8 @@
 */
 #define KVM_VECTOR_PREAMBLE (2 * AARCH64_INSN_SIZE)
+#define __SMCCC_WORKAROUND_1_SMC_SZ 36
 #ifndef __ASSEMBLY__
 #include <linux/mm.h>
@@ -75,6 +77,8 @@ extern void __vgic_v3_init_lrs(void);
 extern u32 __kvm_get_mdcr_el2(void);
+extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];
 /* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
 #define __hyp_this_cpu_ptr(sym) \
 ({ \
...
@@ -480,7 +480,7 @@ static inline void *kvm_get_hyp_vector(void)
 int slot = -1;
 if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
-vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
 slot = data->hyp_vectors_slot;
 }
@@ -509,14 +509,13 @@
 * HBP + HEL2 -> use hardened vertors and use exec mapping
 */
 if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
 __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
 }
 if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
+phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
-unsigned long size = (__bp_harden_hyp_vecs_end -
-__bp_harden_hyp_vecs_start);
+unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
 /*
 * Always allocate a spare vector slot, as we don't
...
@@ -13,6 +13,7 @@
 #define TTBR_ASID_MASK (UL(0xffff) << 48)
 #define BP_HARDEN_EL2_SLOTS 4
+#define __BP_HARDEN_HYP_VECS_SZ (BP_HARDEN_EL2_SLOTS * SZ_2K)
 #ifndef __ASSEMBLY__
@@ -45,7 +46,8 @@ struct bp_hardening_data {
 #if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) || \
 defined(CONFIG_HARDEN_EL2_VECTORS))
-extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
+extern char __bp_harden_hyp_vecs[];
 extern atomic_t arm64_el2_vector_last_slot;
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
...
@@ -11,6 +11,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/kvm_asm.h>
 #include <asm/smp_plat.h>
 static bool __maybe_unused
@@ -113,13 +114,10 @@ atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
-extern char __smccc_workaround_1_smc_start[];
-extern char __smccc_workaround_1_smc_end[];
 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 const char *hyp_vecs_end)
 {
-void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
+void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
 int i;
 for (i = 0; i < SZ_2K; i += 0x80)
@@ -163,9 +161,6 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 raw_spin_unlock(&bp_lock);
 }
 #else
-#define __smccc_workaround_1_smc_start NULL
-#define __smccc_workaround_1_smc_end NULL
 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 const char *hyp_vecs_start,
 const char *hyp_vecs_end)
@@ -176,7 +171,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 #include <linux/arm-smccc.h>
-static void call_smc_arch_workaround_1(void)
+static void __maybe_unused call_smc_arch_workaround_1(void)
 {
 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
 }
@@ -239,11 +234,14 @@ static int detect_harden_bp_fw(void)
 smccc_end = NULL;
 break;
+#if IS_ENABLED(CONFIG_KVM_ARM_HOST)
 case SMCCC_CONDUIT_SMC:
 cb = call_smc_arch_workaround_1;
-smccc_start = __smccc_workaround_1_smc_start;
+smccc_start = __smccc_workaround_1_smc;
-smccc_end = __smccc_workaround_1_smc_end;
+smccc_end = __smccc_workaround_1_smc +
+__SMCCC_WORKAROUND_1_SMC_SZ;
 break;
+#endif
 default:
 return -1;
...
@@ -75,27 +75,27 @@
 add x29, sp, #S_STACKFRAME
 .endm
-ENTRY(ftrace_regs_caller)
+SYM_CODE_START(ftrace_regs_caller)
 ftrace_regs_entry 1
 b ftrace_common
-ENDPROC(ftrace_regs_caller)
+SYM_CODE_END(ftrace_regs_caller)
-ENTRY(ftrace_caller)
+SYM_CODE_START(ftrace_caller)
 ftrace_regs_entry 0
 b ftrace_common
-ENDPROC(ftrace_caller)
+SYM_CODE_END(ftrace_caller)
-ENTRY(ftrace_common)
+SYM_CODE_START(ftrace_common)
 sub x0, x30, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
 mov x1, x9 // parent_ip (callsite's LR)
 ldr_l x2, function_trace_op // op
 mov x3, sp // regs
-GLOBAL(ftrace_call)
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
 bl ftrace_stub
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
 nop // If enabled, this will be replaced
 // "b ftrace_graph_caller"
 #endif
@@ -122,17 +122,17 @@ ftrace_common_return:
 add sp, sp, #S_FRAME_SIZE + 16
 ret x9
-ENDPROC(ftrace_common)
+SYM_CODE_END(ftrace_common)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
+SYM_CODE_START(ftrace_graph_caller)
 ldr x0, [sp, #S_PC]
 sub x0, x0, #AARCH64_INSN_SIZE // ip (callsite's BL insn)
 add x1, sp, #S_LR // parent_ip (callsite's LR)
 ldr x2, [sp, #S_FRAME_SIZE] // parent fp (callsite's FP)
 bl prepare_ftrace_return
 b ftrace_common_return
-ENDPROC(ftrace_graph_caller)
+SYM_CODE_END(ftrace_graph_caller)
 #endif
 #else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
@@ -218,7 +218,7 @@ ENDPROC(ftrace_graph_caller)
 * - tracer function to probe instrumented function's entry,
 * - ftrace_graph_caller to set up an exit hook
 */
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
 mcount_enter
 ldr_l x2, ftrace_trace_function
@@ -242,7 +242,7 @@ skip_ftrace_call: // }
 b.ne ftrace_graph_caller // ftrace_graph_caller();
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 mcount_exit
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
 EXPORT_SYMBOL(_mcount)
 NOKPROBE(_mcount)
@@ -253,9 +253,9 @@ NOKPROBE(_mcount)
 * and later on, NOP to branch to ftrace_caller() when enabled or branch to
 * NOP when disabled per-function base.
 */
-ENTRY(_mcount)
+SYM_FUNC_START(_mcount)
 ret
-ENDPROC(_mcount)
+SYM_FUNC_END(_mcount)
 EXPORT_SYMBOL(_mcount)
 NOKPROBE(_mcount)
@@ -268,24 +268,24 @@ NOKPROBE(_mcount)
 * - tracer function to probe instrumented function's entry,
 * - ftrace_graph_caller to set up an exit hook
 */
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
 mcount_enter
 mcount_get_pc0 x0 // function's pc
 mcount_get_lr x1 // function's lr
-GLOBAL(ftrace_call) // tracer(pc, lr);
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) // tracer(pc, lr);
 nop // This will be replaced with "bl xxx"
 // where xxx can be any kind of tracer.
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-GLOBAL(ftrace_graph_call) // ftrace_graph_caller();
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) // ftrace_graph_caller();
 nop // If enabled, this will be replaced
 // "b ftrace_graph_caller"
 #endif
 mcount_exit
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -298,20 +298,20 @@ ENDPROC(ftrace_caller)
 * the call stack in order to intercept instrumented function's return path
 * and run return_to_handler() later on its exit.
 */
-ENTRY(ftrace_graph_caller)
+SYM_FUNC_START(ftrace_graph_caller)
 mcount_get_pc x0 // function's pc
 mcount_get_lr_addr x1 // pointer to function's saved lr
 mcount_get_parent_fp x2 // parent's fp
 bl prepare_ftrace_return // prepare_ftrace_return(pc, &lr, fp)
 mcount_exit
-ENDPROC(ftrace_graph_caller)
+SYM_FUNC_END(ftrace_graph_caller)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-ENTRY(ftrace_stub)
+SYM_FUNC_START(ftrace_stub)
 ret
-ENDPROC(ftrace_stub)
+SYM_FUNC_END(ftrace_stub)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /*
@@ -320,7 +320,7 @@ ENDPROC(ftrace_stub)
 * Run ftrace_return_to_handler() before going back to parent.
 * @fp is checked against the value passed by ftrace_graph_caller().
 */
-ENTRY(return_to_handler)
+SYM_CODE_START(return_to_handler)
 /* save return value regs */
 sub sp, sp, #64
 stp x0, x1, [sp]
@@ -340,5 +340,5 @@ ENTRY(return_to_handler)
 add sp, sp, #64
 ret
-END(return_to_handler)
+SYM_CODE_END(return_to_handler)
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -465,7 +465,7 @@ alternative_endif
 .pushsection ".entry.text", "ax"
 .align 11
-ENTRY(vectors)
+SYM_CODE_START(vectors)
 kernel_ventry 1, sync_invalid // Synchronous EL1t
 kernel_ventry 1, irq_invalid // IRQ EL1t
 kernel_ventry 1, fiq_invalid // FIQ EL1t
@@ -492,7 +492,7 @@ ENTRY(vectors)
 kernel_ventry 0, fiq_invalid, 32 // FIQ 32-bit EL0
 kernel_ventry 0, error_invalid, 32 // Error 32-bit EL0
 #endif
-END(vectors)
+SYM_CODE_END(vectors)
 #ifdef CONFIG_VMAP_STACK
 /*
@@ -534,57 +534,57 @@ __bad_stack:
 ASM_BUG()
 .endm
-el0_sync_invalid:
+SYM_CODE_START_LOCAL(el0_sync_invalid)
 inv_entry 0, BAD_SYNC
-ENDPROC(el0_sync_invalid)
+SYM_CODE_END(el0_sync_invalid)
-el0_irq_invalid:
+SYM_CODE_START_LOCAL(el0_irq_invalid)
 inv_entry 0, BAD_IRQ
-ENDPROC(el0_irq_invalid)
+SYM_CODE_END(el0_irq_invalid)
-el0_fiq_invalid:
+SYM_CODE_START_LOCAL(el0_fiq_invalid)
 inv_entry 0, BAD_FIQ
-ENDPROC(el0_fiq_invalid)
+SYM_CODE_END(el0_fiq_invalid)
-el0_error_invalid:
+SYM_CODE_START_LOCAL(el0_error_invalid)
 inv_entry 0, BAD_ERROR
-ENDPROC(el0_error_invalid)
+SYM_CODE_END(el0_error_invalid)
 #ifdef CONFIG_COMPAT
-el0_fiq_invalid_compat:
+SYM_CODE_START_LOCAL(el0_fiq_invalid_compat)
 inv_entry 0, BAD_FIQ, 32
-ENDPROC(el0_fiq_invalid_compat)
+SYM_CODE_END(el0_fiq_invalid_compat)
 #endif
-el1_sync_invalid:
+SYM_CODE_START_LOCAL(el1_sync_invalid)
 inv_entry 1, BAD_SYNC
-ENDPROC(el1_sync_invalid)
+SYM_CODE_END(el1_sync_invalid)
-el1_irq_invalid:
+SYM_CODE_START_LOCAL(el1_irq_invalid)
 inv_entry 1, BAD_IRQ
-ENDPROC(el1_irq_invalid)
+SYM_CODE_END(el1_irq_invalid)
-el1_fiq_invalid:
+SYM_CODE_START_LOCAL(el1_fiq_invalid)
 inv_entry 1, BAD_FIQ
-ENDPROC(el1_fiq_invalid)
+SYM_CODE_END(el1_fiq_invalid)
-el1_error_invalid:
+SYM_CODE_START_LOCAL(el1_error_invalid)
 inv_entry 1, BAD_ERROR
-ENDPROC(el1_error_invalid)
+SYM_CODE_END(el1_error_invalid)
 /*
 * EL1 mode handlers.
 */
 .align 6
-el1_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el1_sync)
 kernel_entry 1
 mov x0, sp
 bl el1_sync_handler
 kernel_exit 1
-ENDPROC(el1_sync)
+SYM_CODE_END(el1_sync)
 .align 6
-el1_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
 kernel_entry 1
 gic_prio_irq_setup pmr=x20, tmp=x1
 enable_da_f
@@ -639,42 +639,42 @@ alternative_else_nop_endif
 #endif
 kernel_exit 1
-ENDPROC(el1_irq)
+SYM_CODE_END(el1_irq)
 /*
 * EL0 mode handlers.
 */
 .align 6
-el0_sync:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync)
 kernel_entry 0
 mov x0, sp
 bl el0_sync_handler
 b ret_to_user
-ENDPROC(el0_sync)
+SYM_CODE_END(el0_sync)
 #ifdef CONFIG_COMPAT
 .align 6
-el0_sync_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_sync_compat)
 kernel_entry 0, 32
 mov x0, sp
 bl el0_sync_compat_handler
 b ret_to_user
-ENDPROC(el0_sync_compat)
+SYM_CODE_END(el0_sync_compat)
 .align 6
-el0_irq_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
 kernel_entry 0, 32
 b el0_irq_naked
-ENDPROC(el0_irq_compat)
+SYM_CODE_END(el0_irq_compat)
-el0_error_compat:
+SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
 kernel_entry 0, 32
 b el0_error_naked
-ENDPROC(el0_error_compat)
+SYM_CODE_END(el0_error_compat)
 #endif
 .align 6
-el0_irq:
+SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
 kernel_entry 0
 el0_irq_naked:
 gic_prio_irq_setup pmr=x20, tmp=x0
@@ -696,9 +696,9 @@ el0_irq_naked:
 bl trace_hardirqs_on
 #endif
 b ret_to_user
-ENDPROC(el0_irq)
+SYM_CODE_END(el0_irq)
-el1_error:
+SYM_CODE_START_LOCAL(el1_error)
 kernel_entry 1
 mrs x1, esr_el1
 gic_prio_kentry_setup tmp=x2
@@ -706,9 +706,9 @@ el1_error:
 mov x0, sp
 bl do_serror
 kernel_exit 1
-ENDPROC(el1_error)
+SYM_CODE_END(el1_error)
-el0_error:
+SYM_CODE_START_LOCAL(el0_error)
 kernel_entry 0
 el0_error_naked:
 mrs x25, esr_el1
@@ -720,7 +720,7 @@ el0_error_naked:
 bl do_serror
 enable_da_f
 b ret_to_user
-ENDPROC(el0_error)
+SYM_CODE_END(el0_error)
 /*
 * Ok, we need to do extra processing, enter the slow path.
@@ -832,7 +832,7 @@ alternative_else_nop_endif
 .endm
 .align 11
-ENTRY(tramp_vectors)
+SYM_CODE_START_NOALIGN(tramp_vectors)
 .space 0x400
 tramp_ventry
@@ -844,24 +844,24 @@ ENTRY(tramp_vectors)
 tramp_ventry 32
 tramp_ventry 32
 tramp_ventry 32
-END(tramp_vectors)
+SYM_CODE_END(tramp_vectors)
-ENTRY(tramp_exit_native)
+SYM_CODE_START(tramp_exit_native)
 tramp_exit
-END(tramp_exit_native)
+SYM_CODE_END(tramp_exit_native)
-ENTRY(tramp_exit_compat)
+SYM_CODE_START(tramp_exit_compat)
 tramp_exit 32
-END(tramp_exit_compat)
+SYM_CODE_END(tramp_exit_compat)
 .ltorg
 .popsection // .entry.tramp.text
 #ifdef CONFIG_RANDOMIZE_BASE
 .pushsection ".rodata", "a"
 .align PAGE_SHIFT
-.globl __entry_tramp_data_start
-__entry_tramp_data_start:
+SYM_DATA_START(__entry_tramp_data_start)
 .quad vectors
+SYM_DATA_END(__entry_tramp_data_start)
 .popsection // .rodata
 #endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -874,7 +874,7 @@ __entry_tramp_data_start:
 * Previous and next are guaranteed not to be the same.
 *
 */
-ENTRY(cpu_switch_to)
+SYM_FUNC_START(cpu_switch_to)
 mov x10, #THREAD_CPU_CONTEXT
 add x8, x0, x10
 mov x9, sp
@@ -896,20 +896,20 @@ ENTRY(cpu_switch_to)
 mov sp, x9
 msr sp_el0, x1
 ret
-ENDPROC(cpu_switch_to)
+SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
 /*
 * This is how we return from a fork.
 */
-ENTRY(ret_from_fork)
+SYM_CODE_START(ret_from_fork)
 bl schedule_tail
 cbz x19, 1f // not a kernel thread
 mov x0, x20
 blr x19
 1: get_current_task tsk
 b ret_to_user
-ENDPROC(ret_from_fork)
+SYM_CODE_END(ret_from_fork)
 NOKPROBE(ret_from_fork)
 #ifdef CONFIG_ARM_SDE_INTERFACE
@@ -938,7 +938,7 @@ NOKPROBE(ret_from_fork)
 */
 .ltorg
 .pushsection ".entry.tramp.text", "ax"
-ENTRY(__sdei_asm_entry_trampoline)
+SYM_CODE_START(__sdei_asm_entry_trampoline)
 mrs x4, ttbr1_el1
 tbz x4, #USER_ASID_BIT, 1f
@@ -960,7 +960,7 @@ ENTRY(__sdei_asm_entry_trampoline)
 ldr x4, =__sdei_asm_handler
 #endif
 br x4
-ENDPROC(__sdei_asm_entry_trampoline)
+SYM_CODE_END(__sdei_asm_entry_trampoline)
 NOKPROBE(__sdei_asm_entry_trampoline)
 /*
@@ -970,21 +970,22 @@ NOKPROBE(__sdei_asm_entry_trampoline)
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
-ENTRY(__sdei_asm_exit_trampoline)
+SYM_CODE_START(__sdei_asm_exit_trampoline)
 ldr x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
 cbnz x4, 1f
 tramp_unmap_kernel tmp=x4
 1: sdei_handler_exit exit_mode=x2
-ENDPROC(__sdei_asm_exit_trampoline)
+SYM_CODE_END(__sdei_asm_exit_trampoline)
 NOKPROBE(__sdei_asm_exit_trampoline)
 .ltorg
 .popsection // .entry.tramp.text
 #ifdef CONFIG_RANDOMIZE_BASE
 .pushsection ".rodata", "a"
-__sdei_asm_trampoline_next_handler:
+SYM_DATA_START(__sdei_asm_trampoline_next_handler)
 .quad __sdei_asm_handler
+SYM_DATA_END(__sdei_asm_trampoline_next_handler)
 .popsection // .rodata
 #endif /* CONFIG_RANDOMIZE_BASE */
 #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
@@ -1002,7 +1003,7 @@ __sdei_asm_trampoline_next_handler:
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
-ENTRY(__sdei_asm_handler)
+SYM_CODE_START(__sdei_asm_handler)
 stp x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
 stp x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
 stp x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
@@ -1085,6 +1086,6 @@ alternative_else_nop_endif
 tramp_alias dst=x5, sym=__sdei_asm_exit_trampoline
 br x5
 #endif
-ENDPROC(__sdei_asm_handler)
+SYM_CODE_END(__sdei_asm_handler)
 NOKPROBE(__sdei_asm_handler)
 #endif /* CONFIG_ARM_SDE_INTERFACE */
@@ -105,7 +105,7 @@ pe_header:
 * x24 __primary_switch() .. relocate_kernel()
 * current RELR displacement
 */
-ENTRY(stext)
+SYM_CODE_START(stext)
 bl preserve_boot_args
 bl el2_setup // Drop to EL1, w0=cpu_boot_mode
 adrp x23, __PHYS_OFFSET
@@ -120,12 +120,12 @@ ENTRY(stext)
 */
 bl __cpu_setup // initialise processor
 b __primary_switch
-ENDPROC(stext)
+SYM_CODE_END(stext)
 /*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
-preserve_boot_args:
+SYM_CODE_START_LOCAL(preserve_boot_args)
 mov x21, x0 // x21=FDT
 adr_l x0, boot_args // record the contents of
@@ -137,7 +137,7 @@ preserve_boot_args:
 mov x1, #0x20 // 4 x 8 bytes
 b __inval_dcache_area // tail call
-ENDPROC(preserve_boot_args)
+SYM_CODE_END(preserve_boot_args)
 /*
 * Macro to create a table entry to the next page.
@@ -275,7 +275,7 @@ ENDPROC(preserve_boot_args)
 * - first few MB of the kernel linear mapping to jump to once the MMU has
 * been enabled
 */
-__create_page_tables:
+SYM_FUNC_START_LOCAL(__create_page_tables)
 mov x28, lr
 /*
@@ -403,14 +403,14 @@ __create_page_tables:
 bl __inval_dcache_area
 ret x28
-ENDPROC(__create_page_tables)
+SYM_FUNC_END(__create_page_tables)
 /*
 * The following fragment of code is executed with the MMU enabled.
 *
 * x0 = __PHYS_OFFSET
 */
-__primary_switched:
+SYM_FUNC_START_LOCAL(__primary_switched)
 adrp x4, init_thread_union
 add sp, x4, #THREAD_SIZE
 adr_l x5, init_task
@@ -455,7 +455,7 @@ __primary_switched:
 mov x29, #0
 mov x30, #0
 b start_kernel
-ENDPROC(__primary_switched)
+SYM_FUNC_END(__primary_switched)
 /*
 * end early head section, begin head code that is also used for
@@ -463,8 +463,9 @@ ENDPROC(__primary_switched)
 */
 .section ".idmap.text","awx"
-ENTRY(kimage_vaddr)
+SYM_DATA_START(kimage_vaddr)
 .quad _text - TEXT_OFFSET
+SYM_DATA_END(kimage_vaddr)
 EXPORT_SYMBOL(kimage_vaddr)
 /*
@@ -474,7 +475,7 @@ EXPORT_SYMBOL(kimage_vaddr)
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
-ENTRY(el2_setup)
+SYM_FUNC_START(el2_setup)
 msr SPsel, #1 // We want to use SP_EL{1,2}
 mrs x0, CurrentEL
 cmp x0, #CurrentEL_EL2
@@ -598,7 +599,7 @@ set_hcr:
 isb
 ret
-install_el2_stub:
+SYM_INNER_LABEL(install_el2_stub, SYM_L_LOCAL)
 /*
 * When VHE is not in use, early init of EL2 and EL1 needs to be
 * done here.
@@ -635,13 +636,13 @@ install_el2_stub:
 msr elr_el2, lr
 mov w0, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
 eret
-ENDPROC(el2_setup)
+SYM_FUNC_END(el2_setup)
 /*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
-set_cpu_boot_mode_flag:
+SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
 adr_l x1, __boot_cpu_mode
 cmp w0, #BOOT_CPU_MODE_EL2
 b.ne 1f
@@ -650,7 +651,7 @@ set_cpu_boot_mode_flag:
 dmb sy
 dc ivac, x1 // Invalidate potentially stale cache line
 ret
-ENDPROC(set_cpu_boot_mode_flag)
+SYM_FUNC_END(set_cpu_boot_mode_flag)
 /*
 * These values are written with the MMU off, but read with the MMU on.
@@ -666,15 +667,17 @@ ENDPROC(set_cpu_boot_mode_flag)
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
-ENTRY(__boot_cpu_mode)
+SYM_DATA_START(__boot_cpu_mode)
 .long BOOT_CPU_MODE_EL2
 .long BOOT_CPU_MODE_EL1
+SYM_DATA_END(__boot_cpu_mode)
 /*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
-ENTRY(__early_cpu_boot_status)
+SYM_DATA_START(__early_cpu_boot_status)
 .quad 0
+SYM_DATA_END(__early_cpu_boot_status)
 .popsection
@@ -682,7 +685,7 @@ ENTRY(__early_cpu_boot_status)
 * This provides a "holding pen" for platforms to hold all secondary
 * cores are held until we're ready for them to initialise.
 */
-ENTRY(secondary_holding_pen)
+SYM_FUNC_START(secondary_holding_pen)
 bl el2_setup // Drop to EL1, w0=cpu_boot_mode
 bl set_cpu_boot_mode_flag
 mrs x0, mpidr_el1
@@ -694,19 +697,19 @@ pen: ldr x4, [x3]
 b.eq secondary_startup
 wfe
 b pen
-ENDPROC(secondary_holding_pen)
+SYM_FUNC_END(secondary_holding_pen)
 /*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
-ENTRY(secondary_entry)
+SYM_FUNC_START(secondary_entry)
 bl el2_setup // Drop to EL1
 bl set_cpu_boot_mode_flag
 b secondary_startup
-ENDPROC(secondary_entry)
+SYM_FUNC_END(secondary_entry)
-secondary_startup:
+SYM_FUNC_START_LOCAL(secondary_startup)
 /*
 * Common entry point for secondary CPUs.
 */
@@ -716,9 +719,9 @@ secondary_startup:
 bl __enable_mmu
 ldr x8, =__secondary_switched
 br x8
-ENDPROC(secondary_startup)
+SYM_FUNC_END(secondary_startup)
-__secondary_switched:
+SYM_FUNC_START_LOCAL(__secondary_switched)
 adr_l x5, vectors
 msr vbar_el1, x5
 isb
@@ -733,13 +736,13 @@ __secondary_switched:
 mov x29, #0
 mov x30, #0
 b secondary_start_kernel
-ENDPROC(__secondary_switched)
+SYM_FUNC_END(__secondary_switched)
-__secondary_too_slow:
+SYM_FUNC_START_LOCAL(__secondary_too_slow)
 wfe
 wfi
 b __secondary_too_slow
-ENDPROC(__secondary_too_slow)
+SYM_FUNC_END(__secondary_too_slow)
 /*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
@@ -771,7 +774,7 @@ ENDPROC(__secondary_too_slow)
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
-ENTRY(__enable_mmu)
+SYM_FUNC_START(__enable_mmu)
 mrs x2, ID_AA64MMFR0_EL1
 ubfx x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
 cmp x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
@@ -795,9 +798,9 @@ ENTRY(__enable_mmu)
 dsb nsh
 isb
 ret
-ENDPROC(__enable_mmu)
+SYM_FUNC_END(__enable_mmu)
-ENTRY(__cpu_secondary_check52bitva)
+SYM_FUNC_START(__cpu_secondary_check52bitva)
 #ifdef CONFIG_ARM64_VA_BITS_52
 ldr_l x0, vabits_actual
 cmp x0, #52
@@ -815,9 +818,9 @@ ENTRY(__cpu_secondary_check52bitva)
 #endif
 2: ret
-ENDPROC(__cpu_secondary_check52bitva)
+SYM_FUNC_END(__cpu_secondary_check52bitva)
-__no_granule_support:
+SYM_FUNC_START_LOCAL(__no_granule_support)
 /* Indicate that this CPU can't boot and is stuck in the kernel */
 update_early_cpu_boot_status \
 CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
@@ -825,10 +828,10 @@ __no_granule_support:
 wfe
 wfi
 b 1b
-ENDPROC(__no_granule_support)
+SYM_FUNC_END(__no_granule_support)
 #ifdef CONFIG_RELOCATABLE
-__relocate_kernel:
+SYM_FUNC_START_LOCAL(__relocate_kernel)
 /*
 * Iterate over each entry in the relocation table, and apply the
 * relocations in place.
@@ -930,10 +933,10 @@ __relocate_kernel:
 #endif
 ret
-ENDPROC(__relocate_kernel)
+SYM_FUNC_END(__relocate_kernel)
 #endif
-__primary_switch:
+SYM_FUNC_START_LOCAL(__primary_switch)
 #ifdef CONFIG_RANDOMIZE_BASE
 mov x19, x0 // preserve new SCTLR_EL1 value
 mrs x20, sctlr_el1 // preserve old SCTLR_EL1 value
@@ -976,4 +979,4 @@ __primary_switch:
 ldr x8, =__primary_switched
 adrp x0, __PHYS_OFFSET
 br x8
-ENDPROC(__primary_switch)
+SYM_FUNC_END(__primary_switch)
@@ -14,7 +14,7 @@
 .text
 nop
-ENTRY(__kernel_rt_sigreturn)
+SYM_FUNC_START(__kernel_rt_sigreturn)
 .cfi_startproc
 .cfi_signal_frame
 .cfi_def_cfa x29, 0
@@ -23,4 +23,4 @@ ENTRY(__kernel_rt_sigreturn)
 mov x8, #__NR_rt_sigreturn
 svc #0
 .cfi_endproc
-ENDPROC(__kernel_rt_sigreturn)
+SYM_FUNC_END(__kernel_rt_sigreturn)
@@ -10,13 +10,6 @@
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
-#define ARM_ENTRY(name) \
-ENTRY(name)
-#define ARM_ENDPROC(name) \
-.type name, %function; \
-END(name)
 .text
 .arm
@@ -24,39 +17,39 @@
 .save {r0-r15}
 .pad #COMPAT_SIGFRAME_REGS_OFFSET
 nop
-ARM_ENTRY(__kernel_sigreturn_arm)
+SYM_FUNC_START(__kernel_sigreturn_arm)
 mov r7, #__NR_compat_sigreturn
 svc #0
 .fnend
-ARM_ENDPROC(__kernel_sigreturn_arm)
+SYM_FUNC_END(__kernel_sigreturn_arm)
 .fnstart
 .save {r0-r15}
 .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
 nop
-ARM_ENTRY(__kernel_rt_sigreturn_arm)
+SYM_FUNC_START(__kernel_rt_sigreturn_arm)
 mov r7, #__NR_compat_rt_sigreturn
 svc #0
 .fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_arm)
+SYM_FUNC_END(__kernel_rt_sigreturn_arm)
 .thumb
 .fnstart
 .save {r0-r15}
 .pad #COMPAT_SIGFRAME_REGS_OFFSET
 nop
-ARM_ENTRY(__kernel_sigreturn_thumb)
+SYM_FUNC_START(__kernel_sigreturn_thumb)
 mov r7, #__NR_compat_sigreturn
 svc #0
 .fnend
-ARM_ENDPROC(__kernel_sigreturn_thumb)
+SYM_FUNC_END(__kernel_sigreturn_thumb)
 .fnstart
 .save {r0-r15}
 .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
 nop
-ARM_ENTRY(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_START(__kernel_rt_sigreturn_thumb)
 mov r7, #__NR_compat_rt_sigreturn
 svc #0
 .fnend
-ARM_ENDPROC(__kernel_rt_sigreturn_thumb)
+SYM_FUNC_END(__kernel_rt_sigreturn_thumb)
@@ -18,7 +18,7 @@
 .align 11
-ENTRY(__kvm_hyp_init)
+SYM_CODE_START(__kvm_hyp_init)
 ventry __invalid // Synchronous EL2t
 ventry __invalid // IRQ EL2t
 ventry __invalid // FIQ EL2t
@@ -117,9 +117,9 @@ CPU_BE( orr x4, x4, #SCTLR_ELx_EE)
 /* Hello, World! */
 eret
-ENDPROC(__kvm_hyp_init)
+SYM_CODE_END(__kvm_hyp_init)
-ENTRY(__kvm_handle_stub_hvc)
+SYM_CODE_START(__kvm_handle_stub_hvc)
 cmp x0, #HVC_SOFT_RESTART
 b.ne 1f
@@ -158,7 +158,7 @@ reset:
 ldr x0, =HVC_STUB_ERR
 eret
-ENDPROC(__kvm_handle_stub_hvc)
+SYM_CODE_END(__kvm_handle_stub_hvc)
 .ltorg
...
@@ -28,7 +28,7 @@
 * and is used to implement hyp stubs in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
-ENTRY(__kvm_call_hyp)
+SYM_FUNC_START(__kvm_call_hyp)
 hvc #0
 ret
-ENDPROC(__kvm_call_hyp)
+SYM_FUNC_END(__kvm_call_hyp)
@@ -11,12 +11,12 @@
 .text
 .pushsection .hyp.text, "ax"
-ENTRY(__fpsimd_save_state)
+SYM_FUNC_START(__fpsimd_save_state)
 fpsimd_save x0, 1
 ret
-ENDPROC(__fpsimd_save_state)
+SYM_FUNC_END(__fpsimd_save_state)
-ENTRY(__fpsimd_restore_state)
+SYM_FUNC_START(__fpsimd_restore_state)
 fpsimd_restore x0, 1
 ret
-ENDPROC(__fpsimd_restore_state)
+SYM_FUNC_END(__fpsimd_restore_state)
@@ -180,7 +180,7 @@ el2_error:
 eret
 sb
-ENTRY(__hyp_do_panic)
+SYM_FUNC_START(__hyp_do_panic)
 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
 PSR_MODE_EL1h)
 msr spsr_el2, lr
@@ -188,18 +188,19 @@ ENTRY(__hyp_do_panic)
 msr elr_el2, lr
 eret
 sb
-ENDPROC(__hyp_do_panic)
+SYM_FUNC_END(__hyp_do_panic)
-ENTRY(__hyp_panic)
+SYM_CODE_START(__hyp_panic)
 get_host_ctxt x0, x1
 b hyp_panic
-ENDPROC(__hyp_panic)
+SYM_CODE_END(__hyp_panic)
 .macro invalid_vector label, target = __hyp_panic
 .align 2
+SYM_CODE_START(\label)
 \label:
 b \target
-ENDPROC(\label)
+SYM_CODE_END(\label)
 .endm
 /* None of these should ever happen */
@@ -246,7 +247,7 @@ check_preamble_length 661b, 662b
 check_preamble_length 661b, 662b
 .endm
-ENTRY(__kvm_hyp_vector)
+SYM_CODE_START(__kvm_hyp_vector)
 invalid_vect el2t_sync_invalid // Synchronous EL2t
 invalid_vect el2t_irq_invalid // IRQ EL2t
 invalid_vect el2t_fiq_invalid // FIQ EL2t
@@ -266,7 +267,7 @@ ENTRY(__kvm_hyp_vector)
 valid_vect el1_irq // IRQ 32-bit EL1
 invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
 valid_vect el1_error // Error 32-bit EL1
-ENDPROC(__kvm_hyp_vector)
+SYM_CODE_END(__kvm_hyp_vector)
 #ifdef CONFIG_KVM_INDIRECT_VECTORS
 .macro hyp_ventry
@@ -311,15 +312,17 @@ alternative_cb_end
 .endm
 .align 11
-ENTRY(__bp_harden_hyp_vecs_start)
+SYM_CODE_START(__bp_harden_hyp_vecs)
 .rept BP_HARDEN_EL2_SLOTS
 generate_vectors
 .endr
-ENTRY(__bp_harden_hyp_vecs_end)
+1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
+.org 1b
+SYM_CODE_END(__bp_harden_hyp_vecs)
 .popsection
-ENTRY(__smccc_workaround_1_smc_start)
+SYM_CODE_START(__smccc_workaround_1_smc)
 esb
 sub sp, sp, #(8 * 4)
 stp x2, x3, [sp, #(8 * 0)]
@@ -329,5 +332,7 @@ ENTRY(__smccc_workaround_1_smc_start)
 ldp x2, x3, [sp, #(8 * 0)]
 ldp x0, x1, [sp, #(8 * 2)]
 add sp, sp, #(8 * 4)
-ENTRY(__smccc_workaround_1_smc_end)
+1: .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
+.org 1b
+SYM_CODE_END(__smccc_workaround_1_smc)
 #endif
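
A note on the last two hunks: the old __bp_harden_hyp_vecs_start/_end and __smccc_workaround_1_smc_start/_end marker pairs are replaced by a single annotated symbol plus a fixed size constant, and the `1: .org sym + SIZE` / `.org 1b` pair acts as a build-time size assertion (`.org` cannot move the location counter backwards, so assembly fails if the body is larger or smaller than SIZE). A minimal sketch of the same idiom with invented names and an assumed 16-byte size:

	#define EXAMPLE_BLOB_SZ	16	/* assumed size, for illustration only */

	SYM_CODE_START(example_blob)
		nop
		nop
		nop
		ret				// 4 instructions = 16 bytes
	1:	.org	example_blob + EXAMPLE_BLOB_SZ	// errors if the code is bigger
		.org	1b				// errors if the code is smaller
	SYM_CODE_END(example_blob)
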