Commit 683c5bbb authored by Clément Léger, committed by Anup Patel

riscv: kvm: Use SYM_*() assembly macros instead of deprecated ones

The ENTRY()/END()/WEAK() macros are deprecated and we should make use of the
new SYM_*() macros [1] for better annotation of symbols. Replace the
deprecated macros with the new ones and fix incorrect usages of
END()/ENDPROC() so that the symbols are described correctly.

[1] https://docs.kernel.org/core-api/asm-annotations.html

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Acked-by: Palmer Dabbelt <palmer@rivosinc.com>
Signed-off-by: Anup Patel <anup@brainfault.org>
parent 861deac3
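For reference, the shape of the conversion applied in the diff below is roughly the following (a minimal sketch using the annotation macros from linux/linkage.h; my_func and my_trap are illustrative names, not symbols from this patch). SYM_FUNC_START()/SYM_FUNC_END() annotate symbols that behave as C-callable functions, while SYM_CODE_START()/SYM_CODE_END() annotate code with a special calling convention, such as trap handlers:

    #include <linux/linkage.h>

    /* Deprecated style: ENTRY()/ENDPROC() */
    ENTRY(my_func)
    	ret
    ENDPROC(my_func)

    /* New style: SYM_FUNC_* for a C-ABI function... */
    SYM_FUNC_START(my_func)
    	ret
    SYM_FUNC_END(my_func)

    /* ...and SYM_CODE_* for non-C-ABI code such as a trap handler. */
    SYM_CODE_START(my_trap)
    	sret
    SYM_CODE_END(my_trap)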
@@ -15,7 +15,7 @@
 	.altmacro
 	.option norelax
 
-ENTRY(__kvm_riscv_switch_to)
+SYM_FUNC_START(__kvm_riscv_switch_to)
 	/* Save Host GPRs (except A0 and T0-T6) */
 	REG_S	ra, (KVM_ARCH_HOST_RA)(a0)
 	REG_S	sp, (KVM_ARCH_HOST_SP)(a0)
@@ -208,9 +208,9 @@ __kvm_switch_return:
 
 	/* Return to C code */
 	ret
-ENDPROC(__kvm_riscv_switch_to)
+SYM_FUNC_END(__kvm_riscv_switch_to)
 
-ENTRY(__kvm_riscv_unpriv_trap)
+SYM_CODE_START(__kvm_riscv_unpriv_trap)
 	/*
 	 * We assume that faulting unpriv load/store instruction is
 	 * 4-byte long and blindly increment SEPC by 4.
@@ -231,12 +231,10 @@ ENTRY(__kvm_riscv_unpriv_trap)
 	csrr	a1, CSR_HTINST
 	REG_S	a1, (KVM_ARCH_TRAP_HTINST)(a0)
 	sret
-ENDPROC(__kvm_riscv_unpriv_trap)
+SYM_CODE_END(__kvm_riscv_unpriv_trap)
 
 #ifdef CONFIG_FPU
-	.align 3
-	.global __kvm_riscv_fp_f_save
-__kvm_riscv_fp_f_save:
+SYM_FUNC_START(__kvm_riscv_fp_f_save)
 	csrr t2, CSR_SSTATUS
 	li t1, SR_FS
 	csrs CSR_SSTATUS, t1
@@ -276,10 +274,9 @@ __kvm_riscv_fp_f_save:
 	sw t0, KVM_ARCH_FP_F_FCSR(a0)
 	csrw CSR_SSTATUS, t2
 	ret
+SYM_FUNC_END(__kvm_riscv_fp_f_save)
 
-	.align 3
-	.global __kvm_riscv_fp_d_save
-__kvm_riscv_fp_d_save:
+SYM_FUNC_START(__kvm_riscv_fp_d_save)
 	csrr t2, CSR_SSTATUS
 	li t1, SR_FS
 	csrs CSR_SSTATUS, t1
@@ -319,10 +316,9 @@ __kvm_riscv_fp_d_save:
 	sw t0, KVM_ARCH_FP_D_FCSR(a0)
 	csrw CSR_SSTATUS, t2
 	ret
+SYM_FUNC_END(__kvm_riscv_fp_d_save)
 
-	.align 3
-	.global __kvm_riscv_fp_f_restore
-__kvm_riscv_fp_f_restore:
+SYM_FUNC_START(__kvm_riscv_fp_f_restore)
 	csrr t2, CSR_SSTATUS
 	li t1, SR_FS
 	lw t0, KVM_ARCH_FP_F_FCSR(a0)
@@ -362,10 +358,9 @@ __kvm_riscv_fp_f_restore:
 	fscsr t0
 	csrw CSR_SSTATUS, t2
 	ret
+SYM_FUNC_END(__kvm_riscv_fp_f_restore)
 
-	.align 3
-	.global __kvm_riscv_fp_d_restore
-__kvm_riscv_fp_d_restore:
+SYM_FUNC_START(__kvm_riscv_fp_d_restore)
 	csrr t2, CSR_SSTATUS
 	li t1, SR_FS
 	lw t0, KVM_ARCH_FP_D_FCSR(a0)
@@ -405,4 +400,5 @@ __kvm_riscv_fp_d_restore:
 	fscsr t0
 	csrw CSR_SSTATUS, t2
 	ret
+SYM_FUNC_END(__kvm_riscv_fp_d_restore)
 #endif