Commit 1f77ed94 authored by Palmer Dabbelt

riscv: switch to relative extable and other improvements

Similar to other architectures such as arm64 and x86, use
offsets relative to the exception table entry values rather than
absolute addresses for both the exception location and the fixup.
In addition, arm64 and x86 have recently removed their anonymous
out-of-line fixups; we want to achieve the same result.
parents dacef016 a2ceb8c4
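To make the relative scheme concrete: each entry stores 32-bit offsets relative to the fields themselves, so an absolute address is recovered by adding the field's own address. A minimal sketch of the decode step (helper names are illustrative; it mirrors the get_ex_fixup() helper added by this patch):
struct exception_table_entry {
	int insn, fixup;	/* offsets relative to the fields themselves */
	short type, data;
};
static inline unsigned long ex_insn_addr(const struct exception_table_entry *ex)
{
	/* absolute address = address of the field + the stored offset */
	return (unsigned long)&ex->insn + ex->insn;
}
static inline unsigned long ex_fixup_addr(const struct exception_table_entry *ex)
{
	return (unsigned long)&ex->fixup + ex->fixup;
}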
# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
generic-y += extable.h
generic-y += flat.h
generic-y += kvm_para.h
generic-y += user.h
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_ASM_EXTABLE_H
#define __ASM_ASM_EXTABLE_H
#define EX_TYPE_NONE 0
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
#define EX_TYPE_UACCESS_ERR_ZERO 3
#ifdef __ASSEMBLY__
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
.pushsection __ex_table, "a"; \
.balign 4; \
.long ((insn) - .); \
.long ((fixup) - .); \
.short (type); \
.short (data); \
.popsection;
.macro _asm_extable, insn, fixup
__ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
.endm
#else /* __ASSEMBLY__ */
#include <linux/bits.h>
#include <linux/stringify.h>
#include <asm/gpr-num.h>
#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
".pushsection __ex_table, \"a\"\n" \
".balign 4\n" \
".long ((" insn ") - .)\n" \
".long ((" fixup ") - .)\n" \
".short (" type ")\n" \
".short (" data ")\n" \
".popsection\n"
#define _ASM_EXTABLE(insn, fixup) \
__ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(4, 0)
#define EX_DATA_REG_ZERO_SHIFT 5
#define EX_DATA_REG_ZERO GENMASK(9, 5)
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
__DEFINE_ASM_GPR_NUMS \
__ASM_EXTABLE_RAW(#insn, #fixup, \
__stringify(EX_TYPE_UACCESS_ERR_ZERO), \
"(" \
EX_DATA_REG(ERR, err) " | " \
EX_DATA_REG(ZERO, zero) \
")")
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_ASM_EXTABLE_H */
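As a worked example of the data encoding above: EX_DATA_REG_ERR occupies bits 4:0 and EX_DATA_REG_ZERO bits 9:5 of the 16-bit data field, so err = a0 (GPR 10) and zero = a5 (GPR 15) pack to (15 << 5) | 10 = 0x1ea. A sketch using the kernel's FIELD_PREP()/FIELD_GET() helpers (the packing function is illustrative, not part of the patch):
#include <linux/bitfield.h>
/* Pack the err/zero GPR numbers the same way the
 * _ASM_EXTABLE_UACCESS_ERR_ZERO string above encodes them at build time. */
static inline short ex_pack_uaccess_data(int err_gpr, int zero_gpr)
{
	return FIELD_PREP(EX_DATA_REG_ERR, err_gpr) |
	       FIELD_PREP(EX_DATA_REG_ZERO, zero_gpr);
}
/* ex_pack_uaccess_data(10, 15) == 0x1ea; FIELD_GET(EX_DATA_REG_ERR, 0x1ea)
 * recovers 10, FIELD_GET(EX_DATA_REG_ZERO, 0x1ea) recovers 15. */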
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_EXTABLE_H
#define _ASM_RISCV_EXTABLE_H
/*
* The exception table consists of pairs of relative offsets: the first
* is the relative offset to an instruction that is allowed to fault,
* and the second is the relative offset at which the program should
* continue. No registers are modified, so it is entirely up to the
* continuation code to figure out what to do.
*
* All the routines below use bits of fixup code that are out of line
* with the main instruction path. This means when everything is well,
* we don't even have to jump over them. Further, they do not intrude
* on our cache or tlb entries.
*/
struct exception_table_entry {
int insn, fixup;
short type, data;
};
#define ARCH_HAS_RELATIVE_EXTABLE
#define swap_ex_entry_fixup(a, b, tmp, delta) \
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
(a)->type = (b)->type; \
(b)->type = (tmp).type; \
(a)->data = (b)->data; \
(b)->data = (tmp).data; \
} while (0)
bool fixup_exception(struct pt_regs *regs);
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
#else
static inline bool
ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
return false;
}
#endif
#endif
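The delta term in swap_ex_entry_fixup() exists because the offsets are self-relative: when sorting moves an entry by delta bytes, the absolute target (&e->fixup + e->fixup) must stay put, so the stored offset has to absorb -delta (the generic swap_ex() in lib/extable.c adjusts insn the same way). A toy userspace demonstration of the invariant (buffer layout and values are illustrative):
#include <stdio.h>
#include <string.h>
struct exception_table_entry { int insn, fixup; short type, data; };
int main(void)
{
	char buf[64] __attribute__((aligned(8))) = {0};
	struct exception_table_entry *a = (void *)buf;
	struct exception_table_entry *b = (void *)(buf + sizeof(*a));
	long delta = (char *)b - (char *)a;
	unsigned long target;
	a->fixup = 100;			/* pretend target = &a->fixup + 100 */
	target = (unsigned long)&a->fixup + a->fixup;
	memcpy(b, a, sizeof(*a));	/* "sorting" moves the entry... */
	b->fixup = a->fixup - delta;	/* ...and applies the correction */
	/* prints 1: the decoded absolute target did not move */
	printf("%d\n", target == (unsigned long)&b->fixup + b->fixup);
	return 0;
}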
......
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
/* We don't even really need the extable code, but for now keep it simple */
#ifndef CONFIG_MMU
......
@@ -20,23 +21,14 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
uintptr_t tmp; \
__enable_user_access(); \
__asm__ __volatile__ ( \
"1: " insn " \n" \
"2: \n" \
" .section .fixup,\"ax\" \n" \
" .balign 4 \n" \
"3: li %[r],%[e] \n" \
" jump 2b,%[t] \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
" .balign " RISCV_SZPTR " \n" \
" " RISCV_PTR " 1b, 3b \n" \
" .previous \n" \
_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r]) \
: [r] "+r" (ret), [ov] "=&r" (oldval), \
[u] "+m" (*uaddr), [t] "=&r" (tmp) \
: [op] "Jr" (oparg), [e] "i" (-EFAULT) \
[u] "+m" (*uaddr) \
: [op] "Jr" (oparg) \
: "memory"); \
__disable_user_access(); \
}
......
@@ -98,18 +90,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"2: sc.w.aqrl %[t],%z[nv],%[u] \n"
" bnez %[t],1b \n"
"3: \n"
" .section .fixup,\"ax\" \n"
" .balign 4 \n"
"4: li %[r],%[e] \n"
" jump 3b,%[t] \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
" .balign " RISCV_SZPTR " \n"
" " RISCV_PTR " 1b, 4b \n"
" " RISCV_PTR " 2b, 4b \n"
" .previous \n"
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r])
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r])
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
: [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
: [ov] "Jr" (oldval), [nv] "Jr" (newval)
: "memory");
__disable_user_access();
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GPR_NUM_H
#define __ASM_GPR_NUM_H
#ifdef __ASSEMBLY__
.equ .L__gpr_num_zero, 0
.equ .L__gpr_num_ra, 1
.equ .L__gpr_num_sp, 2
.equ .L__gpr_num_gp, 3
.equ .L__gpr_num_tp, 4
.equ .L__gpr_num_t0, 5
.equ .L__gpr_num_t1, 6
.equ .L__gpr_num_t2, 7
.equ .L__gpr_num_s0, 8
.equ .L__gpr_num_s1, 9
.equ .L__gpr_num_a0, 10
.equ .L__gpr_num_a1, 11
.equ .L__gpr_num_a2, 12
.equ .L__gpr_num_a3, 13
.equ .L__gpr_num_a4, 14
.equ .L__gpr_num_a5, 15
.equ .L__gpr_num_a6, 16
.equ .L__gpr_num_a7, 17
.equ .L__gpr_num_s2, 18
.equ .L__gpr_num_s3, 19
.equ .L__gpr_num_s4, 20
.equ .L__gpr_num_s5, 21
.equ .L__gpr_num_s6, 22
.equ .L__gpr_num_s7, 23
.equ .L__gpr_num_s8, 24
.equ .L__gpr_num_s9, 25
.equ .L__gpr_num_s10, 26
.equ .L__gpr_num_s11, 27
.equ .L__gpr_num_t3, 28
.equ .L__gpr_num_t4, 29
.equ .L__gpr_num_t5, 30
.equ .L__gpr_num_t6, 31
#else /* __ASSEMBLY__ */
#define __DEFINE_ASM_GPR_NUMS \
" .equ .L__gpr_num_zero, 0\n" \
" .equ .L__gpr_num_ra, 1\n" \
" .equ .L__gpr_num_sp, 2\n" \
" .equ .L__gpr_num_gp, 3\n" \
" .equ .L__gpr_num_tp, 4\n" \
" .equ .L__gpr_num_t0, 5\n" \
" .equ .L__gpr_num_t1, 6\n" \
" .equ .L__gpr_num_t2, 7\n" \
" .equ .L__gpr_num_s0, 8\n" \
" .equ .L__gpr_num_s1, 9\n" \
" .equ .L__gpr_num_a0, 10\n" \
" .equ .L__gpr_num_a1, 11\n" \
" .equ .L__gpr_num_a2, 12\n" \
" .equ .L__gpr_num_a3, 13\n" \
" .equ .L__gpr_num_a4, 14\n" \
" .equ .L__gpr_num_a5, 15\n" \
" .equ .L__gpr_num_a6, 16\n" \
" .equ .L__gpr_num_a7, 17\n" \
" .equ .L__gpr_num_s2, 18\n" \
" .equ .L__gpr_num_s3, 19\n" \
" .equ .L__gpr_num_s4, 20\n" \
" .equ .L__gpr_num_s5, 21\n" \
" .equ .L__gpr_num_s6, 22\n" \
" .equ .L__gpr_num_s7, 23\n" \
" .equ .L__gpr_num_s8, 24\n" \
" .equ .L__gpr_num_s9, 25\n" \
" .equ .L__gpr_num_s10, 26\n" \
" .equ .L__gpr_num_s11, 27\n" \
" .equ .L__gpr_num_t3, 28\n" \
" .equ .L__gpr_num_t4, 29\n" \
" .equ .L__gpr_num_t5, 30\n" \
" .equ .L__gpr_num_t6, 31\n"
#endif /* __ASSEMBLY__ */
#endif /* __ASM_GPR_NUM_H */
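These symbols are what let the C-level extable macros resolve a register operand to its ABI number at assembly time: #err in _ASM_EXTABLE_UACCESS_ERR_ZERO stringifies an operand such as %0, the compiler substitutes the register it allocated (say a5), and the assembler evaluates .L__gpr_num_a5 to 15 inside the EX_DATA_REG() expression. A sketch of the net expansion, assuming %0 was allocated a5 and %1 was allocated a0:
/*
 * _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) then emits, roughly:
 *
 *	.pushsection __ex_table, "a"
 *	.balign 4
 *	.long ((1b) - .)
 *	.long ((2b) - .)
 *	.short (3)	// EX_TYPE_UACCESS_ERR_ZERO
 *	.short (((.L__gpr_num_a5) << 0) | ((.L__gpr_num_a0) << 5))
 *	.popsection
 */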
......
@@ -8,6 +8,7 @@
#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H
#include <asm/asm-extable.h>
#include <asm/pgtable.h> /* for TASK_SIZE */
/*
......
@@ -80,25 +81,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
#define __get_user_asm(insn, x, ptr, err) \
do { \
uintptr_t __tmp; \
__typeof__(x) __x; \
__asm__ __volatile__ ( \
"1:\n" \
" " insn " %1, %3\n" \
" " insn " %1, %2\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .balign 4\n" \
"3:\n" \
" li %0, %4\n" \
" li %1, 0\n" \
" jump 2b, %2\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .balign " RISCV_SZPTR "\n" \
" " RISCV_PTR " 1b, 3b\n" \
" .previous" \
: "+r" (err), "=&r" (__x), "=r" (__tmp) \
: "m" (*(ptr)), "i" (-EFAULT)); \
_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \
: "+r" (err), "=&r" (__x) \
: "m" (*(ptr))); \
(x) = __x; \
} while (0)
......
@@ -110,30 +100,18 @@ do { \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u32 __lo, __hi; \
uintptr_t __tmp; \
__asm__ __volatile__ ( \
"1:\n" \
" lw %1, %4\n" \
" lw %1, %3\n" \
"2:\n" \
" lw %2, %5\n" \
" lw %2, %4\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .balign 4\n" \
"4:\n" \
" li %0, %6\n" \
" li %1, 0\n" \
" li %2, 0\n" \
" jump 3b, %3\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .balign " RISCV_SZPTR "\n" \
" " RISCV_PTR " 1b, 4b\n" \
" " RISCV_PTR " 2b, 4b\n" \
" .previous" \
: "+r" (err), "=&r" (__lo), "=r" (__hi), \
"=r" (__tmp) \
: "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
"i" (-EFAULT)); \
_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
: "+r" (err), "=&r" (__lo), "=r" (__hi) \
: "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
if (err) \
__hi = 0; \
(x) = (__typeof__(x))((__typeof__((x)-(x)))( \
(((u64)__hi << 32) | __lo))); \
} while (0)
......
@@ -221,24 +199,14 @@ do { \
#define __put_user_asm(insn, x, ptr, err) \
do { \
uintptr_t __tmp; \
__typeof__(*(ptr)) __x = x; \
__asm__ __volatile__ ( \
"1:\n" \
" " insn " %z3, %2\n" \
" " insn " %z2, %1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .balign 4\n" \
"3:\n" \
" li %0, %4\n" \
" jump 2b, %1\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .balign " RISCV_SZPTR "\n" \
" " RISCV_PTR " 1b, 3b\n" \
" .previous" \
: "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
: "rJ" (__x), "i" (-EFAULT)); \
_ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \
: "+r" (err), "=m" (*(ptr)) \
: "rJ" (__x)); \
} while (0)
#ifdef CONFIG_64BIT
......
@@ -249,28 +217,18 @@ do { \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u64 __x = (__typeof__((x)-(x)))(x); \
uintptr_t __tmp; \
__asm__ __volatile__ ( \
"1:\n" \
" sw %z4, %2\n" \
" sw %z3, %1\n" \
"2:\n" \
" sw %z5, %3\n" \
" sw %z4, %2\n" \
"3:\n" \
" .section .fixup,\"ax\"\n" \
" .balign 4\n" \
"4:\n" \
" li %0, %6\n" \
" jump 3b, %1\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .balign " RISCV_SZPTR "\n" \
" " RISCV_PTR " 1b, 4b\n" \
" " RISCV_PTR " 2b, 4b\n" \
" .previous" \
: "+r" (err), "=r" (__tmp), \
_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
: "+r" (err), \
"=m" (__ptr[__LSW]), \
"=m" (__ptr[__MSW]) \
: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
: "rJ" (__x), "rJ" (__x >> 32)); \
} while (0)
#endif /* CONFIG_64BIT */
......
@@ -388,81 +346,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
__clear_user(to, n) : n;
}
/*
* Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
* will set "err" to -EFAULT, while successful accesses return the previous
* value.
*/
#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
({ \
__typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
__typeof__(*(ptr)) __ret; \
__typeof__(err) __err = 0; \
register unsigned int __rc; \
__enable_user_access(); \
switch (size) { \
case 4: \
__asm__ __volatile__ ( \
"0:\n" \
" lr.w" #scb " %[ret], %[ptr]\n" \
" bne %[ret], %z[old], 1f\n" \
" sc.w" #lrb " %[rc], %z[new], %[ptr]\n" \
" bnez %[rc], 0b\n" \
"1:\n" \
".section .fixup,\"ax\"\n" \
".balign 4\n" \
"2:\n" \
" li %[err], %[efault]\n" \
" jump 1b, %[rc]\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".balign " RISCV_SZPTR "\n" \
" " RISCV_PTR " 1b, 2b\n" \
".previous\n" \
: [ret] "=&r" (__ret), \
[rc] "=&r" (__rc), \
[ptr] "+A" (*__ptr), \
[err] "=&r" (__err) \
: [old] "rJ" (__old), \
[new] "rJ" (__new), \
[efault] "i" (-EFAULT)); \
break; \
case 8: \
__asm__ __volatile__ ( \
"0:\n" \
" lr.d" #scb " %[ret], %[ptr]\n" \
" bne %[ret], %z[old], 1f\n" \
" sc.d" #lrb " %[rc], %z[new], %[ptr]\n" \
" bnez %[rc], 0b\n" \
"1:\n" \
".section .fixup,\"ax\"\n" \
".balign 4\n" \
"2:\n" \
" li %[err], %[efault]\n" \
" jump 1b, %[rc]\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
".balign " RISCV_SZPTR "\n" \
" " RISCV_PTR " 1b, 2b\n" \
".previous\n" \
: [ret] "=&r" (__ret), \
[rc] "=&r" (__rc), \
[ptr] "+A" (*__ptr), \
[err] "=&r" (__err) \
: [old] "rJ" (__old), \
[new] "rJ" (__new), \
[efault] "i" (-EFAULT)); \
break; \
default: \
BUILD_BUG(); \
} \
__disable_user_access(); \
(err) = __err; \
__ret; \
})
#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label) \
......
......
@@ -45,7 +45,6 @@ SECTIONS
ENTRY_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
_etext = .;
}
RO_DATA(L1_CACHE_BYTES)
......
......
@@ -4,7 +4,7 @@
* Copyright (C) 2017 SiFive
*/
#define RO_EXCEPTION_TABLE_ALIGN 16
#define RO_EXCEPTION_TABLE_ALIGN 4
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
......
@@ -48,7 +48,6 @@ SECTIONS
ENTRY_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup)
_etext = .;
}
......
#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
.macro fixup op reg addr lbl
100:
\op \reg, \addr
.section __ex_table,"a"
.balign RISCV_SZPTR
RISCV_PTR 100b, \lbl
.previous
_asm_extable 100b, \lbl
.endm
ENTRY(__asm_copy_to_user)
......
@@ -173,6 +171,13 @@ ENTRY(__asm_copy_from_user)
csrc CSR_STATUS, t6
li a0, 0
ret
/* Exception fixup code */
10:
/* Disable access to user memory */
csrs CSR_STATUS, t6
mv a0, t5
ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
......
@@ -218,19 +223,12 @@ ENTRY(__clear_user)
addi a0, a0, 1
bltu a0, a3, 5b
j 3b
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)
.section .fixup,"ax"
.balign 4
/* Fixup code for __copy_user(10) and __clear_user(11) */
10:
/* Disable access to user memory */
csrs CSR_STATUS, t6
mv a0, t5
ret
/* Exception fixup code */
11:
/* Disable access to user memory */
csrs CSR_STATUS, t6
mv a0, a1
ret
.previous
ENDPROC(__clear_user)
EXPORT_SYMBOL(__clear_user)
......
@@ -7,27 +7,65 @@
*/
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/asm-extable.h>
#include <asm/ptrace.h>
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs);
#endif
static inline unsigned long
get_ex_fixup(const struct exception_table_entry *ex)
{
return ((unsigned long)&ex->fixup + ex->fixup);
}
static bool ex_handler_fixup(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
regs->epc = get_ex_fixup(ex);
return true;
}
static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
unsigned long val)
{
if (unlikely(offset > MAX_REG_OFFSET))
return;
if (offset)
*(unsigned long *)((unsigned long)regs + offset) = val;
}
static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
regs->epc = get_ex_fixup(ex);
return true;
}
int fixup_exception(struct pt_regs *regs)
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
const struct exception_table_entry *ex;
fixup = search_exception_tables(regs->epc);
if (!fixup)
return 0;
ex = search_exception_tables(regs->epc);
if (!ex)
return false;
#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
if (regs->epc >= BPF_JIT_REGION_START && regs->epc < BPF_JIT_REGION_END)
return rv_bpf_fixup_exception(fixup, regs);
#endif
switch (ex->type) {
case EX_TYPE_FIXUP:
return ex_handler_fixup(ex, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
}
regs->epc = fixup->fixup;
return 1;
BUG();
}
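For context, the consumer side keeps its old shape: on a kernel-mode fault the fault path tries fixup_exception() before oopsing. A simplified sketch of the caller (condensed from the usual no_context() logic in arch/riscv/mm/fault.c; everything elided here is unchanged by this patch):
static void no_context(struct pt_regs *regs, unsigned long addr)
{
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;
	/* No extable entry covers regs->epc: genuine kernel bug. */
	die(regs, "Oops");
}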
......
@@ -459,10 +459,8 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs);
int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
struct pt_regs *regs)
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
......
@@ -470,7 +468,7 @@ int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
*(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
regs->epc = (unsigned long)&ex->fixup - offset;
return 1;
return true;
}
/* For accesses to BTF pointers, add an entry to the exception table */
......
@@ -516,6 +514,7 @@ static int add_exception_handler(const struct bpf_insn *insn,
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
ex->type = EX_TYPE_BPF;
ctx->nexentries++;
return 0;
......
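To make the BPF encoding concrete: the JIT reuses the 32-bit fixup word as a bitfield, 27 bits of distance from the fixup member back to the landing pad plus 5 bits of destination register, as packed in add_exception_handler() above. A round-trip sketch (helper names are illustrative):
#include <linux/bitfield.h>
static u32 bpf_ex_pack(off_t offset, int dst_reg)
{
	return FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
	       FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
}
static void bpf_ex_unpack(const struct exception_table_entry *ex,
			  unsigned long *pc, int *dst_reg)
{
	/* ex_handler_bpf() resumes here and zeroes the faulting dst reg */
	*pc = (unsigned long)&ex->fixup - FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	*dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
}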
......
@@ -1830,6 +1830,14 @@ static int addend_mips_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
return 0;
}
#ifndef EM_RISCV
#define EM_RISCV 243
#endif
#ifndef R_RISCV_SUB32
#define R_RISCV_SUB32 39
#endif
static void section_rela(const char *modname, struct elf_info *elf,
Elf_Shdr *sechdr)
{
......
@@ -1866,6 +1874,13 @@ static void section_rela(const char *modname, struct elf_info *elf,
r_sym = ELF_R_SYM(r.r_info);
#endif
r.r_addend = TO_NATIVE(rela->r_addend);
switch (elf->hdr->e_machine) {
case EM_RISCV:
if (!strcmp("__ex_table", fromsec) &&
ELF_R_TYPE(r.r_info) == R_RISCV_SUB32)
continue;
break;
}
sym = elf->symtab_start + r_sym;
/* Skip special sections */
if (is_shndx_special(sym->st_shndx))
......
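The R_RISCV_SUB32 skip is needed because RISC-V has no single pc-relative data relocation: the assembler lowers each ".long ((insn) - .)" from __ASM_EXTABLE_RAW into an ADD32/SUB32 relocation pair at the same location, and only the ADD32 half names the real target. A sketch of the pair (symbol names are illustrative):
/*
 * For one ".long ((insn) - .)" extable word the object file carries:
 *
 *	R_RISCV_ADD32	insn_label	// + absolute target address
 *	R_RISCV_SUB32	.L__ex_entry	// - the entry's own address
 *
 * Walking the SUB32 half would make modpost think __ex_table references
 * itself, hence section_rela() skips it for __ex_table.
 */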
......
@@ -231,7 +231,7 @@ static void sort_relative_table(char *extab_image, int image_size)
}
}
static void arm64_sort_relative_table(char *extab_image, int image_size)
static void sort_relative_table_with_data(char *extab_image, int image_size)
{
int i = 0;
......
@@ -259,34 +259,6 @@ static void arm64_sort_relative_table(char *extab_image, int image_size)
}
}
static void x86_sort_relative_table(char *extab_image, int image_size)
{
int i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) + i, loc);
w(r(loc + 1) + i + 4, loc + 1);
/* Don't touch the fixup type */
i += sizeof(uint32_t) * 3;
}
qsort(extab_image, image_size / 12, 12, compare_relative_table);
i = 0;
while (i < image_size) {
uint32_t *loc = (uint32_t *)(extab_image + i);
w(r(loc) - i, loc);
w(r(loc + 1) - (i + 4), loc + 1);
/* Don't touch the fixup type */
i += sizeof(uint32_t) * 3;
}
}
static void s390_sort_relative_table(char *extab_image, int image_size)
{
int i;
......
@@ -364,15 +336,14 @@ static int do_file(char const *const fname, void *addr)
switch (r2(&ehdr->e_machine)) {
case EM_386:
case EM_AARCH64:
case EM_RISCV:
case EM_X86_64:
custom_sort = x86_sort_relative_table;
custom_sort = sort_relative_table_with_data;
break;
case EM_S390:
custom_sort = s390_sort_relative_table;
break;
case EM_AARCH64:
custom_sort = arm64_sort_relative_table;
break;
case EM_PARISC:
case EM_PPC:
case EM_PPC64:
......
@@ -383,7 +354,6 @@ static int do_file(char const *const fname, void *addr)
case EM_ARM:
case EM_MICROBLAZE:
case EM_MIPS:
case EM_RISCV:
case EM_XTENSA:
break;
default:
......
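The sorttable unification works because RISC-V entries now share the arm64/x86 shape: two 32-bit self-relative offsets plus a 32-bit type/data word, 12 bytes per entry. The sort rebases each offset by its own file position so entries can be qsorted on the first word, then undoes the rebase, never touching the third word. A condensed sketch of the rebase step (mirroring the loops in sort_relative_table_with_data(); the helper is illustrative):
#include <stdint.h>
/* sign = +1 before the qsort (self-relative -> section-absolute),
 * sign = -1 afterwards to restore the self-relative encoding. */
static void rebase_extable(char *image, int size, int sign)
{
	for (int i = 0; i < size; i += 12) {
		uint32_t *loc = (uint32_t *)(image + i);
		loc[0] += sign * i;		/* insn offset */
		loc[1] += sign * (i + 4);	/* fixup offset */
		/* loc[2] holds type/data and is never touched */
	}
}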