Commit f1f9984f authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'riscv-for-linus-6.10-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull more RISC-V updates from Palmer Dabbelt:

 - The compression format used for boot images is now configurable at
   build time, and these formats are shown in `make help`

 - access_ok() has been optimized

 - A pair of performance bugs have been fixed in the uaccess handlers

 - Various fixes and cleanups, including one for the IMSIC build failure
   and one for the early-boot ftrace illegal NOPs bug

* tag 'riscv-for-linus-6.10-mw2' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Fix early ftrace nop patching
  irqchip: riscv-imsic: Fixup riscv_ipi_set_virq_range() conflict
  riscv: selftests: Add signal handling vector tests
  riscv: mm: accelerate pagefault when badaccess
  riscv: uaccess: Relax the threshold for fast path
  riscv: uaccess: Allow the last potential unrolled copy
  riscv: typo in comment for get_f64_reg
  Use bool value in set_cpu_online()
  riscv: selftests: Add hwprobe binaries to .gitignore
  riscv: stacktrace: fixed walk_stackframe()
  ftrace: riscv: move from REGS to ARGS
  riscv: do not select MODULE_SECTIONS by default
  riscv: show help string for riscv-specific targets
  riscv: make image compression configurable
  riscv: cpufeature: Fix extension subset checking
  riscv: cpufeature: Fix thead vector hwcap removal
  riscv: rewrite __kernel_map_pages() to fix sleeping in invalid context
  riscv: force PAGE_SIZE linear mapping if debug_pagealloc is enabled
  riscv: Define TASK_SIZE_MAX for __access_ok()
  riscv: Remove PGDIR_SIZE_L3 and TASK_SIZE_MIN
parents 9351f138 6ca445d8
...@@ -129,7 +129,7 @@ config RISCV ...@@ -129,7 +129,7 @@ config RISCV
select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE) select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE)
select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_DYNAMIC_FTRACE_WITH_ARGS if HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER
...@@ -141,6 +141,13 @@ config RISCV ...@@ -141,6 +141,13 @@ config RISCV
select HAVE_GCC_PLUGINS select HAVE_GCC_PLUGINS
select HAVE_GENERIC_VDSO if MMU && 64BIT select HAVE_GENERIC_VDSO if MMU && 64BIT
select HAVE_IRQ_TIME_ACCOUNTING select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_KERNEL_BZIP2 if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_GZIP if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_LZ4 if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_LZMA if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_LZO if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_UNCOMPRESSED if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KERNEL_ZSTD if !XIP_KERNEL && !EFI_ZBOOT
select HAVE_KPROBES if !XIP_KERNEL select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
select HAVE_KRETPROBES if !XIP_KERNEL select HAVE_KRETPROBES if !XIP_KERNEL
...@@ -170,7 +177,6 @@ config RISCV ...@@ -170,7 +177,6 @@ config RISCV
select LOCK_MM_AND_FIND_VMA select LOCK_MM_AND_FIND_VMA
select MMU_GATHER_RCU_TABLE_FREE if SMP && MMU select MMU_GATHER_RCU_TABLE_FREE if SMP && MMU
select MODULES_USE_ELF_RELA if MODULES select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
select OF select OF
select OF_EARLY_FLATTREE select OF_EARLY_FLATTREE
select OF_IRQ select OF_IRQ
...@@ -861,6 +867,7 @@ config PARAVIRT_TIME_ACCOUNTING ...@@ -861,6 +867,7 @@ config PARAVIRT_TIME_ACCOUNTING
config RELOCATABLE config RELOCATABLE
bool "Build a relocatable kernel" bool "Build a relocatable kernel"
depends on MMU && 64BIT && !XIP_KERNEL depends on MMU && 64BIT && !XIP_KERNEL
select MODULE_SECTIONS if MODULES
help help
This builds a kernel as a Position Independent Executable (PIE), This builds a kernel as a Position Independent Executable (PIE),
which retains all relocation metadata required to relocate the which retains all relocation metadata required to relocate the
......
...@@ -154,6 +154,21 @@ endif ...@@ -154,6 +154,21 @@ endif
endif endif
endif endif
boot := arch/riscv/boot
boot-image-y := Image
boot-image-$(CONFIG_KERNEL_BZIP2) := Image.bz2
boot-image-$(CONFIG_KERNEL_GZIP) := Image.gz
boot-image-$(CONFIG_KERNEL_LZ4) := Image.lz4
boot-image-$(CONFIG_KERNEL_LZMA) := Image.lzma
boot-image-$(CONFIG_KERNEL_LZO) := Image.lzo
boot-image-$(CONFIG_KERNEL_ZSTD) := Image.zst
ifdef CONFIG_RISCV_M_MODE
boot-image-$(CONFIG_ARCH_CANAAN) := loader.bin
endif
boot-image-$(CONFIG_EFI_ZBOOT) := vmlinuz.efi
boot-image-$(CONFIG_XIP_KERNEL) := xipImage
KBUILD_IMAGE := $(boot)/$(boot-image-y)
libs-y += arch/riscv/lib/ libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
...@@ -171,21 +186,19 @@ endif ...@@ -171,21 +186,19 @@ endif
vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg
BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi BOOT_TARGETS := Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader loader.bin xipImage vmlinuz.efi
all: $(notdir $(KBUILD_IMAGE)) all: $(notdir $(KBUILD_IMAGE))
loader.bin: loader loader.bin: loader
Image.gz loader vmlinuz.efi: Image Image.gz Image.bz2 Image.lz4 Image.lzma Image.lzo Image.zst loader xipImage vmlinuz.efi: Image
$(BOOT_TARGETS): vmlinux $(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
@$(kecho) ' Kernel: $(boot)/$@ is ready' @$(kecho) ' Kernel: $(boot)/$@ is ready'
Image.%: Image # the install target always installs KBUILD_IMAGE (which may be compressed)
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ # but keep the zinstall target for compatibility with older releases
install: KBUILD_IMAGE := $(boot)/Image
zinstall: KBUILD_IMAGE := $(boot)/Image.gz
install zinstall: install zinstall:
$(call cmd,install) $(call cmd,install)
...@@ -206,3 +219,20 @@ rv32_defconfig: ...@@ -206,3 +219,20 @@ rv32_defconfig:
PHONY += rv32_nommu_virt_defconfig PHONY += rv32_nommu_virt_defconfig
rv32_nommu_virt_defconfig: rv32_nommu_virt_defconfig:
$(Q)$(MAKE) -f $(srctree)/Makefile nommu_virt_defconfig 32-bit.config $(Q)$(MAKE) -f $(srctree)/Makefile nommu_virt_defconfig 32-bit.config
define archhelp
echo ' Image - Uncompressed kernel image (arch/riscv/boot/Image)'
echo ' Image.gz - Compressed kernel image (arch/riscv/boot/Image.gz)'
echo ' Image.bz2 - Compressed kernel image (arch/riscv/boot/Image.bz2)'
echo ' Image.lz4 - Compressed kernel image (arch/riscv/boot/Image.lz4)'
echo ' Image.lzma - Compressed kernel image (arch/riscv/boot/Image.lzma)'
echo ' Image.lzo - Compressed kernel image (arch/riscv/boot/Image.lzo)'
echo ' Image.zst - Compressed kernel image (arch/riscv/boot/Image.zst)'
echo ' vmlinuz.efi - Compressed EFI kernel image (arch/riscv/boot/vmlinuz.efi)'
echo ' Default when CONFIG_EFI_ZBOOT=y'
echo ' xipImage - Execute-in-place kernel image (arch/riscv/boot/xipImage)'
echo ' Default when CONFIG_XIP_KERNEL=y'
echo ' install - Install kernel using (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or install to '
echo ' $$(INSTALL_PATH)'
endef
...@@ -17,15 +17,18 @@ ...@@ -17,15 +17,18 @@
# $3 - kernel map file # $3 - kernel map file
# $4 - default install path (blank if root directory) # $4 - default install path (blank if root directory)
if [ "$(basename $2)" = "Image.gz" ]; then case "${2##*/}" in
# Compressed install # Compressed install
Image.*|vmlinuz.efi)
echo "Installing compressed kernel" echo "Installing compressed kernel"
base=vmlinuz base=vmlinuz
else ;;
# Normal install # Normal install
*)
echo "Installing normal kernel" echo "Installing normal kernel"
base=vmlinux base=vmlinux
fi ;;
esac
if [ -f $4/$base-$1 ]; then if [ -f $4/$base-$1 ]; then
mv $4/$base-$1 $4/$base-$1.old mv $4/$base-$1 $4/$base-$1.old
......
...@@ -13,6 +13,12 @@ static inline void local_flush_icache_all(void) ...@@ -13,6 +13,12 @@ static inline void local_flush_icache_all(void)
asm volatile ("fence.i" ::: "memory"); asm volatile ("fence.i" ::: "memory");
} }
static inline void local_flush_icache_range(unsigned long start,
unsigned long end)
{
local_flush_icache_all();
}
#define PG_dcache_clean PG_arch_1 #define PG_dcache_clean PG_arch_1
static inline void flush_dcache_folio(struct folio *folio) static inline void flush_dcache_folio(struct folio *folio)
......
...@@ -124,20 +124,82 @@ struct dyn_ftrace; ...@@ -124,20 +124,82 @@ struct dyn_ftrace;
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop #define ftrace_init_nop ftrace_init_nop
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
#define arch_ftrace_get_regs(regs) NULL
struct ftrace_ops; struct ftrace_ops;
struct ftrace_regs; struct ftrace_regs {
unsigned long epc;
unsigned long ra;
unsigned long sp;
unsigned long s0;
unsigned long t1;
union {
unsigned long args[8];
struct {
unsigned long a0;
unsigned long a1;
unsigned long a2;
unsigned long a3;
unsigned long a4;
unsigned long a5;
unsigned long a6;
unsigned long a7;
};
};
};
static __always_inline unsigned long ftrace_regs_get_instruction_pointer(const struct ftrace_regs
*fregs)
{
return fregs->epc;
}
static __always_inline void ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
unsigned long pc)
{
fregs->epc = pc;
}
static __always_inline unsigned long ftrace_regs_get_stack_pointer(const struct ftrace_regs *fregs)
{
return fregs->sp;
}
static __always_inline unsigned long ftrace_regs_get_argument(struct ftrace_regs *fregs,
unsigned int n)
{
if (n < 8)
return fregs->args[n];
return 0;
}
static __always_inline unsigned long ftrace_regs_get_return_value(const struct ftrace_regs *fregs)
{
return fregs->a0;
}
static __always_inline void ftrace_regs_set_return_value(struct ftrace_regs *fregs,
unsigned long ret)
{
fregs->a0 = ret;
}
static __always_inline void ftrace_override_function_with_return(struct ftrace_regs *fregs)
{
fregs->epc = fregs->ra;
}
int ftrace_regs_query_register_offset(const char *name);
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs); struct ftrace_ops *op, struct ftrace_regs *fregs);
#define ftrace_graph_func ftrace_graph_func #define ftrace_graph_func ftrace_graph_func
static inline void __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) static inline void arch_ftrace_set_direct_caller(struct ftrace_regs *fregs, unsigned long addr)
{ {
regs->t1 = addr; fregs->t1 = addr;
} }
#define arch_ftrace_set_direct_caller(fregs, addr) \ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
__arch_ftrace_set_direct_caller(&(fregs)->regs, addr)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
......
...@@ -16,8 +16,6 @@ extern bool pgtable_l5_enabled; ...@@ -16,8 +16,6 @@ extern bool pgtable_l5_enabled;
#define PGDIR_SHIFT_L3 30 #define PGDIR_SHIFT_L3 30
#define PGDIR_SHIFT_L4 39 #define PGDIR_SHIFT_L4 39
#define PGDIR_SHIFT_L5 48 #define PGDIR_SHIFT_L5 48
#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3)
#define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \ #define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
(pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)) (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
/* Size of region mapped by a page global directory */ /* Size of region mapped by a page global directory */
......
...@@ -880,7 +880,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) ...@@ -880,7 +880,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
*/ */
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2) #define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2) #define TASK_SIZE_MAX LONG_MAX
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE) #define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
...@@ -892,7 +892,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) ...@@ -892,7 +892,6 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#else #else
#define TASK_SIZE FIXADDR_START #define TASK_SIZE FIXADDR_START
#define TASK_SIZE_MIN TASK_SIZE
#endif #endif
#else /* CONFIG_MMU */ #else /* CONFIG_MMU */
......
...@@ -382,6 +382,8 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1 ...@@ -382,6 +382,8 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1
static inline void sbi_init(void) {} static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */ #endif /* CONFIG_RISCV_SBI */
unsigned long riscv_get_mvendorid(void);
unsigned long riscv_get_marchid(void);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id); unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
unsigned long riscv_cached_marchid(unsigned int cpu_id); unsigned long riscv_cached_marchid(unsigned int cpu_id);
unsigned long riscv_cached_mimpid(unsigned int cpu_id); unsigned long riscv_cached_mimpid(unsigned int cpu_id);
......
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/kbuild.h> #include <linux/kbuild.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/ftrace.h>
#include <linux/suspend.h> #include <linux/suspend.h>
#include <asm/kvm_host.h> #include <asm/kvm_host.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
...@@ -488,4 +489,21 @@ void asm_offsets(void) ...@@ -488,4 +489,21 @@ void asm_offsets(void)
DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN)); DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
OFFSET(STACKFRAME_FP, stackframe, fp); OFFSET(STACKFRAME_FP, stackframe, fp);
OFFSET(STACKFRAME_RA, stackframe, ra); OFFSET(STACKFRAME_RA, stackframe, ra);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
DEFINE(FREGS_SIZE_ON_STACK, ALIGN(sizeof(struct ftrace_regs), STACK_ALIGN));
DEFINE(FREGS_EPC, offsetof(struct ftrace_regs, epc));
DEFINE(FREGS_RA, offsetof(struct ftrace_regs, ra));
DEFINE(FREGS_SP, offsetof(struct ftrace_regs, sp));
DEFINE(FREGS_S0, offsetof(struct ftrace_regs, s0));
DEFINE(FREGS_T1, offsetof(struct ftrace_regs, t1));
DEFINE(FREGS_A0, offsetof(struct ftrace_regs, a0));
DEFINE(FREGS_A1, offsetof(struct ftrace_regs, a1));
DEFINE(FREGS_A2, offsetof(struct ftrace_regs, a2));
DEFINE(FREGS_A3, offsetof(struct ftrace_regs, a3));
DEFINE(FREGS_A4, offsetof(struct ftrace_regs, a4));
DEFINE(FREGS_A5, offsetof(struct ftrace_regs, a5));
DEFINE(FREGS_A6, offsetof(struct ftrace_regs, a6));
DEFINE(FREGS_A7, offsetof(struct ftrace_regs, a7));
#endif
} }
...@@ -139,6 +139,34 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid) ...@@ -139,6 +139,34 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
return -1; return -1;
} }
unsigned long __init riscv_get_marchid(void)
{
struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
#if IS_ENABLED(CONFIG_RISCV_SBI)
ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
ci->marchid = csr_read(CSR_MARCHID);
#else
ci->marchid = 0;
#endif
return ci->marchid;
}
unsigned long __init riscv_get_mvendorid(void)
{
struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
#if IS_ENABLED(CONFIG_RISCV_SBI)
ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
ci->mvendorid = csr_read(CSR_MVENDORID);
#else
ci->mvendorid = 0;
#endif
return ci->mvendorid;
}
DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo); DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id) unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
...@@ -170,11 +198,15 @@ static int riscv_cpuinfo_starting(unsigned int cpu) ...@@ -170,11 +198,15 @@ static int riscv_cpuinfo_starting(unsigned int cpu)
struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo); struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
#if IS_ENABLED(CONFIG_RISCV_SBI) #if IS_ENABLED(CONFIG_RISCV_SBI)
if (!ci->mvendorid)
ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid(); ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
if (!ci->marchid)
ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid(); ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid(); ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE) #elif IS_ENABLED(CONFIG_RISCV_M_MODE)
if (!ci->mvendorid)
ci->mvendorid = csr_read(CSR_MVENDORID); ci->mvendorid = csr_read(CSR_MVENDORID);
if (!ci->marchid)
ci->marchid = csr_read(CSR_MARCHID); ci->marchid = csr_read(CSR_MARCHID);
ci->mimpid = csr_read(CSR_MIMPID); ci->mimpid = csr_read(CSR_MIMPID);
#else #else
......
...@@ -490,6 +490,8 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) ...@@ -490,6 +490,8 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
struct acpi_table_header *rhct; struct acpi_table_header *rhct;
acpi_status status; acpi_status status;
unsigned int cpu; unsigned int cpu;
u64 boot_vendorid;
u64 boot_archid;
if (!acpi_disabled) { if (!acpi_disabled) {
status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct); status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
...@@ -497,6 +499,9 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) ...@@ -497,6 +499,9 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
return; return;
} }
boot_vendorid = riscv_get_mvendorid();
boot_archid = riscv_get_marchid();
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
struct riscv_isainfo *isainfo = &hart_isa[cpu]; struct riscv_isainfo *isainfo = &hart_isa[cpu];
unsigned long this_hwcap = 0; unsigned long this_hwcap = 0;
...@@ -544,8 +549,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) ...@@ -544,8 +549,7 @@ static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
* CPU cores with the ratified spec will contain non-zero * CPU cores with the ratified spec will contain non-zero
* marchid. * marchid.
*/ */
if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID && if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) {
riscv_cached_marchid(cpu) == 0x0) {
this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v]; this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
clear_bit(RISCV_ISA_EXT_v, isainfo->isa); clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
} }
...@@ -599,7 +603,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) ...@@ -599,7 +603,7 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
if (ext->subset_ext_size) { if (ext->subset_ext_size) {
for (int j = 0; j < ext->subset_ext_size; j++) { for (int j = 0; j < ext->subset_ext_size; j++) {
if (riscv_isa_extension_check(ext->subset_ext_ids[i])) if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
set_bit(ext->subset_ext_ids[j], isainfo->isa); set_bit(ext->subset_ext_ids[j], isainfo->isa);
} }
} }
......
...@@ -211,7 +211,7 @@ SYM_FUNC_START(put_f64_reg) ...@@ -211,7 +211,7 @@ SYM_FUNC_START(put_f64_reg)
SYM_FUNC_END(put_f64_reg) SYM_FUNC_END(put_f64_reg)
/* /*
* put_f64_reg - Get a 64 bits FP register value and returned it or store it to * get_f64_reg - Get a 64 bits FP register value and returned it or store it to
* a pointer. * a pointer.
* a0 = FP register index to be retrieved * a0 = FP register index to be retrieved
* a1 = If xlen == 32, pointer which should be loaded with the FP register value * a1 = If xlen == 32, pointer which should be loaded with the FP register value
......
...@@ -120,6 +120,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) ...@@ -120,6 +120,9 @@ int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
out = ftrace_make_nop(mod, rec, MCOUNT_ADDR); out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
mutex_unlock(&text_mutex); mutex_unlock(&text_mutex);
if (!mod)
local_flush_icache_range(rec->ip, rec->ip + MCOUNT_INSN_SIZE);
return out; return out;
} }
...@@ -127,10 +130,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func) ...@@ -127,10 +130,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
{ {
int ret = __ftrace_modify_call((unsigned long)&ftrace_call, int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
(unsigned long)func, true, true); (unsigned long)func, true, true);
if (!ret) {
ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
(unsigned long)func, true, true);
}
return ret; return ret;
} }
...@@ -172,7 +171,7 @@ void arch_ftrace_update_code(int command) ...@@ -172,7 +171,7 @@ void arch_ftrace_update_code(int command)
} }
#endif #endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr) unsigned long addr)
{ {
...@@ -214,16 +213,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, ...@@ -214,16 +213,13 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
} }
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct ftrace_regs *fregs) struct ftrace_ops *op, struct ftrace_regs *fregs)
{ {
struct pt_regs *regs = arch_ftrace_get_regs(fregs); prepare_ftrace_return(&fregs->ra, ip, fregs->s0);
unsigned long *parent = (unsigned long *)&regs->ra;
prepare_ftrace_return(parent, ip, frame_pointer(regs));
} }
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
extern void ftrace_graph_call(void); extern void ftrace_graph_call(void);
int ftrace_enable_ftrace_graph_caller(void) int ftrace_enable_ftrace_graph_caller(void)
{ {
...@@ -236,6 +232,6 @@ int ftrace_disable_ftrace_graph_caller(void) ...@@ -236,6 +232,6 @@ int ftrace_disable_ftrace_graph_caller(void)
return __ftrace_modify_call((unsigned long)&ftrace_graph_call, return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
(unsigned long)&prepare_ftrace_return, false, true); (unsigned long)&prepare_ftrace_return, false, true);
} }
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
...@@ -56,138 +56,77 @@ ...@@ -56,138 +56,77 @@
addi sp, sp, ABI_SIZE_ON_STACK addi sp, sp, ABI_SIZE_ON_STACK
.endm .endm
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/** /**
* SAVE_ABI_REGS - save regs against the pt_regs struct * SAVE_ABI_REGS - save regs against the ftrace_regs struct
*
* @all: tell if saving all the regs
*
* If all is set, all the regs will be saved, otherwise only ABI
* related regs (a0-a7,epc,ra and optional s0) will be saved.
* *
* After the stack is established, * After the stack is established,
* *
* 0(sp) stores the PC of the traced function which can be accessed * 0(sp) stores the PC of the traced function which can be accessed
* by &(fregs)->regs->epc in tracing function. Note that the real * by &(fregs)->epc in tracing function. Note that the real
* function entry address should be computed with -FENTRY_RA_OFFSET. * function entry address should be computed with -FENTRY_RA_OFFSET.
* *
* 8(sp) stores the function return address (i.e. parent IP) that * 8(sp) stores the function return address (i.e. parent IP) that
* can be accessed by &(fregs)->regs->ra in tracing function. * can be accessed by &(fregs)->ra in tracing function.
* *
* The other regs are saved at the respective localtion and accessed * The other regs are saved at the respective localtion and accessed
* by the respective pt_regs member. * by the respective ftrace_regs member.
* *
* Here is the layout of stack for your reference. * Here is the layout of stack for your reference.
* *
* PT_SIZE_ON_STACK -> +++++++++ * PT_SIZE_ON_STACK -> +++++++++
* + ..... + * + ..... +
* + t3-t6 +
* + s2-s11+
* + a0-a7 + --++++-> ftrace_caller saved * + a0-a7 + --++++-> ftrace_caller saved
* + s1 + + * + t1 + --++++-> direct tramp address
* + s0 + --+ * + s0 + --+ // frame pointer
* + t0-t2 + +
* + tp + +
* + gp + +
* + sp + + * + sp + +
* + ra + --+ // parent IP * + ra + --+ // parent IP
* sp -> + epc + --+ // PC * sp -> + epc + --+ // PC
* +++++++++ * +++++++++
**/ **/
.macro SAVE_ABI_REGS, all=0 .macro SAVE_ABI_REGS
addi sp, sp, -PT_SIZE_ON_STACK mv t4, sp // Save original SP in T4
addi sp, sp, -FREGS_SIZE_ON_STACK
REG_S t0, PT_EPC(sp)
REG_S x1, PT_RA(sp)
// save the ABI regs
REG_S x10, PT_A0(sp)
REG_S x11, PT_A1(sp)
REG_S x12, PT_A2(sp)
REG_S x13, PT_A3(sp)
REG_S x14, PT_A4(sp)
REG_S x15, PT_A5(sp)
REG_S x16, PT_A6(sp)
REG_S x17, PT_A7(sp)
// save the leftover regs
.if \all == 1 REG_S t0, FREGS_EPC(sp)
REG_S x2, PT_SP(sp) REG_S x1, FREGS_RA(sp)
REG_S x3, PT_GP(sp) REG_S t4, FREGS_SP(sp) // Put original SP on stack
REG_S x4, PT_TP(sp)
REG_S x5, PT_T0(sp)
REG_S x6, PT_T1(sp)
REG_S x7, PT_T2(sp)
REG_S x8, PT_S0(sp)
REG_S x9, PT_S1(sp)
REG_S x18, PT_S2(sp)
REG_S x19, PT_S3(sp)
REG_S x20, PT_S4(sp)
REG_S x21, PT_S5(sp)
REG_S x22, PT_S6(sp)
REG_S x23, PT_S7(sp)
REG_S x24, PT_S8(sp)
REG_S x25, PT_S9(sp)
REG_S x26, PT_S10(sp)
REG_S x27, PT_S11(sp)
REG_S x28, PT_T3(sp)
REG_S x29, PT_T4(sp)
REG_S x30, PT_T5(sp)
REG_S x31, PT_T6(sp)
// save s0 if FP_TEST defined
.else
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
REG_S x8, PT_S0(sp) REG_S x8, FREGS_S0(sp)
#endif #endif
.endif REG_S x6, FREGS_T1(sp)
// save the arguments
REG_S x10, FREGS_A0(sp)
REG_S x11, FREGS_A1(sp)
REG_S x12, FREGS_A2(sp)
REG_S x13, FREGS_A3(sp)
REG_S x14, FREGS_A4(sp)
REG_S x15, FREGS_A5(sp)
REG_S x16, FREGS_A6(sp)
REG_S x17, FREGS_A7(sp)
.endm .endm
.macro RESTORE_ABI_REGS, all=0 .macro RESTORE_ABI_REGS, all=0
REG_L t0, PT_EPC(sp) REG_L t0, FREGS_EPC(sp)
REG_L x1, PT_RA(sp) REG_L x1, FREGS_RA(sp)
REG_L x10, PT_A0(sp)
REG_L x11, PT_A1(sp)
REG_L x12, PT_A2(sp)
REG_L x13, PT_A3(sp)
REG_L x14, PT_A4(sp)
REG_L x15, PT_A5(sp)
REG_L x16, PT_A6(sp)
REG_L x17, PT_A7(sp)
.if \all == 1
REG_L x2, PT_SP(sp)
REG_L x3, PT_GP(sp)
REG_L x4, PT_TP(sp)
REG_L x6, PT_T1(sp)
REG_L x7, PT_T2(sp)
REG_L x8, PT_S0(sp)
REG_L x9, PT_S1(sp)
REG_L x18, PT_S2(sp)
REG_L x19, PT_S3(sp)
REG_L x20, PT_S4(sp)
REG_L x21, PT_S5(sp)
REG_L x22, PT_S6(sp)
REG_L x23, PT_S7(sp)
REG_L x24, PT_S8(sp)
REG_L x25, PT_S9(sp)
REG_L x26, PT_S10(sp)
REG_L x27, PT_S11(sp)
REG_L x28, PT_T3(sp)
REG_L x29, PT_T4(sp)
REG_L x30, PT_T5(sp)
REG_L x31, PT_T6(sp)
.else
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
REG_L x8, PT_S0(sp) REG_L x8, FREGS_S0(sp)
#endif #endif
.endif REG_L x6, FREGS_T1(sp)
addi sp, sp, PT_SIZE_ON_STACK
// restore the arguments
REG_L x10, FREGS_A0(sp)
REG_L x11, FREGS_A1(sp)
REG_L x12, FREGS_A2(sp)
REG_L x13, FREGS_A3(sp)
REG_L x14, FREGS_A4(sp)
REG_L x15, FREGS_A5(sp)
REG_L x16, FREGS_A6(sp)
REG_L x17, FREGS_A7(sp)
addi sp, sp, FREGS_SIZE_ON_STACK
.endm .endm
.macro PREPARE_ARGS .macro PREPARE_ARGS
...@@ -198,9 +137,9 @@ ...@@ -198,9 +137,9 @@
mv a3, sp mv a3, sp
.endm .endm
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS #ifndef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
SYM_FUNC_START(ftrace_caller) SYM_FUNC_START(ftrace_caller)
SAVE_ABI SAVE_ABI
...@@ -227,33 +166,23 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) ...@@ -227,33 +166,23 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
jr t0 jr t0
SYM_FUNC_END(ftrace_caller) SYM_FUNC_END(ftrace_caller)
#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ #else /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
SYM_FUNC_START(ftrace_regs_caller) SYM_FUNC_START(ftrace_caller)
mv t1, zero mv t1, zero
SAVE_ABI_REGS 1 SAVE_ABI_REGS
PREPARE_ARGS PREPARE_ARGS
SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub call ftrace_stub
RESTORE_ABI_REGS 1 RESTORE_ABI_REGS
bnez t1, .Ldirect bnez t1, .Ldirect
jr t0 jr t0
.Ldirect: .Ldirect:
jr t1 jr t1
SYM_FUNC_END(ftrace_regs_caller)
SYM_FUNC_START(ftrace_caller)
SAVE_ABI_REGS 0
PREPARE_ARGS
SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub
RESTORE_ABI_REGS 0
jr t0
SYM_FUNC_END(ftrace_caller) SYM_FUNC_END(ftrace_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_ARGS */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
SYM_CODE_START(ftrace_stub_direct_tramp) SYM_CODE_START(ftrace_stub_direct_tramp)
......
...@@ -224,7 +224,7 @@ asmlinkage __visible void smp_callin(void) ...@@ -224,7 +224,7 @@ asmlinkage __visible void smp_callin(void)
riscv_ipi_enable(); riscv_ipi_enable();
numa_add_cpu(curr_cpuid); numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1); set_cpu_online(curr_cpuid, true);
if (has_vector()) { if (has_vector()) {
if (riscv_v_setup_vsize()) if (riscv_v_setup_vsize())
......
...@@ -18,6 +18,16 @@ ...@@ -18,6 +18,16 @@
extern asmlinkage void ret_from_exception(void); extern asmlinkage void ret_from_exception(void);
static inline int fp_is_valid(unsigned long fp, unsigned long sp)
{
unsigned long low, high;
low = sp + sizeof(struct stackframe);
high = ALIGN(sp, THREAD_SIZE);
return !(fp < low || fp > high || fp & 0x07);
}
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg) bool (*fn)(void *, unsigned long), void *arg)
{ {
...@@ -41,21 +51,19 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, ...@@ -41,21 +51,19 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
} }
for (;;) { for (;;) {
unsigned long low, high;
struct stackframe *frame; struct stackframe *frame;
if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc)))) if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break; break;
/* Validate frame pointer */ if (unlikely(!fp_is_valid(fp, sp)))
low = sp + sizeof(struct stackframe);
high = ALIGN(sp, THREAD_SIZE);
if (unlikely(fp < low || fp > high || fp & 0x7))
break; break;
/* Unwind stack frame */ /* Unwind stack frame */
frame = (struct stackframe *)fp - 1; frame = (struct stackframe *)fp - 1;
sp = fp; sp = fp;
if (regs && (regs->epc == pc) && (frame->fp & 0x7)) { if (regs && (regs->epc == pc) && fp_is_valid(frame->ra, sp)) {
/* We hit function where ra is not saved on the stack */
fp = frame->ra; fp = frame->ra;
pc = regs->ra; pc = regs->ra;
} else { } else {
......
...@@ -44,7 +44,7 @@ SYM_FUNC_START(fallback_scalar_usercopy) ...@@ -44,7 +44,7 @@ SYM_FUNC_START(fallback_scalar_usercopy)
* Use byte copy only if too small. * Use byte copy only if too small.
* SZREG holds 4 for RV32 and 8 for RV64 * SZREG holds 4 for RV32 and 8 for RV64
*/ */
li a3, 9*SZREG /* size must be larger than size in word_copy */ li a3, 9*SZREG-1 /* size must >= (word_copy stride + SZREG-1) */
bltu a2, a3, .Lbyte_copy_tail bltu a2, a3, .Lbyte_copy_tail
/* /*
...@@ -103,7 +103,7 @@ SYM_FUNC_START(fallback_scalar_usercopy) ...@@ -103,7 +103,7 @@ SYM_FUNC_START(fallback_scalar_usercopy)
fixup REG_S t4, 7*SZREG(a0), 10f fixup REG_S t4, 7*SZREG(a0), 10f
addi a0, a0, 8*SZREG addi a0, a0, 8*SZREG
addi a1, a1, 8*SZREG addi a1, a1, 8*SZREG
bltu a0, t0, 2b bleu a0, t0, 2b
addi t0, t0, 8*SZREG /* revert to original value */ addi t0, t0, 8*SZREG /* revert to original value */
j .Lbyte_copy_tail j .Lbyte_copy_tail
......
...@@ -293,8 +293,8 @@ void handle_page_fault(struct pt_regs *regs) ...@@ -293,8 +293,8 @@ void handle_page_fault(struct pt_regs *regs)
if (unlikely(access_error(cause, vma))) { if (unlikely(access_error(cause, vma))) {
vma_end_read(vma); vma_end_read(vma);
count_vm_vma_lock_event(VMA_LOCK_SUCCESS); count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
tsk->thread.bad_cause = cause; tsk->thread.bad_cause = SEGV_ACCERR;
bad_area_nosemaphore(regs, SEGV_ACCERR, addr); bad_area_nosemaphore(regs, code, addr);
return; return;
} }
......
...@@ -683,6 +683,9 @@ void __init create_pgd_mapping(pgd_t *pgdp, ...@@ -683,6 +683,9 @@ void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va, static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
phys_addr_t size) phys_addr_t size)
{ {
if (debug_pagealloc_enabled())
return PAGE_SIZE;
if (pgtable_l5_enabled && if (pgtable_l5_enabled &&
!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
return P4D_SIZE; return P4D_SIZE;
......
...@@ -387,17 +387,33 @@ int set_direct_map_default_noflush(struct page *page) ...@@ -387,17 +387,33 @@ int set_direct_map_default_noflush(struct page *page)
} }
#ifdef CONFIG_DEBUG_PAGEALLOC #ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * Callback for apply_to_existing_page_range(): set or clear the
 * present bit of one kernel PTE. @data points to an int flag:
 * non-zero maps the page, zero unmaps it.
 */
static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
{
	int enable = *(int *)data;
	unsigned long val = pte_val(ptep_get(pte));

	set_pte(pte, __pte(enable ? (val | _PAGE_PRESENT)
				  : (val & ~_PAGE_PRESENT)));

	return 0;
}
void __kernel_map_pages(struct page *page, int numpages, int enable) void __kernel_map_pages(struct page *page, int numpages, int enable)
{ {
if (!debug_pagealloc_enabled()) if (!debug_pagealloc_enabled())
return; return;
if (enable) unsigned long start = (unsigned long)page_address(page);
__set_memory((unsigned long)page_address(page), numpages, unsigned long size = PAGE_SIZE * numpages;
__pgprot(_PAGE_PRESENT), __pgprot(0));
else apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);
__set_memory((unsigned long)page_address(page), numpages,
__pgprot(0), __pgprot(_PAGE_PRESENT)); flush_tlb_kernel_range(start, start + size);
} }
#endif #endif
......
...@@ -49,7 +49,7 @@ static int __init imsic_ipi_domain_init(void) ...@@ -49,7 +49,7 @@ static int __init imsic_ipi_domain_init(void)
return virq < 0 ? virq : -ENOMEM; return virq < 0 ? virq : -ENOMEM;
/* Set vIRQ range */ /* Set vIRQ range */
riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI, true); riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI);
/* Announce that IMSIC is providing IPIs */ /* Announce that IMSIC is providing IPIs */
pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID); pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);
......
...@@ -819,7 +819,8 @@ static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec) ...@@ -819,7 +819,8 @@ static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
#if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \ #if defined(CONFIG_DYNAMIC_FTRACE_WITH_REGS) || \
defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) defined(CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS) || \
defined(CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS)
/** /**
* ftrace_modify_call - convert from one addr to another (no nop) * ftrace_modify_call - convert from one addr to another (no nop)
* @rec: the call site record (e.g. mcount/fentry) * @rec: the call site record (e.g. mcount/fentry)
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
ARCH ?= $(shell uname -m 2>/dev/null || echo not) ARCH ?= $(shell uname -m 2>/dev/null || echo not)
ifneq (,$(filter $(ARCH),riscv)) ifneq (,$(filter $(ARCH),riscv))
RISCV_SUBTARGETS ?= hwprobe vector mm RISCV_SUBTARGETS ?= hwprobe vector mm sigreturn
else else
RISCV_SUBTARGETS := RISCV_SUBTARGETS :=
endif endif
......
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2021 ARM Limited
# Originally tools/testing/arm64/abi/Makefile

CFLAGS += -I$(top_srcdir)/tools/include

TEST_GEN_PROGS := sigreturn

include ../../lib.mk

# Link statically so the binary runs on a target without matching
# shared libraries (the test may be copied to a bare RISC-V system).
$(OUTPUT)/sigreturn: sigreturn.c
	$(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
// SPDX-License-Identifier: GPL-2.0-only
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>
#include <linux/ptrace.h>
#include "../../kselftest_harness.h"

/* Magic tag identifying the vector extension block in the saved signal
 * context (matched against ext->hdr.magic in vector_override()). */
#define RISCV_V_MAGIC 0x53465457

/* Value loaded into v0 before the deliberate fault. */
#define DEFAULT_VALUE 2
/* Value a handler writes into the saved vector state to prove that
 * sigreturn restores handler-modified state. */
#define SIGNAL_HANDLER_OVERRIDE 3
/*
 * SIGSEGV handler that leaves the saved context untouched except for
 * stepping the PC past the 4-byte faulting instruction, so execution
 * resumes after sigreturn.
 */
static void simple_handle(int sig_no, siginfo_t *info, void *vcontext)
{
	ucontext_t *context = vcontext;

	context->uc_mcontext.__gregs[REG_PC] += 4;
}
/*
 * SIGSEGV handler that locates the vector extension state inside the
 * saved signal context, overwrites its first datum with
 * SIGNAL_HANDLER_OVERRIDE, then steps the PC past the faulting
 * instruction. Lets the test verify that sigreturn restores
 * handler-modified vector state.
 */
static void vector_override(int sig_no, siginfo_t *info, void *vcontext)
{
	ucontext_t *context = vcontext;
	struct __riscv_extra_ext_header *ext;
	struct __riscv_v_ext_state *v_state;

	/* The extra-extension header sits where __fpregs is saved. */
	ext = (void *)(&context->uc_mcontext.__fpregs);
	if (ext->hdr.magic != RISCV_V_MAGIC) {
		fprintf(stderr, "bad vector magic: %x\n", ext->hdr.magic);
		abort();
	}

	/* The vector state immediately follows the header. */
	v_state = (void *)((char *)ext + sizeof(*ext));
	*(int *)v_state->datap = SIGNAL_HANDLER_OVERRIDE;

	context->uc_mcontext.__gregs[REG_PC] += 4;
}
/*
 * Install @handler for SIGSEGV, place @data into vector register v0,
 * trigger a SIGSEGV with a load from address 0, and return the value
 * read back from v0 after the handler returns via sigreturn.
 *
 * The faulting load sits between vmv.s.x (write v0) and vmv.x.s
 * (read v0), so the returned value reveals whether sigreturn restored
 * the vector state saved at signal delivery — including any changes
 * the handler made to it. The instruction order inside the asm block
 * is therefore essential and must not be rearranged.
 */
static int vector_sigreturn(int data, void (*handler)(int, siginfo_t *, void *))
{
	int after_sigreturn;
	struct sigaction sig_action = {
		.sa_sigaction = handler,
		.sa_flags = SA_SIGINFO
	};

	sigaction(SIGSEGV, &sig_action, 0);

	asm(".option push				\n\
	.option arch, +v			\n\
	vsetivli x0, 1, e32, ta, ma		\n\
	vmv.s.x	 v0, %1				\n\
	# Generate SIGSEGV			\n\
	lw	 a0, 0(x0)			\n\
	vmv.x.s	 %0, v0				\n\
	.option pop" : "=r" (after_sigreturn) : "r" (data));

	return after_sigreturn;
}
/* A handler that doesn't touch the context: sigreturn must restore v0. */
TEST(vector_restore)
{
	int restored = vector_sigreturn(DEFAULT_VALUE, &simple_handle);

	EXPECT_EQ(DEFAULT_VALUE, restored);
}
/* A handler that rewrites the saved vector state: sigreturn must apply it. */
TEST(vector_restore_signal_handler_override)
{
	int restored = vector_sigreturn(DEFAULT_VALUE, &vector_override);

	EXPECT_EQ(SIGNAL_HANDLER_OVERRIDE, restored);
}
TEST_HARNESS_MAIN
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment