Commit 6a0e20cd authored by Linus Torvalds

Merge tag 'riscv/for-v5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Paul Walmsley:
 "New features:
   - SECCOMP support
   - nommu support
   - SBI-less system support
   - M-Mode support
   - TLB flush optimizations

  Other improvements:
   - Pass the complete RISC-V ISA string supported by the CPU cores to
     userspace, rather than redacting parts of it in the kernel
   - Add platform DMA IP block data to the HiFive Unleashed board DT
     file
   - Add Makefile support for BZ2, LZ4, LZMA, LZO kernel image
     compression formats, in line with other architectures

  Cleanups:
   - Remove unnecessary PTE_PARENT_SIZE macro
   - Standardize include guard naming across arch/riscv"

* tag 'riscv/for-v5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: (22 commits)
  riscv: provide a flat image loader
  riscv: add nommu support
  riscv: clear the instruction cache and all registers when booting
  riscv: read the hart ID from mhartid on boot
  riscv: provide native clint access for M-mode
  riscv: dts: add support for PDMA device of HiFive Unleashed Rev A00
  riscv: add support for MMIO access to the timer registers
  riscv: implement remote sfence.i using IPIs
  riscv: cleanup the default power off implementation
  riscv: poison SBI calls for M-mode
  riscv: don't allow selecting SBI based drivers for M-mode
  RISC-V: Add multiple compression image format.
  riscv: clean up the macro format in each header file
  riscv: Use PMD_SIZE to replace PTE_PARENT_SIZE
  riscv: abstract out CSR names for supervisor vs machine mode
  riscv: separate MMIO functions into their own header file
  riscv: enter WFI in default_power_off() if SBI does not shutdown
  RISC-V: Issue a tlb page flush if possible
  RISC-V: Issue a local tlbflush if possible.
  RISC-V: Do not invoke SBI call if cpumask is empty
  ...
parents 80eb5fea 5ba9aa56
...
@@ -26,14 +26,15 @@ config RISCV
 	select GENERIC_IRQ_SHOW
 	select GENERIC_PCI_IOMAP
 	select GENERIC_SCHED_CLOCK
-	select GENERIC_STRNCPY_FROM_USER
-	select GENERIC_STRNLEN_USER
+	select GENERIC_STRNCPY_FROM_USER if MMU
+	select GENERIC_STRNLEN_USER if MMU
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_ATOMIC64 if !64BIT
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_MEMBLOCK_NODE_MAP
-	select HAVE_DMA_CONTIGUOUS
+	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
@@ -50,6 +51,7 @@ config RISCV
 	select PCI_DOMAINS_GENERIC if PCI
 	select PCI_MSI if PCI
 	select RISCV_TIMER
+	select UACCESS_MEMCPY if !MMU
 	select GENERIC_IRQ_MULTI_HANDLER
 	select GENERIC_ARCH_TOPOLOGY if SMP
 	select ARCH_HAS_PTE_SPECIAL
@@ -60,7 +62,7 @@ config RISCV
 	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
 	select SPARSEMEM_STATIC if 32BIT
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
-	select HAVE_ARCH_MMAP_RND_BITS
+	select HAVE_ARCH_MMAP_RND_BITS if MMU

 config ARCH_MMAP_RND_BITS_MIN
 	default 18 if 64BIT

@@ -72,8 +74,23 @@ config ARCH_MMAP_RND_BITS_MAX
 	default 24 if 64BIT # SV39 based
 	default 17

+# set if we run in machine mode, cleared if we run in supervisor mode
+config RISCV_M_MODE
+	bool
+	default !MMU
+
+# set if we are running in S-mode and can use SBI calls
+config RISCV_SBI
+	bool
+	depends on !RISCV_M_MODE
+	default y
+
 config MMU
-	def_bool y
+	bool "MMU-based Paged Memory Management Support"
+	default y
+	help
+	  Select if you want MMU-based virtualised addressing space
+	  support by paged memory management. If unsure, say 'Y'.

 config ZONE_DMA32
 	bool
@@ -92,6 +109,7 @@ config PA_BITS
 config PAGE_OFFSET
 	hex
 	default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
+	default 0x80000000 if 64BIT && !MMU
 	default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
 	default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
@@ -135,7 +153,7 @@ config GENERIC_HWEIGHT
 	def_bool y

 config FIX_EARLYCON_MEM
-	def_bool y
+	def_bool CONFIG_MMU

 config PGTABLE_LEVELS
 	int
@@ -160,6 +178,7 @@ config ARCH_RV32I
 	select GENERIC_LIB_ASHRDI3
 	select GENERIC_LIB_LSHRDI3
 	select GENERIC_LIB_UCMPDI2
+	select MMU

 config ARCH_RV64I
 	bool "RV64I"

@@ -168,9 +187,9 @@ config ARCH_RV64I
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FTRACE_MCOUNT_RECORD
-	select HAVE_DYNAMIC_FTRACE
-	select HAVE_DYNAMIC_FTRACE_WITH_REGS
-	select SWIOTLB
+	select HAVE_DYNAMIC_FTRACE if MMU
+	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+	select SWIOTLB if MMU

 endchoice
@@ -272,6 +291,19 @@ menu "Kernel features"

 source "kernel/Kconfig.hz"

+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
 endmenu

 menu "Boot options"
...
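The SECCOMP help text above refers to prctl(PR_SET_SECCOMP). A minimal
user-space sketch of strict-mode seccomp on a kernel built with this option
(illustrative, not part of this diff):

	/* build with: cc -o strict strict.c */
	#include <linux/seccomp.h>
	#include <sys/prctl.h>
	#include <unistd.h>

	int main(void)
	{
		/* After this call only read(), write(), _exit() and
		 * sigreturn() are permitted; any other syscall kills
		 * the task. */
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0)
			return 1;
		write(1, "sandboxed\n", 10);
		_exit(0);
	}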
...
@@ -83,13 +83,18 @@ PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@

-all: Image.gz
+ifeq ($(CONFIG_RISCV_M_MODE),y)
+KBUILD_IMAGE := $(boot)/loader
+else
+KBUILD_IMAGE := $(boot)/Image.gz
+endif

-Image: vmlinux
-	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+BOOT_TARGETS := Image Image.gz loader

-Image.%: Image
+all:	$(notdir $(KBUILD_IMAGE))
+
+$(BOOT_TARGETS): vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+	@$(kecho) '  Kernel: $(boot)/$@ is ready'

 zinstall install:
 	$(Q)$(MAKE) $(build)=$(boot) $@
...
@@ -16,7 +16,7 @@
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S

-targets := Image
+targets := Image loader

 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)

@@ -24,6 +24,23 @@ $(obj)/Image: vmlinux FORCE
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)

+loader.o: $(src)/loader.S $(obj)/Image
+
+$(obj)/loader: $(obj)/loader.o $(obj)/Image $(obj)/loader.lds FORCE
+	$(Q)$(LD) -T $(obj)/loader.lds -o $@ $(obj)/loader.o
+
+$(obj)/Image.bz2: $(obj)/Image FORCE
+	$(call if_changed,bzip2)
+
+$(obj)/Image.lz4: $(obj)/Image FORCE
+	$(call if_changed,lz4)
+
+$(obj)/Image.lzma: $(obj)/Image FORCE
+	$(call if_changed,lzma)
+
+$(obj)/Image.lzo: $(obj)/Image FORCE
+	$(call if_changed,lzo)
+
 install:
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 		$(obj)/Image System.map "$(INSTALL_PATH)"
...
...
@@ -162,6 +162,13 @@ uart0: serial@10010000 {
 			clocks = <&prci PRCI_CLK_TLCLK>;
 			status = "disabled";
 		};
+		dma: dma@3000000 {
+			compatible = "sifive,fu540-c000-pdma";
+			reg = <0x0 0x3000000 0x0 0x8000>;
+			interrupt-parent = <&plic0>;
+			interrupts = <23 24 25 26 27 28 29 30>;
+			#dma-cells = <1>;
+		};
 		uart1: serial@10011000 {
 			compatible = "sifive,fu540-c000-uart", "sifive,uart0";
 			reg = <0x0 0x10011000 0x0 0x1000>;
...
/* SPDX-License-Identifier: GPL-2.0 */

	.align 4
	.section .payload, "ax", %progbits
	.globl _start
_start:
	.incbin "arch/riscv/boot/Image"

...
/* SPDX-License-Identifier: GPL-2.0 */

#include <asm/page.h>

OUTPUT_ARCH(riscv)
ENTRY(_start)

SECTIONS
{
	. = PAGE_OFFSET;

	.payload : {
		*(.payload)
		. = ALIGN(8);
	}
}
...
# CONFIG_CPU_ISOLATION is not set
CONFIG_LOG_BUF_SHIFT=16
CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_BZIP2 is not set
# CONFIG_RD_LZMA is not set
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
# CONFIG_FHANDLE is not set
# CONFIG_BASE_FULL is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
# CONFIG_IO_URING is not set
# CONFIG_ADVISE_SYSCALLS is not set
# CONFIG_MEMBARRIER is not set
# CONFIG_KALLSYMS is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLOB=y
# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
CONFIG_CMDLINE_FORCE=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
# CONFIG_MQ_IOSCHED_DEADLINE is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
CONFIG_BINFMT_FLAT=y
# CONFIG_COREDUMP is not set
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FW_LOADER is not set
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_VIRTIO_BLK=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_LDISC_AUTOLOAD is not set
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_8250=y
# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
CONFIG_SERIAL_OF_PLATFORM=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_LCD_CLASS_DEVICE is not set
# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
CONFIG_SIFIVE_PLIC=y
# CONFIG_VALIDATE_FS_PARSER is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
# CONFIG_MISC_FILESYSTEMS is not set
CONFIG_LSM="[]"
CONFIG_PRINTK_TIME=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_RCU_TRACE is not set
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
...
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_RISCV_PROTOTYPES_H
+#define _ASM_RISCV_PROTOTYPES_H

 #include <linux/ftrace.h>
 #include <asm-generic/asm-prototypes.h>
...
@@ -11,4 +11,12 @@
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)

+/*
+ * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
+ * the flat loader aligns it accordingly.
+ */
+#ifndef CONFIG_MMU
+#define ARCH_SLAB_MINALIGN 16
+#endif
+
 #endif /* _ASM_RISCV_CACHE_H */
...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_CLINT_H
#define _ASM_RISCV_CLINT_H 1

#include <linux/io.h>
#include <linux/smp.h>

#ifdef CONFIG_RISCV_M_MODE
extern u32 __iomem *clint_ipi_base;

void clint_init_boot_cpu(void);

static inline void clint_send_ipi_single(unsigned long hartid)
{
	writel(1, clint_ipi_base + hartid);
}

static inline void clint_send_ipi_mask(const struct cpumask *hartid_mask)
{
	int hartid;

	for_each_cpu(hartid, hartid_mask)
		clint_send_ipi_single(hartid);
}

static inline void clint_clear_ipi(unsigned long hartid)
{
	writel(0, clint_ipi_base + hartid);
}
#else /* CONFIG_RISCV_M_MODE */

#define clint_init_boot_cpu()	do { } while (0)

/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_M_MODE): */
void clint_send_ipi_single(unsigned long hartid);
void clint_send_ipi_mask(const struct cpumask *hartid_mask);
void clint_clear_ipi(unsigned long hartid);
#endif /* CONFIG_RISCV_M_MODE */

#endif /* _ASM_RISCV_CLINT_H */
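An illustrative sketch (not part of this diff) of how the helpers above are
meant to be used on an M-mode system; the hartid-to-CPU mapping is assumed to
be the identity here, which the real SMP code does not rely on:

	/* sender: raise the software interrupt (msip) for a target hart */
	static void kick_hart(unsigned long target_hartid)
	{
		clint_send_ipi_single(target_hartid);
	}

	/* receiver: acknowledge the IPI by clearing its own msip bit */
	static void ack_ipi(unsigned long own_hartid)
	{
		clint_clear_ipi(own_hartid);
	}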
...
@@ -11,8 +11,11 @@
 /* Status register flags */
 #define SR_SIE		_AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_MIE		_AC(0x00000008, UL) /* Machine Interrupt Enable */
 #define SR_SPIE		_AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_MPIE		_AC(0x00000080, UL) /* Previous Machine IE */
 #define SR_SPP		_AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_MPP		_AC(0x00001800, UL) /* Previously Machine */
 #define SR_SUM		_AC(0x00040000, UL) /* Supervisor User Memory Access */
 #define SR_FS		_AC(0x00006000, UL) /* Floating-point Status */

@@ -44,9 +47,10 @@
 #define SATP_MODE	SATP_MODE_39
 #endif

-/* SCAUSE */
-#define SCAUSE_IRQ_FLAG		(_AC(1, UL) << (__riscv_xlen - 1))
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG		(_AC(1, UL) << (__riscv_xlen - 1))

+/* Interrupt causes (minus the high bit) */
 #define IRQ_U_SOFT		0
 #define IRQ_S_SOFT		1
 #define IRQ_M_SOFT		3

@@ -57,6 +61,7 @@
 #define IRQ_S_EXT		9
 #define IRQ_M_EXT		11

+/* Exception causes */
 #define EXC_INST_MISALIGNED	0
 #define EXC_INST_ACCESS		1
 #define EXC_BREAKPOINT		3

@@ -67,14 +72,14 @@
 #define EXC_LOAD_PAGE_FAULT	13
 #define EXC_STORE_PAGE_FAULT	15

-/* SIE (Interrupt Enable) and SIP (Interrupt Pending) flags */
-#define SIE_SSIE	(_AC(0x1, UL) << IRQ_S_SOFT)
-#define SIE_STIE	(_AC(0x1, UL) << IRQ_S_TIMER)
-#define SIE_SEIE	(_AC(0x1, UL) << IRQ_S_EXT)
-
+/* symbolic CSR names: */
 #define CSR_CYCLE		0xc00
 #define CSR_TIME		0xc01
 #define CSR_INSTRET		0xc02
+#define CSR_CYCLEH		0xc80
+#define CSR_TIMEH		0xc81
+#define CSR_INSTRETH		0xc82
+
 #define CSR_SSTATUS		0x100
 #define CSR_SIE			0x104
 #define CSR_STVEC		0x105

@@ -85,9 +90,58 @@
 #define CSR_STVAL		0x143
 #define CSR_SIP			0x144
 #define CSR_SATP		0x180
-#define CSR_CYCLEH		0xc80
-#define CSR_TIMEH		0xc81
-#define CSR_INSTRETH		0xc82
+
+#define CSR_MSTATUS		0x300
+#define CSR_MISA		0x301
+#define CSR_MIE			0x304
+#define CSR_MTVEC		0x305
+#define CSR_MSCRATCH		0x340
+#define CSR_MEPC		0x341
+#define CSR_MCAUSE		0x342
+#define CSR_MTVAL		0x343
+#define CSR_MIP			0x344
+#define CSR_MHARTID		0xf14
+
+#ifdef CONFIG_RISCV_M_MODE
+# define CSR_STATUS	CSR_MSTATUS
+# define CSR_IE		CSR_MIE
+# define CSR_TVEC	CSR_MTVEC
+# define CSR_SCRATCH	CSR_MSCRATCH
+# define CSR_EPC	CSR_MEPC
+# define CSR_CAUSE	CSR_MCAUSE
+# define CSR_TVAL	CSR_MTVAL
+# define CSR_IP		CSR_MIP
+
+# define SR_IE		SR_MIE
+# define SR_PIE		SR_MPIE
+# define SR_PP		SR_MPP
+
+# define IRQ_SOFT	IRQ_M_SOFT
+# define IRQ_TIMER	IRQ_M_TIMER
+# define IRQ_EXT	IRQ_M_EXT
+#else /* CONFIG_RISCV_M_MODE */
+# define CSR_STATUS	CSR_SSTATUS
+# define CSR_IE		CSR_SIE
+# define CSR_TVEC	CSR_STVEC
+# define CSR_SCRATCH	CSR_SSCRATCH
+# define CSR_EPC	CSR_SEPC
+# define CSR_CAUSE	CSR_SCAUSE
+# define CSR_TVAL	CSR_STVAL
+# define CSR_IP		CSR_SIP
+
+# define SR_IE		SR_SIE
+# define SR_PIE		SR_SPIE
+# define SR_PP		SR_SPP
+
+# define IRQ_SOFT	IRQ_S_SOFT
+# define IRQ_TIMER	IRQ_S_TIMER
+# define IRQ_EXT	IRQ_S_EXT
+#endif /* CONFIG_RISCV_M_MODE */
+
+/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
+#define IE_SIE		(_AC(0x1, UL) << IRQ_SOFT)
+#define IE_TIE		(_AC(0x1, UL) << IRQ_TIMER)
+#define IE_EIE		(_AC(0x1, UL) << IRQ_EXT)

 #ifndef __ASSEMBLY__
...
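The CSR_* aliases in the csr.h hunk above are what let generic kernel code be
written once and built for either privilege mode. An illustrative sketch (not
from this diff) using those aliases:

	/* reads scause/sepc in S-mode and mcause/mepc in M-mode */
	static void report_trap(void)
	{
		unsigned long cause = csr_read(CSR_CAUSE);
		unsigned long epc = csr_read(CSR_EPC);

		if (cause & CAUSE_IRQ_FLAG)
			pr_info("interrupt %lu at %lx\n",
				cause & ~CAUSE_IRQ_FLAG, epc);
		else
			pr_info("exception %lu at %lx\n", cause, epc);
	}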
...
@@ -7,8 +7,8 @@
  */

-#ifndef __ASM_CURRENT_H
-#define __ASM_CURRENT_H
+#ifndef _ASM_RISCV_CURRENT_H
+#define _ASM_RISCV_CURRENT_H

 #include <linux/bug.h>
 #include <linux/compiler.h>

@@ -34,4 +34,4 @@ static __always_inline struct task_struct *get_current(void)

 #endif /* __ASSEMBLY__ */

-#endif /* __ASM_CURRENT_H */
+#endif /* _ASM_RISCV_CURRENT_H */
...
@@ -56,16 +56,16 @@ extern unsigned long elf_hwcap;
  */
 #define ELF_PLATFORM	(NULL)

+#ifdef CONFIG_MMU
 #define ARCH_DLINFO						\
 do {								\
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
 		(elf_addr_t)current->mm->context.vdso);		\
 } while (0)
 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
 struct linux_binprm;
 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 	int uses_interp);
+#endif /* CONFIG_MMU */

 #endif /* _ASM_RISCV_ELF_H */
...
@@ -11,6 +11,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>

+#ifdef CONFIG_MMU
 /*
  * Here we define all the compile-time 'special' virtual addresses.
  * The point is to have a constant address at compile time, but to

@@ -42,4 +43,5 @@ extern void __set_fixmap(enum fixed_addresses idx,

 #include <asm-generic/fixmap.h>

+#endif /* CONFIG_MMU */
 #endif /* _ASM_RISCV_FIXMAP_H */
...
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (C) 2017 Andes Technology Corporation */
+#ifndef _ASM_RISCV_FTRACE_H
+#define _ASM_RISCV_FTRACE_H

 /*
  * The graph frame test is not possible if CONFIG_FRAME_POINTER is not enabled.
  * Check arch/riscv/kernel/mcount.S for detail.

@@ -64,3 +67,5 @@ do {									\
  */
 #define MCOUNT_INSN_SIZE 8
 #endif
+
+#endif /* _ASM_RISCV_FTRACE_H */
...
@@ -4,14 +4,20 @@
  * Copyright (c) 2018 Jim Wilson (jimw@sifive.com)
  */

-#ifndef _ASM_FUTEX_H
-#define _ASM_FUTEX_H
+#ifndef _ASM_RISCV_FUTEX_H
+#define _ASM_RISCV_FUTEX_H

 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 #include <asm/asm.h>

+/* We don't even really need the extable code, but for now keep it simple */
+#ifndef CONFIG_MMU
+#define __enable_user_access()		do { } while (0)
+#define __disable_user_access()		do { } while (0)
+#endif
+
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
 {								\
 	uintptr_t tmp;						\

@@ -112,4 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	return ret;
 }

-#endif /* _ASM_FUTEX_H */
+#endif /* _ASM_RISCV_FUTEX_H */
...
@@ -5,8 +5,8 @@
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2017 SiFive
  */

-#ifndef __ASM_HWCAP_H
-#define __ASM_HWCAP_H
+#ifndef _ASM_RISCV_HWCAP_H
+#define _ASM_RISCV_HWCAP_H

 #include <uapi/asm/hwcap.h>

@@ -23,4 +23,5 @@ enum {
 extern unsigned long elf_hwcap;
 #endif
-#endif
+
+#endif /* _ASM_RISCV_HWCAP_H */
...
 /* SPDX-License-Identifier: GPL-2.0 */

-#ifndef __ASM_IMAGE_H
-#define __ASM_IMAGE_H
+#ifndef _ASM_RISCV_IMAGE_H
+#define _ASM_RISCV_IMAGE_H

 #define RISCV_IMAGE_MAGIC	"RISCV\0\0\0"
 #define RISCV_IMAGE_MAGIC2	"RSC\x05"

@@ -62,4 +62,4 @@ struct riscv_image_header {
 	u32 res4;
 };
 #endif /* __ASSEMBLY__ */

-#endif /* __ASM_IMAGE_H */
+#endif /* _ASM_RISCV_IMAGE_H */
...
@@ -15,158 +15,19 @@
 #include <asm/mmiowb.h>
 #include <asm/pgtable.h>

-extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
-
-/*
- * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
- * change the properties of memory regions. This should be fixed by the
- * upcoming platform spec.
- */
-#define ioremap_nocache(addr, size) ioremap((addr), (size))
-#define ioremap_wc(addr, size) ioremap((addr), (size))
-#define ioremap_wt(addr, size) ioremap((addr), (size))
-
-extern void iounmap(volatile void __iomem *addr);
-
-/* Generic IO read/write.  These perform native-endian accesses. */
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
-{
-	asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
-{
-	asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
-{
-	asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-
-#ifdef CONFIG_64BIT
-#define __raw_writeq __raw_writeq
-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
-{
-	asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
-}
-#endif
-
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
-	u8 val;
-
-	asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
-	return val;
-}
-
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
-	u16 val;
-
-	asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
-	return val;
-}
-
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
-{
-	u32 val;
-
-	asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
-	return val;
-}
-
-#ifdef CONFIG_64BIT
-#define __raw_readq __raw_readq
-static inline u64 __raw_readq(const volatile void __iomem *addr)
-{
-	u64 val;
-
-	asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
-	return val;
-}
-#endif
-
 /*
- * Unordered I/O memory access primitives.  These are even more relaxed than
- * the relaxed versions, as they don't even order accesses between successive
- * operations to the I/O regions.
+ * MMIO access functions are separated out to break dependency cycles
+ * when using {read,write}* fns in low-level headers
  */
-#define readb_cpu(c)		({ u8  __r = __raw_readb(c); __r; })
-#define readw_cpu(c)		({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
-#define readl_cpu(c)		({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
-
-#define writeb_cpu(v,c)		((void)__raw_writeb((v),(c)))
-#define writew_cpu(v,c)		((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
-#define writel_cpu(v,c)		((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
-
-#ifdef CONFIG_64BIT
-#define readq_cpu(c)		({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
-#define writeq_cpu(v,c)		((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
-#endif
-
-/*
- * Relaxed I/O memory access primitives. These follow the Device memory
- * ordering rules but do not guarantee any ordering relative to Normal memory
- * accesses.  These are defined to order the indicated access (either a read or
- * write) with all other I/O memory accesses. Since the platform specification
- * defines that all I/O regions are strongly ordered on channel 2, no explicit
- * fences are required to enforce this ordering.
- */
-/* FIXME: These are now the same as asm-generic */
-#define __io_rbr()		do {} while (0)
-#define __io_rar()		do {} while (0)
-#define __io_rbw()		do {} while (0)
-#define __io_raw()		do {} while (0)
-
-#define readb_relaxed(c)	({ u8  __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
-#define readw_relaxed(c)	({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
-#define readl_relaxed(c)	({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
-
-#define writeb_relaxed(v,c)	({ __io_rbw(); writeb_cpu((v),(c)); __io_raw(); })
-#define writew_relaxed(v,c)	({ __io_rbw(); writew_cpu((v),(c)); __io_raw(); })
-#define writel_relaxed(v,c)	({ __io_rbw(); writel_cpu((v),(c)); __io_raw(); })
-
-#ifdef CONFIG_64BIT
-#define readq_relaxed(c)	({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
-#define writeq_relaxed(v,c)	({ __io_rbw(); writeq_cpu((v),(c)); __io_raw(); })
-#endif
-
-/*
- * I/O memory access primitives. Reads are ordered relative to any
- * following Normal memory access. Writes are ordered relative to any prior
- * Normal memory access.  The memory barriers here are necessary as RISC-V
- * doesn't define any ordering between the memory space and the I/O space.
- */
-#define __io_br()	do {} while (0)
-#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory");
-#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory");
-#define __io_aw()	mmiowb_set_pending()
-
-#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
-#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
-#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
-
-#define writeb(v,c)	({ __io_bw(); writeb_cpu((v),(c)); __io_aw(); })
-#define writew(v,c)	({ __io_bw(); writew_cpu((v),(c)); __io_aw(); })
-#define writel(v,c)	({ __io_bw(); writel_cpu((v),(c)); __io_aw(); })
-
-#ifdef CONFIG_64BIT
-#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
-#define writeq(v,c)	({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
-#endif
+#include <asm/mmio.h>

 /*
  * I/O port access constants.
  */
+#ifdef CONFIG_MMU
 #define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
 #define PCI_IOBASE		((void __iomem *)PCI_IO_START)
+#endif /* CONFIG_MMU */

 /*
  * Emulation routines for the port-mapped IO space used by some PCI drivers.
...
@@ -13,31 +13,31 @@
 /* read interrupt enabled status */
 static inline unsigned long arch_local_save_flags(void)
 {
-	return csr_read(CSR_SSTATUS);
+	return csr_read(CSR_STATUS);
 }

 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-	csr_set(CSR_SSTATUS, SR_SIE);
+	csr_set(CSR_STATUS, SR_IE);
 }

 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-	csr_clear(CSR_SSTATUS, SR_SIE);
+	csr_clear(CSR_STATUS, SR_IE);
 }

 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-	return csr_read_clear(CSR_SSTATUS, SR_SIE);
+	return csr_read_clear(CSR_STATUS, SR_IE);
 }

 /* test flags */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & SR_SIE);
+	return !(flags & SR_IE);
 }

 /* test hardware interrupt enable bit */

@@ -49,7 +49,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	csr_set(CSR_SSTATUS, flags & SR_SIE);
+	csr_set(CSR_STATUS, flags & SR_IE);
 }

 #endif /* _ASM_RISCV_IRQFLAGS_H */
...
@@ -6,9 +6,9 @@
  * Copyright (C) 2017 SiFive
  */

-#ifndef _RISCV_KPROBES_H
-#define _RISCV_KPROBES_H
+#ifndef _ASM_RISCV_KPROBES_H
+#define _ASM_RISCV_KPROBES_H

 #include <asm-generic/kprobes.h>

-#endif /* _RISCV_KPROBES_H */
+#endif /* _ASM_RISCV_KPROBES_H */
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
 * which was based on arch/arm/include/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2014 Regents of the University of California
 */

#ifndef _ASM_RISCV_MMIO_H
#define _ASM_RISCV_MMIO_H

#include <linux/types.h>
#include <asm/mmiowb.h>

#ifdef CONFIG_MMU
void __iomem *ioremap(phys_addr_t offset, unsigned long size);

/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions. This should be fixed by the
 * upcoming platform spec.
 */
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_wc(addr, size) ioremap((addr), (size))
#define ioremap_wt(addr, size) ioremap((addr), (size))

void iounmap(volatile void __iomem *addr);
#else
#define pgprot_noncached(x)	(x)
#endif /* CONFIG_MMU */

/* Generic IO read/write.  These perform native-endian accesses. */
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
}

#define __raw_writel __raw_writel
static inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
}

#ifdef CONFIG_64BIT
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
}
#endif

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;

	asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;

	asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;

	asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}

#ifdef CONFIG_64BIT
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 val;

	asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
	return val;
}
#endif

/*
 * Unordered I/O memory access primitives.  These are even more relaxed than
 * the relaxed versions, as they don't even order accesses between successive
 * operations to the I/O regions.
 */
#define readb_cpu(c)		({ u8  __r = __raw_readb(c); __r; })
#define readw_cpu(c)		({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_cpu(c)		({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })

#define writeb_cpu(v, c)	((void)__raw_writeb((v), (c)))
#define writew_cpu(v, c)	((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
#define writel_cpu(v, c)	((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))

#ifdef CONFIG_64BIT
#define readq_cpu(c)		({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
#define writeq_cpu(v, c)	((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
#endif

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.  These are defined to order the indicated access (either a read or
 * write) with all other I/O memory accesses. Since the platform specification
 * defines that all I/O regions are strongly ordered on channel 2, no explicit
 * fences are required to enforce this ordering.
 */
/* FIXME: These are now the same as asm-generic */
#define __io_rbr()		do {} while (0)
#define __io_rar()		do {} while (0)
#define __io_rbw()		do {} while (0)
#define __io_raw()		do {} while (0)

#define readb_relaxed(c)	({ u8  __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
#define readw_relaxed(c)	({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
#define readl_relaxed(c)	({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })

#define writeb_relaxed(v, c)	({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
#define writew_relaxed(v, c)	({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
#define writel_relaxed(v, c)	({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })

#ifdef CONFIG_64BIT
#define readq_relaxed(c)	({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
#define writeq_relaxed(v, c)	({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
#endif

/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.  The memory barriers here are necessary as RISC-V
 * doesn't define any ordering between the memory space and the I/O space.
 */
#define __io_br()	do {} while (0)
#define __io_ar(v)	__asm__ __volatile__ ("fence i,r" : : : "memory")
#define __io_bw()	__asm__ __volatile__ ("fence w,o" : : : "memory")
#define __io_aw()	mmiowb_set_pending()

#define readb(c)	({ u8  __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
#define readw(c)	({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
#define readl(c)	({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })

#define writeb(v, c)	({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
#define writew(v, c)	({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
#define writel(v, c)	({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })

#ifdef CONFIG_64BIT
#define readq(c)	({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
#define writeq(v, c)	({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
#endif

#endif /* _ASM_RISCV_MMIO_H */
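A short driver-style sketch of the distinction the comments above draw between
the ordered and _relaxed accessors (hypothetical device and register offsets,
not part of this diff):

	/* poll a status register, then read data that must not be
	 * observed before the device reports ready */
	static u32 read_when_ready(void __iomem *regs)
	{
		/* _relaxed: no ordering against normal memory accesses;
		 * cheap enough for a tight polling loop */
		while (!(readl_relaxed(regs + 0x04) & 0x1))
			cpu_relax();

		/* ordered readl(): the "fence i,r" in __io_ar() keeps
		 * later memory accesses from moving before this read */
		return readl(regs + 0x08);
	}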
...
@@ -11,4 +11,4 @@

 #include <asm-generic/mmiowb.h>

-#endif	/* ASM_RISCV_MMIOWB_H */
+#endif	/* _ASM_RISCV_MMIOWB_H */
...
@@ -10,6 +10,9 @@
 #ifndef __ASSEMBLY__

 typedef struct {
+#ifndef CONFIG_MMU
+	unsigned long	end_brk;
+#endif
 	void *vdso;
 #ifdef CONFIG_SMP
 	/* A local icache flush is needed before user execution can resume. */
...
@@ -88,8 +88,14 @@ typedef struct page *pgtable_t;
 #define PTE_FMT "%08lx"
 #endif

+#ifdef CONFIG_MMU
 extern unsigned long va_pa_offset;
 extern unsigned long pfn_base;
+#define ARCH_PFN_OFFSET		(pfn_base)
+#else
+#define va_pa_offset		0
+#define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
+#endif /* CONFIG_MMU */

 extern unsigned long max_low_pfn;
 extern unsigned long min_low_pfn;

@@ -112,11 +118,9 @@ extern unsigned long min_low_pfn;

 #ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn) \
-	(((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
+	(((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
 #endif

-#define ARCH_PFN_OFFSET		(pfn_base)
-
 #endif /* __ASSEMBLY__ */

 #define virt_addr_valid(vaddr)	(pfn_valid(virt_to_pfn(vaddr)))
...
...
@@ -3,8 +3,8 @@
  * Copyright (C) 2016 SiFive
  */

-#ifndef __ASM_RISCV_PCI_H
-#define __ASM_RISCV_PCI_H
+#ifndef _ASM_RISCV_PCI_H
+#define _ASM_RISCV_PCI_H

 #include <linux/types.h>
 #include <linux/slab.h>

@@ -34,4 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 }
 #endif  /* CONFIG_PCI */

-#endif  /* __ASM_PCI_H */
+#endif  /* _ASM_RISCV_PCI_H */
...
@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <asm/tlb.h>

+#ifdef CONFIG_MMU
 #include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */

 static inline void pmd_populate_kernel(struct mm_struct *mm,

@@ -81,5 +82,6 @@ do {					\
 	pgtable_pte_page_dtor(pte);	\
 	tlb_remove_page((tlb), pte);	\
 } while (0)
+#endif /* CONFIG_MMU */

 #endif /* _ASM_RISCV_PGALLOC_H */
...
@@ -25,6 +25,7 @@
 #include <asm/pgtable-32.h>
 #endif /* CONFIG_64BIT */

+#ifdef CONFIG_MMU
 /* Number of entries in the page global directory */
 #define PTRS_PER_PGD    (PAGE_SIZE / sizeof(pgd_t))
 /* Number of entries in the page table */

@@ -32,7 +33,6 @@
 /* Number of PGD entries that a user-mode program can use */
 #define USER_PTRS_PER_PGD   (TASK_SIZE / PGDIR_SIZE)
-#define FIRST_USER_ADDRESS  0

 /* Page protection bits */
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

@@ -84,42 +84,6 @@ extern pgd_t swapper_pg_dir[];
 #define __S110	PAGE_SHARED_EXEC
 #define __S111	PAGE_SHARED_EXEC

-#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END      (PAGE_OFFSET - 1)
-#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
-
-#define PCI_IO_SIZE      SZ_16M
-
-/*
- * Roughly size the vmemmap space to be large enough to fit enough
- * struct pages to map half the virtual address space. Then
- * position vmemmap directly below the VMALLOC region.
- */
-#define VMEMMAP_SHIFT \
-	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END	(VMALLOC_START - 1)
-#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
-
-#define vmemmap		((struct page *)VMEMMAP_START)
-
-#define PCI_IO_END       VMEMMAP_START
-#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
-
-#define FIXADDR_TOP      PCI_IO_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE     PMD_SIZE
-#else
-#define FIXADDR_SIZE     PGDIR_SIZE
-#endif
-#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
-
-/*
- * ZERO_PAGE is a global shared page that is always zero,
- * used for zero-mapped memory areas, etc.
- */
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 static inline int pmd_present(pmd_t pmd)
 {
 	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
@@ -430,11 +394,34 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })

-#define kern_addr_valid(addr)   (1) /* FIXME */
+#define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END      (PAGE_OFFSET - 1)
+#define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)

-extern void *dtb_early_va;
-extern void setup_bootmem(void);
-extern void paging_init(void);
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END	(VMALLOC_START - 1)
+#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
+
+#define vmemmap		((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE      SZ_16M
+#define PCI_IO_END       VMEMMAP_START
+#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP      PCI_IO_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)

 /*
  * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.

@@ -446,6 +433,31 @@ extern void paging_init(void);
 #define TASK_SIZE FIXADDR_START
 #endif

+#else /* CONFIG_MMU */
+
+#define PAGE_KERNEL		__pgprot(0)
+#define swapper_pg_dir		NULL
+#define VMALLOC_START		0
+
+#define TASK_SIZE 0xffffffffUL
+
+#endif /* !CONFIG_MMU */
+
+#define kern_addr_valid(addr)   (1) /* FIXME */
+
+extern void *dtb_early_va;
+void setup_bootmem(void);
+void paging_init(void);
+
+#define FIRST_USER_ADDRESS  0
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
 #include <asm-generic/pgtable.h>

 #endif /* !__ASSEMBLY__ */
...
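A worked instance of the VMEMMAP_SHIFT sizing in the pgtable.h hunk above
(assumptions: Sv39 with CONFIG_VA_BITS=39, PAGE_SHIFT=12, and
STRUCT_PAGE_MAX_SHIFT=6, i.e. a struct page of at most 64 bytes):

	/*
	 * VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32
	 * VMEMMAP_SIZE  = 1UL << 32 = 4 GiB
	 *
	 * 4 GiB of vmemmap holds 2^26 struct pages, covering
	 * 2^26 * 4 KiB = 2^38 bytes: half of the 39-bit address space.
	 */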
...
@@ -42,7 +42,7 @@ struct thread_struct {
 	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE	\
 			    - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))

-#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->sepc)
+#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->epc)
 #define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)
...
@@ -12,7 +12,7 @@
 #ifndef __ASSEMBLY__

 struct pt_regs {
-	unsigned long sepc;
+	unsigned long epc;
 	unsigned long ra;
 	unsigned long sp;
 	unsigned long gp;

@@ -44,10 +44,10 @@ struct pt_regs {
 	unsigned long t4;
 	unsigned long t5;
 	unsigned long t6;
-	/* Supervisor CSRs */
-	unsigned long sstatus;
-	unsigned long sbadaddr;
-	unsigned long scause;
+	/* Supervisor/Machine CSRs */
+	unsigned long status;
+	unsigned long badaddr;
+	unsigned long cause;
 	/* a0 value before the syscall */
 	unsigned long orig_a0;
 };

@@ -58,18 +58,18 @@ struct pt_regs {
 #define REG_FMT "%08lx"
 #endif

-#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)
+#define user_mode(regs) (((regs)->status & SR_PP) == 0)

 /* Helpers for working with the instruction pointer */
 static inline unsigned long instruction_pointer(struct pt_regs *regs)
 {
-	return regs->sepc;
+	return regs->epc;
 }
 static inline void instruction_pointer_set(struct pt_regs *regs,
 					   unsigned long val)
 {
-	regs->sepc = val;
+	regs->epc = val;
 }

 #define profile_pc(regs) instruction_pointer(regs)
...
@@ -8,6 +8,7 @@

 #include <linux/types.h>

+#ifdef CONFIG_RISCV_SBI
 #define SBI_SET_TIMER 0
 #define SBI_CONSOLE_PUTCHAR 1
 #define SBI_CONSOLE_GETCHAR 2

@@ -93,5 +94,11 @@ static inline void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
 {
 	SBI_CALL_4(SBI_REMOTE_SFENCE_VMA_ASID, hart_mask, start, size, asid);
 }
+#else /* CONFIG_RISCV_SBI */
+/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
+void sbi_set_timer(uint64_t stime_value);
+void sbi_clear_ipi(void);
+void sbi_send_ipi(const unsigned long *hart_mask);
+void sbi_remote_fence_i(const unsigned long *hart_mask);
+#endif /* CONFIG_RISCV_SBI */

-#endif
+#endif /* _ASM_RISCV_SBI_H */
...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_SECCOMP_H
#define _ASM_SECCOMP_H
#include <asm/unistd.h>
#include <asm-generic/seccomp.h>
#endif /* _ASM_SECCOMP_H */
...
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SPARSEMEM_H
-#define __ASM_SPARSEMEM_H
+#ifndef _ASM_RISCV_SPARSEMEM_H
+#define _ASM_RISCV_SPARSEMEM_H

 #ifdef CONFIG_SPARSEMEM
 #define MAX_PHYSMEM_BITS	CONFIG_PA_BITS
 #define SECTION_SIZE_BITS	27
 #endif /* CONFIG_SPARSEMEM */

-#endif /* __ASM_SPARSEMEM_H */
+#endif /* _ASM_RISCV_SPARSEMEM_H */
...
@@ -22,4 +22,4 @@ typedef struct {

 #define __ARCH_RW_LOCK_UNLOCKED		{ 0 }

-#endif
+#endif /* _ASM_RISCV_SPINLOCK_TYPES_H */
...
@@ -17,19 +17,19 @@ extern void __fstate_restore(struct task_struct *restore_from);

 static inline void __fstate_clean(struct pt_regs *regs)
 {
-	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+	regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
 }

 static inline void fstate_off(struct task_struct *task,
 			      struct pt_regs *regs)
 {
-	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_OFF;
+	regs->status = (regs->status & ~SR_FS) | SR_FS_OFF;
 }

 static inline void fstate_save(struct task_struct *task,
 			       struct pt_regs *regs)
 {
-	if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
+	if ((regs->status & SR_FS) == SR_FS_DIRTY) {
 		__fstate_save(task);
 		__fstate_clean(regs);
 	}

@@ -38,7 +38,7 @@ static inline void fstate_save(struct task_struct *task,
 static inline void fstate_restore(struct task_struct *task,
 				  struct pt_regs *regs)
 {
-	if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
+	if ((regs->status & SR_FS) != SR_FS_OFF) {
 		__fstate_restore(task);
 		__fstate_clean(regs);
 	}

@@ -50,7 +50,7 @@ static inline void __switch_to_aux(struct task_struct *prev,
 	struct pt_regs *regs;

 	regs = task_pt_regs(prev);
-	if (unlikely(regs->sstatus & SR_SD))
+	if (unlikely(regs->status & SR_SD))
 		fstate_save(prev, regs);
 	fstate_restore(next, task_pt_regs(next));
 }
...
@@ -75,6 +75,7 @@ struct thread_info {
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_SYSCALL_TRACEPOINT	6	/* syscall tracepoint instrumentation */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing */
+#define TIF_SECCOMP		8	/* syscall secure computing */

 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)

@@ -82,11 +83,13 @@ struct thread_info {
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)

 #define _TIF_WORK_MASK \
 	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)

 #define _TIF_SYSCALL_WORK \
-	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT)
+	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
+	 _TIF_SECCOMP)

 #endif /* _ASM_RISCV_THREAD_INFO_H */
...
@@ -7,12 +7,25 @@
 #define _ASM_RISCV_TIMEX_H

 #include <asm/csr.h>
+#include <asm/mmio.h>

 typedef unsigned long cycles_t;

+extern u64 __iomem *riscv_time_val;
+extern u64 __iomem *riscv_time_cmp;
+
+#ifdef CONFIG_64BIT
+#define mmio_get_cycles()	readq_relaxed(riscv_time_val)
+#else
+#define mmio_get_cycles()	readl_relaxed(riscv_time_val)
+#define mmio_get_cycles_hi()	readl_relaxed(((u32 *)riscv_time_val) + 1)
+#endif
+
 static inline cycles_t get_cycles(void)
 {
-	return csr_read(CSR_TIME);
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		return csr_read(CSR_TIME);
+	return mmio_get_cycles();
 }
 #define get_cycles get_cycles

@@ -24,7 +37,9 @@ static inline u64 get_cycles64(void)
 #else /* CONFIG_64BIT */
 static inline u32 get_cycles_hi(void)
 {
-	return csr_read(CSR_TIMEH);
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		return csr_read(CSR_TIMEH);
+	return mmio_get_cycles_hi();
 }

 static inline u64 get_cycles64(void)
...
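The 32-bit get_cycles64() that the hunk above feeds into uses the classic
hi/lo/hi sequence for reading a 64-bit counter from two 32-bit halves; a
sketch of the technique (equivalent logic, not the verbatim kernel function):

	static inline u64 cycles64(void)
	{
		u32 hi, lo;

		do {
			hi = get_cycles_hi();
			lo = get_cycles();
			/* retry if the high half rolled over mid-read */
		} while (get_cycles_hi() != hi);

		return ((u64)hi << 32) | lo;
	}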
...
@@ -10,6 +10,7 @@
 #include <linux/mm_types.h>
 #include <asm/smp.h>

+#ifdef CONFIG_MMU
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");

@@ -20,14 +21,19 @@ static inline void local_flush_tlb_page(unsigned long addr)
 {
 	__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
 }
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all()			do { } while (0)
+#define local_flush_tlb_page(addr)		do { } while (0)
+#endif /* CONFIG_MMU */

-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
-#else /* CONFIG_SMP */
+#else /* CONFIG_SMP && CONFIG_MMU */

 #define flush_tlb_all() local_flush_tlb_all()
 #define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)

@@ -38,7 +44,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }

 #define flush_tlb_mm(mm) flush_tlb_all()
-#endif /* CONFIG_SMP */
+#endif /* !CONFIG_SMP || !CONFIG_MMU */

 /* Flush a range of kernel pages */
 static inline void flush_tlb_kernel_range(unsigned long start,
...
@@ -11,6 +11,7 @@
 /*
  * User space memory access functions
  */
+#ifdef CONFIG_MMU
 #include <linux/errno.h>
 #include <linux/compiler.h>
 #include <linux/thread_info.h>

@@ -475,4 +476,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 	__ret;							\
 })

+#else /* CONFIG_MMU */
+#include <asm-generic/uaccess.h>
+#endif /* CONFIG_MMU */
 #endif /* _ASM_RISCV_UACCESS_H */
...
@@ -9,8 +9,8 @@
  * (at your option) any later version.
  */

-#ifndef _UAPI_ASM_ELF_H
-#define _UAPI_ASM_ELF_H
+#ifndef _UAPI_ASM_RISCV_ELF_H
+#define _UAPI_ASM_RISCV_ELF_H

 #include <asm/ptrace.h>

@@ -95,4 +95,4 @@ typedef union __riscv_fp_state elf_fpregset_t;
 #define R_RISCV_32_PCREL	57

-#endif /* _UAPI_ASM_ELF_H */
+#endif /* _UAPI_ASM_RISCV_ELF_H */
...
@@ -5,8 +5,8 @@
  * Copyright (C) 2012 ARM Ltd.
  * Copyright (C) 2017 SiFive
  */

-#ifndef __UAPI_ASM_HWCAP_H
-#define __UAPI_ASM_HWCAP_H
+#ifndef _UAPI_ASM_RISCV_HWCAP_H
+#define _UAPI_ASM_RISCV_HWCAP_H

 /*
  * Linux saves the floating-point registers according to the ISA Linux is

@@ -22,4 +22,4 @@
 #define COMPAT_HWCAP_ISA_D	(1 << ('D' - 'A'))
 #define COMPAT_HWCAP_ISA_C	(1 << ('C' - 'A'))

-#endif
+#endif /* _UAPI_ASM_RISCV_HWCAP_H */
...@@ -5,8 +5,8 @@ ...@@ -5,8 +5,8 @@
* *
* This file was copied from arch/arm64/include/uapi/asm/ucontext.h * This file was copied from arch/arm64/include/uapi/asm/ucontext.h
*/ */
#ifndef _UAPI__ASM_UCONTEXT_H #ifndef _UAPI_ASM_RISCV_UCONTEXT_H
#define _UAPI__ASM_UCONTEXT_H #define _UAPI_ASM_RISCV_UCONTEXT_H
#include <linux/types.h> #include <linux/types.h>
...@@ -31,4 +31,4 @@ struct ucontext { ...@@ -31,4 +31,4 @@ struct ucontext {
struct sigcontext uc_mcontext; struct sigcontext uc_mcontext;
}; };
#endif /* _UAPI__ASM_UCONTEXT_H */ #endif /* _UAPI_ASM_RISCV_UCONTEXT_H */
...@@ -25,10 +25,10 @@ obj-y += time.o ...@@ -25,10 +25,10 @@ obj-y += time.o
obj-y += traps.o obj-y += traps.o
obj-y += riscv_ksyms.o obj-y += riscv_ksyms.o
obj-y += stacktrace.o obj-y += stacktrace.o
obj-y += vdso.o
obj-y += cacheinfo.o obj-y += cacheinfo.o
obj-y += vdso/ obj-$(CONFIG_MMU) += vdso.o vdso/
obj-$(CONFIG_RISCV_M_MODE) += clint.o
obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += smp.o
...@@ -41,5 +41,6 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o ...@@ -41,5 +41,6 @@ obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
obj-$(CONFIG_RISCV_SBI) += sbi.o
clean: clean:
...@@ -71,7 +71,7 @@ void asm_offsets(void) ...@@ -71,7 +71,7 @@ void asm_offsets(void)
OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr); OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
DEFINE(PT_SIZE, sizeof(struct pt_regs)); DEFINE(PT_SIZE, sizeof(struct pt_regs));
OFFSET(PT_SEPC, pt_regs, sepc); OFFSET(PT_EPC, pt_regs, epc);
OFFSET(PT_RA, pt_regs, ra); OFFSET(PT_RA, pt_regs, ra);
OFFSET(PT_FP, pt_regs, s0); OFFSET(PT_FP, pt_regs, s0);
OFFSET(PT_S0, pt_regs, s0); OFFSET(PT_S0, pt_regs, s0);
...@@ -105,9 +105,9 @@ void asm_offsets(void) ...@@ -105,9 +105,9 @@ void asm_offsets(void)
OFFSET(PT_T6, pt_regs, t6); OFFSET(PT_T6, pt_regs, t6);
OFFSET(PT_GP, pt_regs, gp); OFFSET(PT_GP, pt_regs, gp);
OFFSET(PT_ORIG_A0, pt_regs, orig_a0); OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
OFFSET(PT_SSTATUS, pt_regs, sstatus); OFFSET(PT_STATUS, pt_regs, status);
OFFSET(PT_SBADADDR, pt_regs, sbadaddr); OFFSET(PT_BADADDR, pt_regs, badaddr);
OFFSET(PT_SCAUSE, pt_regs, scause); OFFSET(PT_CAUSE, pt_regs, cause);
/* /*
* THREAD_{F,X}* might be larger than a S-type offset can handle, but * THREAD_{F,X}* might be larger than a S-type offset can handle, but
......
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Christoph Hellwig.
*/
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/types.h>
#include <asm/clint.h>
#include <asm/csr.h>
#include <asm/timex.h>
#include <asm/smp.h>
/*
* This is the layout used by the SiFive CLINT, which is also shared by the QEMU
* virt platform and the Kendryte K210, at least.
*/
#define CLINT_IPI_OFF 0
#define CLINT_TIME_CMP_OFF 0x4000
#define CLINT_TIME_VAL_OFF 0xbff8
u32 __iomem *clint_ipi_base;
void clint_init_boot_cpu(void)
{
struct device_node *np;
void __iomem *base;
np = of_find_compatible_node(NULL, NULL, "riscv,clint0");
if (!np)
	panic("clint not found");
base = of_iomap(np, 0);
if (!base)
panic("could not map CLINT");
clint_ipi_base = base + CLINT_IPI_OFF;
riscv_time_cmp = base + CLINT_TIME_CMP_OFF;
riscv_time_val = base + CLINT_TIME_VAL_OFF;
clint_clear_ipi(boot_cpu_hartid);
}
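The clint_clear_ipi() call above, and the send helpers used later in smp.c, are provided by asm/clint.h in this series. A hedged sketch of their shape, assuming one 32-bit MSIP word per hart starting at clint_ipi_base:

static inline void clint_send_ipi_single(unsigned long hartid)
{
	writel(1, clint_ipi_base + hartid);
}

static inline void clint_clear_ipi(unsigned long hartid)
{
	writel(0, clint_ipi_base + hartid);
}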
...@@ -46,51 +46,12 @@ int riscv_of_processor_hartid(struct device_node *node) ...@@ -46,51 +46,12 @@ int riscv_of_processor_hartid(struct device_node *node)
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
static void print_isa(struct seq_file *f, const char *orig_isa) static void print_isa(struct seq_file *f, const char *isa)
{ {
static const char *ext = "mafdcsu"; /* Print the entire ISA as it is */
const char *isa = orig_isa;
const char *e;
/*
* Linux doesn't support rv32e or rv128i, and we only support booting
* kernels on harts with the same ISA that the kernel is compiled for.
*/
#if defined(CONFIG_32BIT)
if (strncmp(isa, "rv32i", 5) != 0)
return;
#elif defined(CONFIG_64BIT)
if (strncmp(isa, "rv64i", 5) != 0)
return;
#endif
/* Print the base ISA, as we already know it's legal. */
seq_puts(f, "isa\t\t: "); seq_puts(f, "isa\t\t: ");
seq_write(f, isa, 5); seq_write(f, isa, strlen(isa));
isa += 5;
/*
* Check the rest of the ISA string for valid extensions, printing those
* we find. RISC-V ISA strings define an order, so we only print the
* extension bits when they're in order. Hide the supervisor (S)
* extension from userspace as it's not accessible from there.
*/
for (e = ext; *e != '\0'; ++e) {
if (isa[0] == e[0]) {
if (isa[0] != 's')
seq_write(f, isa, 1);
isa++;
}
}
seq_puts(f, "\n"); seq_puts(f, "\n");
/*
* If we were given an unsupported ISA in the device tree then print
* a bit of info describing what went wrong.
*/
if (isa[0] != '\0')
pr_info("unsupported ISA \"%s\" in device tree\n", orig_isa);
} }
static void print_mmu(struct seq_file *f, const char *mmu_type) static void print_mmu(struct seq_file *f, const char *mmu_type)
......
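The user-visible effect of the print_isa() rewrite: for a device-tree ISA string of "rv64imafdcsu", the old code validated extensions letter by letter and hid the supervisor extension, printing

isa		: rv64imafdcu

while the new code reports the string exactly as the device tree provides it:

isa		: rv64imafdcsu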
...@@ -26,14 +26,14 @@ ...@@ -26,14 +26,14 @@
/* /*
* If coming from userspace, preserve the user thread pointer and load * If coming from userspace, preserve the user thread pointer and load
* the kernel thread pointer. If we came from the kernel, sscratch * the kernel thread pointer. If we came from the kernel, the scratch
* will contain 0, and we should continue on the current TP. * register will contain 0, and we should continue on the current TP.
*/ */
csrrw tp, CSR_SSCRATCH, tp csrrw tp, CSR_SCRATCH, tp
bnez tp, _save_context bnez tp, _save_context
_restore_kernel_tpsp: _restore_kernel_tpsp:
csrr tp, CSR_SSCRATCH csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp) REG_S sp, TASK_TI_KERNEL_SP(tp)
_save_context: _save_context:
REG_S sp, TASK_TI_USER_SP(tp) REG_S sp, TASK_TI_USER_SP(tp)
...@@ -79,16 +79,16 @@ _save_context: ...@@ -79,16 +79,16 @@ _save_context:
li t0, SR_SUM | SR_FS li t0, SR_SUM | SR_FS
REG_L s0, TASK_TI_USER_SP(tp) REG_L s0, TASK_TI_USER_SP(tp)
csrrc s1, CSR_SSTATUS, t0 csrrc s1, CSR_STATUS, t0
csrr s2, CSR_SEPC csrr s2, CSR_EPC
csrr s3, CSR_STVAL csrr s3, CSR_TVAL
csrr s4, CSR_SCAUSE csrr s4, CSR_CAUSE
csrr s5, CSR_SSCRATCH csrr s5, CSR_SCRATCH
REG_S s0, PT_SP(sp) REG_S s0, PT_SP(sp)
REG_S s1, PT_SSTATUS(sp) REG_S s1, PT_STATUS(sp)
REG_S s2, PT_SEPC(sp) REG_S s2, PT_EPC(sp)
REG_S s3, PT_SBADADDR(sp) REG_S s3, PT_BADADDR(sp)
REG_S s4, PT_SCAUSE(sp) REG_S s4, PT_CAUSE(sp)
REG_S s5, PT_TP(sp) REG_S s5, PT_TP(sp)
.endm .endm
...@@ -97,7 +97,7 @@ _save_context: ...@@ -97,7 +97,7 @@ _save_context:
* registers from the stack. * registers from the stack.
*/ */
.macro RESTORE_ALL .macro RESTORE_ALL
REG_L a0, PT_SSTATUS(sp) REG_L a0, PT_STATUS(sp)
/* /*
* The current load reservation is effectively part of the processor's * The current load reservation is effectively part of the processor's
* state, in the sense that load reservations cannot be shared between * state, in the sense that load reservations cannot be shared between
...@@ -115,11 +115,11 @@ _save_context: ...@@ -115,11 +115,11 @@ _save_context:
* completes, implementations are allowed to expand reservations to be * completes, implementations are allowed to expand reservations to be
* arbitrarily large. * arbitrarily large.
*/ */
REG_L a2, PT_SEPC(sp) REG_L a2, PT_EPC(sp)
REG_SC x0, a2, PT_SEPC(sp) REG_SC x0, a2, PT_EPC(sp)
csrw CSR_SSTATUS, a0 csrw CSR_STATUS, a0
csrw CSR_SEPC, a2 csrw CSR_EPC, a2
REG_L x1, PT_RA(sp) REG_L x1, PT_RA(sp)
REG_L x3, PT_GP(sp) REG_L x3, PT_GP(sp)
...@@ -163,10 +163,10 @@ ENTRY(handle_exception) ...@@ -163,10 +163,10 @@ ENTRY(handle_exception)
SAVE_ALL SAVE_ALL
/* /*
* Set sscratch register to 0, so that if a recursive exception * Set the scratch register to 0, so that if a recursive exception
* occurs, the exception vector knows it came from the kernel * occurs, the exception vector knows it came from the kernel
*/ */
csrw CSR_SSCRATCH, x0 csrw CSR_SCRATCH, x0
/* Load the global pointer */ /* Load the global pointer */
.option push .option push
...@@ -185,11 +185,13 @@ ENTRY(handle_exception) ...@@ -185,11 +185,13 @@ ENTRY(handle_exception)
move a0, sp /* pt_regs */ move a0, sp /* pt_regs */
tail do_IRQ tail do_IRQ
1: 1:
/* Exceptions run with interrupts enabled or disabled /*
depending on the state of sstatus.SR_SPIE */ * Exceptions run with interrupts enabled or disabled depending on the
andi t0, s1, SR_SPIE * state of SR_PIE in m/sstatus.
*/
andi t0, s1, SR_PIE
beqz t0, 1f beqz t0, 1f
csrs CSR_SSTATUS, SR_SIE csrs CSR_STATUS, SR_IE
1: 1:
/* Handle syscalls */ /* Handle syscalls */
...@@ -217,7 +219,7 @@ handle_syscall: ...@@ -217,7 +219,7 @@ handle_syscall:
* scall instruction on sret * scall instruction on sret
*/ */
addi s2, s2, 0x4 addi s2, s2, 0x4
REG_S s2, PT_SEPC(sp) REG_S s2, PT_EPC(sp)
/* Trace syscalls, but only if requested by the user. */ /* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp) REG_L t0, TASK_TI_FLAGS(tp)
andi t0, t0, _TIF_SYSCALL_WORK andi t0, t0, _TIF_SYSCALL_WORK
...@@ -226,8 +228,25 @@ check_syscall_nr: ...@@ -226,8 +228,25 @@ check_syscall_nr:
/* Check to make sure we don't jump to a bogus syscall number. */ /* Check to make sure we don't jump to a bogus syscall number. */
li t0, __NR_syscalls li t0, __NR_syscalls
la s0, sys_ni_syscall la s0, sys_ni_syscall
/* Syscall number held in a7 */ /*
bgeu a7, t0, 1f * The tracer can change the syscall number to a valid or invalid value.
* We use the syscall_set_nr helper in syscall_trace_enter, thus we
* cannot trust the current value in a7 and have to reload it from
* the current task's pt_regs.
*/
REG_L a7, PT_A7(sp)
/*
* Syscall number held in a7.
* If the syscall number is above the allowed range, redirect to ni_syscall.
*/
bge a7, t0, 1f
/*
* Check if the syscall was rejected by the tracer or seccomp, i.e.,
* a7 == -1. If so, we pretend it was executed; any other negative
* syscall number is redirected to ni_syscall.
*/
li t1, -1
beq a7, t1, ret_from_syscall_rejected
blt a7, t1, 1f
/* Call syscall */
la s0, sys_call_table la s0, sys_call_table
slli t0, a7, RISCV_LGPTR slli t0, a7, RISCV_LGPTR
add s0, s0, t0 add s0, s0, t0
...@@ -238,15 +257,27 @@ check_syscall_nr: ...@@ -238,15 +257,27 @@ check_syscall_nr:
ret_from_syscall: ret_from_syscall:
/* Set user a0 to kernel a0 */ /* Set user a0 to kernel a0 */
REG_S a0, PT_A0(sp) REG_S a0, PT_A0(sp)
/*
* We didn't execute the actual syscall.
* If the syscall was rejected with SECCOMP_RET_ERRNO/TRACE, seccomp
* has already set the return value in the current task's pt_regs.
*/
ret_from_syscall_rejected:
/* Trace syscalls, but only if requested by the user. */ /* Trace syscalls, but only if requested by the user. */
REG_L t0, TASK_TI_FLAGS(tp) REG_L t0, TASK_TI_FLAGS(tp)
andi t0, t0, _TIF_SYSCALL_WORK andi t0, t0, _TIF_SYSCALL_WORK
bnez t0, handle_syscall_trace_exit bnez t0, handle_syscall_trace_exit
ret_from_exception: ret_from_exception:
REG_L s0, PT_SSTATUS(sp) REG_L s0, PT_STATUS(sp)
csrc CSR_SSTATUS, SR_SIE csrc CSR_STATUS, SR_IE
#ifdef CONFIG_RISCV_M_MODE
/* the MPP value is too large to be used as an immediate arg for addi */
li t0, SR_MPP
and s0, s0, t0
#else
andi s0, s0, SR_SPP andi s0, s0, SR_SPP
#endif
bnez s0, resume_kernel bnez s0, resume_kernel
resume_userspace: resume_userspace:
...@@ -260,14 +291,18 @@ resume_userspace: ...@@ -260,14 +291,18 @@ resume_userspace:
REG_S s0, TASK_TI_KERNEL_SP(tp) REG_S s0, TASK_TI_KERNEL_SP(tp)
/* /*
* Save TP into sscratch, so we can find the kernel data structures * Save TP into the scratch register, so we can find the kernel data
* again. * structures again.
*/ */
csrw CSR_SSCRATCH, tp csrw CSR_SCRATCH, tp
restore_all: restore_all:
RESTORE_ALL RESTORE_ALL
#ifdef CONFIG_RISCV_M_MODE
mret
#else
sret sret
#endif
#if IS_ENABLED(CONFIG_PREEMPT) #if IS_ENABLED(CONFIG_PREEMPT)
resume_kernel: resume_kernel:
...@@ -287,7 +322,7 @@ work_pending: ...@@ -287,7 +322,7 @@ work_pending:
bnez s1, work_resched bnez s1, work_resched
work_notifysig: work_notifysig:
/* Handle pending signals and notify-resume requests */ /* Handle pending signals and notify-resume requests */
csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */ csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
move a0, sp /* pt_regs */ move a0, sp /* pt_regs */
move a1, s0 /* current_thread_info->flags */ move a1, s0 /* current_thread_info->flags */
tail do_notify_resume tail do_notify_resume
...@@ -386,6 +421,10 @@ ENTRY(__switch_to) ...@@ -386,6 +421,10 @@ ENTRY(__switch_to)
ret ret
ENDPROC(__switch_to) ENDPROC(__switch_to)
#ifndef CONFIG_MMU
#define do_page_fault do_trap_unknown
#endif
.section ".rodata" .section ".rodata"
/* Exception vector table */ /* Exception vector table */
ENTRY(excp_vect_table) ENTRY(excp_vect_table)
...@@ -407,3 +446,10 @@ ENTRY(excp_vect_table) ...@@ -407,3 +446,10 @@ ENTRY(excp_vect_table)
RISCV_PTR do_page_fault /* store page fault */ RISCV_PTR do_page_fault /* store page fault */
excp_vect_table_end: excp_vect_table_end:
END(excp_vect_table) END(excp_vect_table)
#ifndef CONFIG_MMU
ENTRY(__user_rt_sigreturn)
li a7, __NR_rt_sigreturn
scall
END(__user_rt_sigreturn)
#endif
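Taken together, the check_syscall_nr changes make the dispatch behave roughly like this C sketch (an illustration of the assembly above, not kernel source):

long dispatch_syscall(struct pt_regs *regs)
{
	long nr = regs->a7;	/* reloaded: a tracer may have rewritten it */

	if (nr >= __NR_syscalls || nr < -1)
		return sys_ni_syscall();
	if (nr == -1)		/* rejected by ptrace or seccomp */
		return regs->a0;	/* seccomp already set the return value */
	return sys_call_table[nr](regs);
}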
...@@ -23,7 +23,7 @@ ENTRY(__fstate_save) ...@@ -23,7 +23,7 @@ ENTRY(__fstate_save)
li a2, TASK_THREAD_F0 li a2, TASK_THREAD_F0
add a0, a0, a2 add a0, a0, a2
li t1, SR_FS li t1, SR_FS
csrs CSR_SSTATUS, t1 csrs CSR_STATUS, t1
frcsr t0 frcsr t0
fsd f0, TASK_THREAD_F0_F0(a0) fsd f0, TASK_THREAD_F0_F0(a0)
fsd f1, TASK_THREAD_F1_F0(a0) fsd f1, TASK_THREAD_F1_F0(a0)
...@@ -58,7 +58,7 @@ ENTRY(__fstate_save) ...@@ -58,7 +58,7 @@ ENTRY(__fstate_save)
fsd f30, TASK_THREAD_F30_F0(a0) fsd f30, TASK_THREAD_F30_F0(a0)
fsd f31, TASK_THREAD_F31_F0(a0) fsd f31, TASK_THREAD_F31_F0(a0)
sw t0, TASK_THREAD_FCSR_F0(a0) sw t0, TASK_THREAD_FCSR_F0(a0)
csrc CSR_SSTATUS, t1 csrc CSR_STATUS, t1
ret ret
ENDPROC(__fstate_save) ENDPROC(__fstate_save)
...@@ -67,7 +67,7 @@ ENTRY(__fstate_restore) ...@@ -67,7 +67,7 @@ ENTRY(__fstate_restore)
add a0, a0, a2 add a0, a0, a2
li t1, SR_FS li t1, SR_FS
lw t0, TASK_THREAD_FCSR_F0(a0) lw t0, TASK_THREAD_FCSR_F0(a0)
csrs CSR_SSTATUS, t1 csrs CSR_STATUS, t1
fld f0, TASK_THREAD_F0_F0(a0) fld f0, TASK_THREAD_F0_F0(a0)
fld f1, TASK_THREAD_F1_F0(a0) fld f1, TASK_THREAD_F1_F0(a0)
fld f2, TASK_THREAD_F2_F0(a0) fld f2, TASK_THREAD_F2_F0(a0)
...@@ -101,6 +101,6 @@ ENTRY(__fstate_restore) ...@@ -101,6 +101,6 @@ ENTRY(__fstate_restore)
fld f30, TASK_THREAD_F30_F0(a0) fld f30, TASK_THREAD_F30_F0(a0)
fld f31, TASK_THREAD_F31_F0(a0) fld f31, TASK_THREAD_F31_F0(a0)
fscsr t0 fscsr t0
csrc CSR_SSTATUS, t1 csrc CSR_STATUS, t1
ret ret
ENDPROC(__fstate_restore) ENDPROC(__fstate_restore)
...@@ -11,6 +11,7 @@ ...@@ -11,6 +11,7 @@
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/csr.h> #include <asm/csr.h>
#include <asm/hwcap.h>
#include <asm/image.h> #include <asm/image.h>
__INIT __INIT
...@@ -47,8 +48,22 @@ ENTRY(_start) ...@@ -47,8 +48,22 @@ ENTRY(_start)
.global _start_kernel .global _start_kernel
_start_kernel: _start_kernel:
/* Mask all interrupts */ /* Mask all interrupts */
csrw CSR_SIE, zero csrw CSR_IE, zero
csrw CSR_SIP, zero csrw CSR_IP, zero
#ifdef CONFIG_RISCV_M_MODE
/* flush the instruction cache */
fence.i
/* Reset all registers except ra, a0, a1 */
call reset_regs
/*
* The hartid in a0 is expected later on, and we have no firmware
* to hand it to us.
*/
csrr a0, CSR_MHARTID
#endif /* CONFIG_RISCV_M_MODE */
/* Load the global pointer */ /* Load the global pointer */
.option push .option push
...@@ -61,7 +76,7 @@ _start_kernel: ...@@ -61,7 +76,7 @@ _start_kernel:
* floating point in kernel space * floating point in kernel space
*/ */
li t0, SR_FS li t0, SR_FS
csrc CSR_SSTATUS, t0 csrc CSR_STATUS, t0
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
li t0, CONFIG_NR_CPUS li t0, CONFIG_NR_CPUS
...@@ -94,8 +109,10 @@ clear_bss_done: ...@@ -94,8 +109,10 @@ clear_bss_done:
la sp, init_thread_union + THREAD_SIZE la sp, init_thread_union + THREAD_SIZE
mv a0, s1 mv a0, s1
call setup_vm call setup_vm
#ifdef CONFIG_MMU
la a0, early_pg_dir la a0, early_pg_dir
call relocate call relocate
#endif /* CONFIG_MMU */
/* Restore C environment */ /* Restore C environment */
la tp, init_task la tp, init_task
...@@ -106,6 +123,7 @@ clear_bss_done: ...@@ -106,6 +123,7 @@ clear_bss_done:
call parse_dtb call parse_dtb
tail start_kernel tail start_kernel
#ifdef CONFIG_MMU
relocate: relocate:
/* Relocate return address */ /* Relocate return address */
li a1, PAGE_OFFSET li a1, PAGE_OFFSET
...@@ -116,7 +134,7 @@ relocate: ...@@ -116,7 +134,7 @@ relocate:
/* Point stvec to virtual address of instruction after satp write */ /* Point stvec to virtual address of instruction after satp write */
la a2, 1f la a2, 1f
add a2, a2, a1 add a2, a2, a1
csrw CSR_STVEC, a2 csrw CSR_TVEC, a2
/* Compute satp for kernel page tables, but don't load it yet */ /* Compute satp for kernel page tables, but don't load it yet */
srl a2, a0, PAGE_SHIFT srl a2, a0, PAGE_SHIFT
...@@ -138,7 +156,7 @@ relocate: ...@@ -138,7 +156,7 @@ relocate:
1: 1:
/* Set trap vector to spin forever to help debug */ /* Set trap vector to spin forever to help debug */
la a0, .Lsecondary_park la a0, .Lsecondary_park
csrw CSR_STVEC, a0 csrw CSR_TVEC, a0
/* Reload the global pointer */ /* Reload the global pointer */
.option push .option push
...@@ -156,12 +174,13 @@ relocate: ...@@ -156,12 +174,13 @@ relocate:
sfence.vma sfence.vma
ret ret
#endif /* CONFIG_MMU */
.Lsecondary_start: .Lsecondary_start:
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* Set trap vector to spin forever to help debug */ /* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park la a3, .Lsecondary_park
csrw CSR_STVEC, a3 csrw CSR_TVEC, a3
slli a3, a0, LGREG slli a3, a0, LGREG
la a1, __cpu_up_stack_pointer la a1, __cpu_up_stack_pointer
...@@ -181,9 +200,11 @@ relocate: ...@@ -181,9 +200,11 @@ relocate:
beqz tp, .Lwait_for_cpu_up beqz tp, .Lwait_for_cpu_up
fence fence
#ifdef CONFIG_MMU
/* Enable virtual memory and relocate to virtual address */ /* Enable virtual memory and relocate to virtual address */
la a0, swapper_pg_dir la a0, swapper_pg_dir
call relocate call relocate
#endif
tail smp_callin tail smp_callin
#endif #endif
...@@ -195,6 +216,85 @@ relocate: ...@@ -195,6 +216,85 @@ relocate:
j .Lsecondary_park j .Lsecondary_park
END(_start) END(_start)
#ifdef CONFIG_RISCV_M_MODE
ENTRY(reset_regs)
li sp, 0
li gp, 0
li tp, 0
li t0, 0
li t1, 0
li t2, 0
li s0, 0
li s1, 0
li a2, 0
li a3, 0
li a4, 0
li a5, 0
li a6, 0
li a7, 0
li s2, 0
li s3, 0
li s4, 0
li s5, 0
li s6, 0
li s7, 0
li s8, 0
li s9, 0
li s10, 0
li s11, 0
li t3, 0
li t4, 0
li t5, 0
li t6, 0
csrw CSR_SCRATCH, 0
#ifdef CONFIG_FPU
csrr t0, CSR_MISA
andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
beqz t0, .Lreset_regs_done
li t1, SR_FS
csrs CSR_STATUS, t1
fmv.s.x f0, zero
fmv.s.x f1, zero
fmv.s.x f2, zero
fmv.s.x f3, zero
fmv.s.x f4, zero
fmv.s.x f5, zero
fmv.s.x f6, zero
fmv.s.x f7, zero
fmv.s.x f8, zero
fmv.s.x f9, zero
fmv.s.x f10, zero
fmv.s.x f11, zero
fmv.s.x f12, zero
fmv.s.x f13, zero
fmv.s.x f14, zero
fmv.s.x f15, zero
fmv.s.x f16, zero
fmv.s.x f17, zero
fmv.s.x f18, zero
fmv.s.x f19, zero
fmv.s.x f20, zero
fmv.s.x f21, zero
fmv.s.x f22, zero
fmv.s.x f23, zero
fmv.s.x f24, zero
fmv.s.x f25, zero
fmv.s.x f26, zero
fmv.s.x f27, zero
fmv.s.x f28, zero
fmv.s.x f29, zero
fmv.s.x f30, zero
fmv.s.x f31, zero
csrw fcsr, 0
/* note that the caller must clear SR_FS */
#endif /* CONFIG_FPU */
.Lreset_regs_done:
ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
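The misa probe in reset_regs works because misa allocates one bit per extension letter, which is the same encoding the COMPAT_HWCAP_ISA_* masks use; for reference:

#define MISA_EXT(ltr)	(1UL << ((ltr) - 'A'))	/* 'F' -> bit 5, 'D' -> bit 3 */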
__PAGE_ALIGNED_BSS __PAGE_ALIGNED_BSS
/* Empty zero page */ /* Empty zero page */
.balign PAGE_SIZE .balign PAGE_SIZE
...@@ -11,13 +11,6 @@ ...@@ -11,13 +11,6 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <asm/smp.h> #include <asm/smp.h>
/*
* Possible interrupt causes:
*/
#define INTERRUPT_CAUSE_SOFTWARE IRQ_S_SOFT
#define INTERRUPT_CAUSE_TIMER IRQ_S_TIMER
#define INTERRUPT_CAUSE_EXTERNAL IRQ_S_EXT
int arch_show_interrupts(struct seq_file *p, int prec) int arch_show_interrupts(struct seq_file *p, int prec)
{ {
show_ipi_stats(p, prec); show_ipi_stats(p, prec);
...@@ -29,12 +22,12 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) ...@@ -29,12 +22,12 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs); struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter(); irq_enter();
switch (regs->scause & ~SCAUSE_IRQ_FLAG) { switch (regs->cause & ~CAUSE_IRQ_FLAG) {
case INTERRUPT_CAUSE_TIMER: case IRQ_TIMER:
riscv_timer_interrupt(); riscv_timer_interrupt();
break; break;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
case INTERRUPT_CAUSE_SOFTWARE: case IRQ_SOFT:
/* /*
* We only use software interrupts to pass IPIs, so if a non-SMP * We only use software interrupts to pass IPIs, so if a non-SMP
* system gets one, then we don't know what to do. * system gets one, then we don't know what to do.
...@@ -42,11 +35,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs) ...@@ -42,11 +35,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
riscv_software_interrupt(); riscv_software_interrupt();
break; break;
#endif #endif
case INTERRUPT_CAUSE_EXTERNAL: case IRQ_EXT:
handle_arch_irq(regs); handle_arch_irq(regs);
break; break;
default: default:
pr_alert("unexpected interrupt cause 0x%lx", regs->scause); pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
BUG(); BUG();
} }
irq_exit(); irq_exit();
......
...@@ -67,7 +67,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry, ...@@ -67,7 +67,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
return; return;
fp = regs->s0; fp = regs->s0;
perf_callchain_store(entry, regs->sepc); perf_callchain_store(entry, regs->epc);
fp = user_backtrace(entry, fp, regs->ra); fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack) while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
......
...@@ -35,8 +35,8 @@ void show_regs(struct pt_regs *regs) ...@@ -35,8 +35,8 @@ void show_regs(struct pt_regs *regs)
{ {
show_regs_print_info(KERN_DEFAULT); show_regs_print_info(KERN_DEFAULT);
pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n", pr_cont("epc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
regs->sepc, regs->ra, regs->sp); regs->epc, regs->ra, regs->sp);
pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n", pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
regs->gp, regs->tp, regs->t0); regs->gp, regs->tp, regs->t0);
pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n", pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
...@@ -58,23 +58,23 @@ void show_regs(struct pt_regs *regs) ...@@ -58,23 +58,23 @@ void show_regs(struct pt_regs *regs)
pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n", pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
regs->t5, regs->t6); regs->t5, regs->t6);
pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n", pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
regs->sstatus, regs->sbadaddr, regs->scause); regs->status, regs->badaddr, regs->cause);
} }
void start_thread(struct pt_regs *regs, unsigned long pc, void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp) unsigned long sp)
{ {
regs->sstatus = SR_SPIE; regs->status = SR_PIE;
if (has_fpu) { if (has_fpu) {
regs->sstatus |= SR_FS_INITIAL; regs->status |= SR_FS_INITIAL;
/* /*
* Restore the initial value to the FP register * Restore the initial value to the FP register
* before starting the user program. * before starting the user program.
*/ */
fstate_restore(current, regs); fstate_restore(current, regs);
} }
regs->sepc = pc; regs->epc = pc;
regs->sp = sp; regs->sp = sp;
set_fs(USER_DS); set_fs(USER_DS);
} }
...@@ -110,7 +110,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -110,7 +110,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
const register unsigned long gp __asm__ ("gp"); const register unsigned long gp __asm__ ("gp");
memset(childregs, 0, sizeof(struct pt_regs)); memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp; childregs->gp = gp;
childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */ /* Supervisor/Machine, irqs on: */
childregs->status = SR_PP | SR_PIE;
p->thread.ra = (unsigned long)ret_from_kernel_thread; p->thread.ra = (unsigned long)ret_from_kernel_thread;
p->thread.s[0] = usp; /* fn */ p->thread.s[0] = usp; /* fn */
......
...@@ -154,6 +154,16 @@ __visible void do_syscall_trace_enter(struct pt_regs *regs) ...@@ -154,6 +154,16 @@ __visible void do_syscall_trace_enter(struct pt_regs *regs)
if (tracehook_report_syscall_entry(regs)) if (tracehook_report_syscall_entry(regs))
syscall_set_nr(current, regs, -1); syscall_set_nr(current, regs, -1);
/*
* Do the secure computing after ptrace; failures should be fast.
* If this fails we might have a return value in a0 from seccomp
* (via SECCOMP_RET_ERRNO/TRACE).
*/
if (secure_computing(NULL) == -1) {
syscall_set_nr(current, regs, -1);
return;
}
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, syscall_get_nr(current, regs)); trace_sys_enter(regs, syscall_get_nr(current, regs));
......
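With secure_computing() wired into syscall entry, ordinary seccomp filters now work on RISC-V. A hedged userspace sketch exercising the SECCOMP_RET_ERRNO path that the entry.S changes above handle (error handling elided; not part of this diff):

#include <errno.h>
#include <stddef.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

static int deny_openat(void)
{
	struct sock_filter filter[] = {
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_openat, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ERRNO | EPERM),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(filter) / sizeof(filter[0]),
		.filter = filter,
	};

	/* no_new_privs lets an unprivileged task install the filter */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}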
...@@ -5,12 +5,11 @@ ...@@ -5,12 +5,11 @@
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/pm.h> #include <linux/pm.h>
#include <asm/sbi.h>
static void default_power_off(void) static void default_power_off(void)
{ {
sbi_shutdown(); while (1)
while (1); wait_for_interrupt();
} }
void (*pm_power_off)(void) = default_power_off; void (*pm_power_off)(void) = default_power_off;
......
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/init.h>
#include <linux/pm.h>
#include <asm/sbi.h>
static void sbi_power_off(void)
{
sbi_shutdown();
}
static int __init sbi_init(void)
{
pm_power_off = sbi_power_off;
return 0;
}
early_initcall(sbi_init);
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/sched/task.h> #include <linux/sched/task.h>
#include <linux/swiotlb.h> #include <linux/swiotlb.h>
#include <asm/clint.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -67,6 +68,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -67,6 +68,7 @@ void __init setup_arch(char **cmdline_p)
setup_bootmem(); setup_bootmem();
paging_init(); paging_init();
unflatten_device_tree(); unflatten_device_tree();
clint_init_boot_cpu();
#ifdef CONFIG_SWIOTLB #ifdef CONFIG_SWIOTLB
swiotlb_init(1); swiotlb_init(1);
......
...@@ -17,11 +17,16 @@ ...@@ -17,11 +17,16 @@
#include <asm/switch_to.h> #include <asm/switch_to.h>
#include <asm/csr.h> #include <asm/csr.h>
extern u32 __user_rt_sigreturn[2];
#define DEBUG_SIG 0 #define DEBUG_SIG 0
struct rt_sigframe { struct rt_sigframe {
struct siginfo info; struct siginfo info;
struct ucontext uc; struct ucontext uc;
#ifndef CONFIG_MMU
u32 sigreturn_code[2];
#endif
}; };
#ifdef CONFIG_FPU #ifdef CONFIG_FPU
...@@ -124,7 +129,7 @@ SYSCALL_DEFINE0(rt_sigreturn) ...@@ -124,7 +129,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
pr_info_ratelimited( pr_info_ratelimited(
"%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n", "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
task->comm, task_pid_nr(task), __func__, task->comm, task_pid_nr(task), __func__,
frame, (void *)regs->sepc, (void *)regs->sp); frame, (void *)regs->epc, (void *)regs->sp);
} }
force_sig(SIGSEGV); force_sig(SIGSEGV);
return 0; return 0;
...@@ -166,7 +171,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig, ...@@ -166,7 +171,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
return (void __user *)sp; return (void __user *)sp;
} }
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs) struct pt_regs *regs)
{ {
...@@ -189,8 +193,19 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, ...@@ -189,8 +193,19 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
return -EFAULT; return -EFAULT;
/* Set up to return from userspace. */ /* Set up to return from userspace. */
#ifdef CONFIG_MMU
regs->ra = (unsigned long)VDSO_SYMBOL( regs->ra = (unsigned long)VDSO_SYMBOL(
current->mm->context.vdso, rt_sigreturn); current->mm->context.vdso, rt_sigreturn);
#else
/*
* For the nommu case we don't have a VDSO. Instead we copy a
* two-instruction trampoline that invokes the rt_sigreturn syscall
* onto the user stack.
*/
if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
sizeof(frame->sigreturn_code)))
return -EFAULT;
regs->ra = (unsigned long)&frame->sigreturn_code;
#endif /* CONFIG_MMU */
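/*
 * Illustrative aside (assuming the generic syscall table, where
 * __NR_rt_sigreturn is 139): the trampoline copied above encodes
 * as { 0x08b00893, 0x00000073 }, i.e. "li a7, 139; ecall".
 */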
/* /*
* Set up registers for signal handler. * Set up registers for signal handler.
...@@ -199,7 +214,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, ...@@ -199,7 +214,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
* We always pass siginfo and mcontext, regardless of SA_SIGINFO, * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
* since some things rely on this (e.g. glibc's debug/segfault.c). * since some things rely on this (e.g. glibc's debug/segfault.c).
*/ */
regs->sepc = (unsigned long)ksig->ka.sa.sa_handler; regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
regs->sp = (unsigned long)frame; regs->sp = (unsigned long)frame;
regs->a0 = ksig->sig; /* a0: signal number */ regs->a0 = ksig->sig; /* a0: signal number */
regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */ regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
...@@ -208,7 +223,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, ...@@ -208,7 +223,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
#if DEBUG_SIG #if DEBUG_SIG
pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n", pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
current->comm, task_pid_nr(current), ksig->sig, current->comm, task_pid_nr(current), ksig->sig,
(void *)regs->sepc, (void *)regs->ra, frame); (void *)regs->epc, (void *)regs->ra, frame);
#endif #endif
return 0; return 0;
...@@ -220,10 +235,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) ...@@ -220,10 +235,9 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int ret; int ret;
/* Are we from a system call? */ /* Are we from a system call? */
if (regs->scause == EXC_SYSCALL) { if (regs->cause == EXC_SYSCALL) {
/* Avoid additional syscall restarting via ret_from_exception */ /* Avoid additional syscall restarting via ret_from_exception */
regs->scause = -1UL; regs->cause = -1UL;
/* If so, check system call restarting.. */ /* If so, check system call restarting.. */
switch (regs->a0) { switch (regs->a0) {
case -ERESTART_RESTARTBLOCK: case -ERESTART_RESTARTBLOCK:
...@@ -239,7 +253,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) ...@@ -239,7 +253,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/* fallthrough */ /* fallthrough */
case -ERESTARTNOINTR: case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0; regs->a0 = regs->orig_a0;
regs->sepc -= 0x4; regs->epc -= 0x4;
break; break;
} }
} }
...@@ -261,9 +275,9 @@ static void do_signal(struct pt_regs *regs) ...@@ -261,9 +275,9 @@ static void do_signal(struct pt_regs *regs)
} }
/* Did we come from a system call? */ /* Did we come from a system call? */
if (regs->scause == EXC_SYSCALL) { if (regs->cause == EXC_SYSCALL) {
/* Avoid additional syscall restarting via ret_from_exception */ /* Avoid additional syscall restarting via ret_from_exception */
regs->scause = -1UL; regs->cause = -1UL;
/* Restart the system call - no handlers present */ /* Restart the system call - no handlers present */
switch (regs->a0) { switch (regs->a0) {
...@@ -271,12 +285,12 @@ static void do_signal(struct pt_regs *regs) ...@@ -271,12 +285,12 @@ static void do_signal(struct pt_regs *regs)
case -ERESTARTSYS: case -ERESTARTSYS:
case -ERESTARTNOINTR: case -ERESTARTNOINTR:
regs->a0 = regs->orig_a0; regs->a0 = regs->orig_a0;
regs->sepc -= 0x4; regs->epc -= 0x4;
break; break;
case -ERESTART_RESTARTBLOCK: case -ERESTART_RESTARTBLOCK:
regs->a0 = regs->orig_a0; regs->a0 = regs->orig_a0;
regs->a7 = __NR_restart_syscall; regs->a7 = __NR_restart_syscall;
regs->sepc -= 0x4; regs->epc -= 0x4;
break; break;
} }
} }
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <asm/clint.h>
#include <asm/sbi.h> #include <asm/sbi.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
...@@ -92,7 +93,10 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op) ...@@ -92,7 +93,10 @@ static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
smp_mb__after_atomic(); smp_mb__after_atomic();
riscv_cpuid_to_hartid_mask(mask, &hartid_mask); riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
sbi_send_ipi(cpumask_bits(&hartid_mask)); if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_send_ipi(cpumask_bits(&hartid_mask));
else
clint_send_ipi_mask(&hartid_mask);
} }
static void send_ipi_single(int cpu, enum ipi_message_type op) static void send_ipi_single(int cpu, enum ipi_message_type op)
...@@ -103,12 +107,18 @@ static void send_ipi_single(int cpu, enum ipi_message_type op) ...@@ -103,12 +107,18 @@ static void send_ipi_single(int cpu, enum ipi_message_type op)
set_bit(op, &ipi_data[cpu].bits); set_bit(op, &ipi_data[cpu].bits);
smp_mb__after_atomic(); smp_mb__after_atomic();
sbi_send_ipi(cpumask_bits(cpumask_of(hartid))); if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
else
clint_send_ipi_single(hartid);
} }
static inline void clear_ipi(void) static inline void clear_ipi(void)
{ {
csr_clear(CSR_SIP, SIE_SSIE); if (IS_ENABLED(CONFIG_RISCV_SBI))
csr_clear(CSR_IP, IE_SIE);
else
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
} }
void riscv_software_interrupt(void) void riscv_software_interrupt(void)
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/of.h> #include <linux/of.h>
#include <linux/sched/task_stack.h> #include <linux/sched/task_stack.h>
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <asm/clint.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
...@@ -137,6 +138,9 @@ asmlinkage __visible void __init smp_callin(void) ...@@ -137,6 +138,9 @@ asmlinkage __visible void __init smp_callin(void)
{ {
struct mm_struct *mm = &init_mm; struct mm_struct *mm = &init_mm;
if (!IS_ENABLED(CONFIG_RISCV_SBI))
clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
/* All kernel threads share the same mm context. */ /* All kernel threads share the same mm context. */
mmgrab(mm); mmgrab(mm);
current->active_mm = mm; current->active_mm = mm;
......
...@@ -41,7 +41,7 @@ void die(struct pt_regs *regs, const char *str) ...@@ -41,7 +41,7 @@ void die(struct pt_regs *regs, const char *str)
print_modules(); print_modules();
show_regs(regs); show_regs(regs);
ret = notify_die(DIE_OOPS, str, regs, 0, regs->scause, SIGSEGV); ret = notify_die(DIE_OOPS, str, regs, 0, regs->cause, SIGSEGV);
bust_spinlocks(0); bust_spinlocks(0);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
...@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code, ...@@ -86,7 +86,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
#define DO_ERROR_INFO(name, signo, code, str) \ #define DO_ERROR_INFO(name, signo, code, str) \
asmlinkage __visible void name(struct pt_regs *regs) \ asmlinkage __visible void name(struct pt_regs *regs) \
{ \ { \
do_trap_error(regs, signo, code, regs->sepc, "Oops - " str); \ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
} }
DO_ERROR_INFO(do_trap_unknown, DO_ERROR_INFO(do_trap_unknown,
...@@ -124,9 +124,9 @@ static inline unsigned long get_break_insn_length(unsigned long pc) ...@@ -124,9 +124,9 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
asmlinkage __visible void do_trap_break(struct pt_regs *regs) asmlinkage __visible void do_trap_break(struct pt_regs *regs)
{ {
if (user_mode(regs)) if (user_mode(regs))
force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc); force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN) else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
regs->sepc += get_break_insn_length(regs->sepc); regs->epc += get_break_insn_length(regs->epc);
else else
die(regs, "Kernel BUG"); die(regs, "Kernel BUG");
} }
...@@ -153,9 +153,9 @@ void __init trap_init(void) ...@@ -153,9 +153,9 @@ void __init trap_init(void)
* Set sup0 scratch register to 0, indicating to exception vector * Set sup0 scratch register to 0, indicating to exception vector
* that we are presently executing in the kernel * that we are presently executing in the kernel
*/ */
csr_write(CSR_SSCRATCH, 0); csr_write(CSR_SCRATCH, 0);
/* Set the exception vector address */ /* Set the exception vector address */
csr_write(CSR_STVEC, &handle_exception); csr_write(CSR_TVEC, &handle_exception);
/* Enable all interrupts */ /* Enable all interrupts */
csr_write(CSR_SIE, -1); csr_write(CSR_IE, -1);
} }
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
lib-y += delay.o lib-y += delay.o
lib-y += memcpy.o lib-y += memcpy.o
lib-y += memset.o lib-y += memset.o
lib-y += uaccess.o lib-$(CONFIG_MMU) += uaccess.o
lib-$(CONFIG_64BIT) += tishift.o lib-$(CONFIG_64BIT) += tishift.o
...@@ -18,7 +18,7 @@ ENTRY(__asm_copy_from_user) ...@@ -18,7 +18,7 @@ ENTRY(__asm_copy_from_user)
/* Enable access to user memory */ /* Enable access to user memory */
li t6, SR_SUM li t6, SR_SUM
csrs CSR_SSTATUS, t6 csrs CSR_STATUS, t6
add a3, a1, a2 add a3, a1, a2
/* Use word-oriented copy only if low-order bits match */ /* Use word-oriented copy only if low-order bits match */
...@@ -47,7 +47,7 @@ ENTRY(__asm_copy_from_user) ...@@ -47,7 +47,7 @@ ENTRY(__asm_copy_from_user)
3: 3:
/* Disable access to user memory */ /* Disable access to user memory */
csrc CSR_SSTATUS, t6 csrc CSR_STATUS, t6
li a0, 0 li a0, 0
ret ret
4: /* Edge case: unalignment */ 4: /* Edge case: unalignment */
...@@ -72,7 +72,7 @@ ENTRY(__clear_user) ...@@ -72,7 +72,7 @@ ENTRY(__clear_user)
/* Enable access to user memory */ /* Enable access to user memory */
li t6, SR_SUM li t6, SR_SUM
csrs CSR_SSTATUS, t6 csrs CSR_STATUS, t6
add a3, a0, a1 add a3, a0, a1
addi t0, a0, SZREG-1 addi t0, a0, SZREG-1
...@@ -94,7 +94,7 @@ ENTRY(__clear_user) ...@@ -94,7 +94,7 @@ ENTRY(__clear_user)
3: 3:
/* Disable access to user memory */ /* Disable access to user memory */
csrc CSR_SSTATUS, t6 csrc CSR_STATUS, t6
li a0, 0 li a0, 0
ret ret
4: /* Edge case: unalignment */ 4: /* Edge case: unalignment */
...@@ -114,11 +114,11 @@ ENDPROC(__clear_user) ...@@ -114,11 +114,11 @@ ENDPROC(__clear_user)
/* Fixup code for __copy_user(10) and __clear_user(11) */ /* Fixup code for __copy_user(10) and __clear_user(11) */
10: 10:
/* Disable access to user memory */ /* Disable access to user memory */
csrc CSR_SSTATUS, t6 csrc CSR_STATUS, t6
mv a0, a2 mv a0, a2
ret ret
11: 11:
csrc CSR_SSTATUS, t6 csrc CSR_STATUS, t6
mv a0, a1 mv a0, a1
ret ret
.previous .previous
...@@ -6,9 +6,8 @@ CFLAGS_REMOVE_init.o = -pg ...@@ -6,9 +6,8 @@ CFLAGS_REMOVE_init.o = -pg
endif endif
obj-y += init.o obj-y += init.o
obj-y += fault.o
obj-y += extable.o obj-y += extable.o
obj-y += ioremap.o obj-$(CONFIG_MMU) += fault.o ioremap.o
obj-y += cacheflush.o obj-y += cacheflush.o
obj-y += context.o obj-y += context.o
obj-y += sifive_l2_cache.o obj-y += sifive_l2_cache.o
......
...@@ -10,9 +10,17 @@ ...@@ -10,9 +10,17 @@
#include <asm/sbi.h> #include <asm/sbi.h>
static void ipi_remote_fence_i(void *info)
{
return local_flush_icache_all();
}
void flush_icache_all(void) void flush_icache_all(void)
{ {
sbi_remote_fence_i(NULL); if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_remote_fence_i(NULL);
else
on_each_cpu(ipi_remote_fence_i, NULL, 1);
} }
/* /*
...@@ -28,7 +36,7 @@ void flush_icache_all(void) ...@@ -28,7 +36,7 @@ void flush_icache_all(void)
void flush_icache_mm(struct mm_struct *mm, bool local) void flush_icache_mm(struct mm_struct *mm, bool local)
{ {
unsigned int cpu; unsigned int cpu;
cpumask_t others, hmask, *mask; cpumask_t others, *mask;
preempt_disable(); preempt_disable();
...@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local) ...@@ -46,10 +54,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
*/ */
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu)); cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others); local |= cpumask_empty(&others);
if (mm != current->active_mm || !local) { if (mm == current->active_mm && local) {
riscv_cpuid_to_hartid_mask(&others, &hmask);
sbi_remote_fence_i(hmask.bits);
} else {
/* /*
* It's assumed that at least one strongly ordered operation is * It's assumed that at least one strongly ordered operation is
* performed on this hart between setting a hart's cpumask bit * performed on this hart between setting a hart's cpumask bit
...@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local) ...@@ -59,6 +64,13 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
* with flush_icache_deferred(). * with flush_icache_deferred().
*/ */
smp_mb(); smp_mb();
} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
cpumask_t hartid_mask;
riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
sbi_remote_fence_i(cpumask_bits(&hartid_mask));
} else {
on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
} }
preempt_enable(); preempt_enable();
...@@ -66,6 +78,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local) ...@@ -66,6 +78,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#ifdef CONFIG_MMU
void flush_icache_pte(pte_t pte) void flush_icache_pte(pte_t pte)
{ {
struct page *page = pte_page(pte); struct page *page = pte_page(pte);
...@@ -73,3 +86,4 @@ void flush_icache_pte(pte_t pte) ...@@ -73,3 +86,4 @@ void flush_icache_pte(pte_t pte)
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) if (!test_and_set_bit(PG_dcache_clean, &page->flags))
flush_icache_all(); flush_icache_all();
} }
#endif /* CONFIG_MMU */
...@@ -58,8 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, ...@@ -58,8 +58,10 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next)); cpumask_set_cpu(cpu, mm_cpumask(next));
#ifdef CONFIG_MMU
csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE); csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
local_flush_tlb_all(); local_flush_tlb_all();
#endif
flush_icache_deferred(next); flush_icache_deferred(next);
} }
...@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs) ...@@ -15,9 +15,9 @@ int fixup_exception(struct pt_regs *regs)
{ {
const struct exception_table_entry *fixup; const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->sepc); fixup = search_exception_tables(regs->epc);
if (fixup) { if (fixup) {
regs->sepc = fixup->fixup; regs->epc = fixup->fixup;
return 1; return 1;
} }
return 0; return 0;
......
...@@ -34,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs) ...@@ -34,8 +34,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
int code = SEGV_MAPERR; int code = SEGV_MAPERR;
vm_fault_t fault; vm_fault_t fault;
cause = regs->scause; cause = regs->cause;
addr = regs->sbadaddr; addr = regs->badaddr;
tsk = current; tsk = current;
mm = tsk->mm; mm = tsk->mm;
...@@ -53,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs) ...@@ -53,7 +53,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
goto vmalloc_fault; goto vmalloc_fault;
/* Enable interrupts if they were enabled in the parent context. */ /* Enable interrupts if they were enabled in the parent context. */
if (likely(regs->sstatus & SR_SPIE)) if (likely(regs->status & SR_PIE))
local_irq_enable(); local_irq_enable();
/* /*
......
...@@ -26,6 +26,7 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] ...@@ -26,6 +26,7 @@ unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(empty_zero_page);
extern char _start[]; extern char _start[];
void *dtb_early_va;
static void __init zone_sizes_init(void) static void __init zone_sizes_init(void)
{ {
...@@ -40,7 +41,7 @@ static void __init zone_sizes_init(void) ...@@ -40,7 +41,7 @@ static void __init zone_sizes_init(void)
free_area_init_nodes(max_zone_pfns); free_area_init_nodes(max_zone_pfns);
} }
void setup_zero_page(void) static void setup_zero_page(void)
{ {
memset((void *)empty_zero_page, 0, PAGE_SIZE); memset((void *)empty_zero_page, 0, PAGE_SIZE);
} }
...@@ -142,12 +143,12 @@ void __init setup_bootmem(void) ...@@ -142,12 +143,12 @@ void __init setup_bootmem(void)
} }
} }
#ifdef CONFIG_MMU
unsigned long va_pa_offset; unsigned long va_pa_offset;
EXPORT_SYMBOL(va_pa_offset); EXPORT_SYMBOL(va_pa_offset);
unsigned long pfn_base; unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base); EXPORT_SYMBOL(pfn_base);
void *dtb_early_va;
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
...@@ -273,7 +274,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp, ...@@ -273,7 +274,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define get_pgd_next_virt(__pa) get_pmd_virt(__pa) #define get_pgd_next_virt(__pa) get_pmd_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \ #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pmd_mapping(__nextp, __va, __pa, __sz, __prot) create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE PMD_SIZE
#define fixmap_pgd_next fixmap_pmd #define fixmap_pgd_next fixmap_pmd
#else #else
#define pgd_next_t pte_t #define pgd_next_t pte_t
...@@ -281,7 +281,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp, ...@@ -281,7 +281,6 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
#define get_pgd_next_virt(__pa) get_pte_virt(__pa) #define get_pgd_next_virt(__pa) get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \ #define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot) create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define PTE_PARENT_SIZE PGDIR_SIZE
#define fixmap_pgd_next fixmap_pte #define fixmap_pgd_next fixmap_pte
#endif #endif
...@@ -314,14 +313,11 @@ static void __init create_pgd_mapping(pgd_t *pgdp, ...@@ -314,14 +313,11 @@ static void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size) static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
{ {
uintptr_t map_size = PAGE_SIZE; /* Upgrade to PMD_SIZE mappings whenever possible */
if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
/* Upgrade to PMD/PGDIR mappings whenever possible */ return PAGE_SIZE;
if (!(base & (PTE_PARENT_SIZE - 1)) &&
!(size & (PTE_PARENT_SIZE - 1)))
map_size = PTE_PARENT_SIZE;
return map_size; return PMD_SIZE;
} }
/* /*
...@@ -449,6 +445,16 @@ static void __init setup_vm_final(void) ...@@ -449,6 +445,16 @@ static void __init setup_vm_final(void)
csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE); csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all(); local_flush_tlb_all();
} }
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
dtb_early_va = (void *)dtb_pa;
}
static inline void setup_vm_final(void)
{
}
#endif /* CONFIG_MMU */
void __init paging_init(void) void __init paging_init(void)
{ {
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/smp.h> #include <linux/smp.h>
#include <linux/sched.h>
#include <asm/sbi.h> #include <asm/sbi.h>
void flush_tlb_all(void) void flush_tlb_all(void)
...@@ -9,13 +10,33 @@ void flush_tlb_all(void) ...@@ -9,13 +10,33 @@ void flush_tlb_all(void)
sbi_remote_sfence_vma(NULL, 0, -1); sbi_remote_sfence_vma(NULL, 0, -1);
} }
/*
* This function must not be called with cmask being NULL;
* the kernel may panic if it is.
*/
static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start, static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
unsigned long size) unsigned long size)
{ {
struct cpumask hmask; struct cpumask hmask;
unsigned int cpuid;
riscv_cpuid_to_hartid_mask(cmask, &hmask); if (cpumask_empty(cmask))
sbi_remote_sfence_vma(hmask.bits, start, size); return;
cpuid = get_cpu();
if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
/* local cpu is the only cpu present in cpumask */
if (size <= PAGE_SIZE)
local_flush_tlb_page(start);
else
local_flush_tlb_all();
} else {
riscv_cpuid_to_hartid_mask(cmask, &hmask);
sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
}
put_cpu();
} }
void flush_tlb_mm(struct mm_struct *mm) void flush_tlb_mm(struct mm_struct *mm)
......
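The flush_tlb_mm() body is truncated above; for context, the wrappers in this file all funnel into __sbi_tlb_flush_range() along these lines (a sketch of the shape, not a verbatim quote):

void flush_tlb_mm(struct mm_struct *mm)
{
	__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
}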
...@@ -3,9 +3,9 @@ ...@@ -3,9 +3,9 @@
* Copyright (C) 2012 Regents of the University of California * Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive * Copyright (C) 2017 SiFive
* *
* All RISC-V systems have a timer attached to every hart. These timers can be * All RISC-V systems have a timer attached to every hart. These timers can
* read from the "time" and "timeh" CSRs, and can use the SBI to setup * either be read via the "time" and "timeh" CSRs, with the SBI used to
* events. * set up events, or be accessed directly through MMIO registers.
*/ */
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/clockchips.h> #include <linux/clockchips.h>
...@@ -13,14 +13,29 @@ ...@@ -13,14 +13,29 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/irq.h> #include <linux/irq.h>
#include <linux/sched_clock.h> #include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/sbi.h> #include <asm/sbi.h>
u64 __iomem *riscv_time_cmp;
u64 __iomem *riscv_time_val;
static inline void mmio_set_timer(u64 val)
{
void __iomem *r;
r = riscv_time_cmp + cpuid_to_hartid_map(smp_processor_id());
writeq_relaxed(val, r);
}
static int riscv_clock_next_event(unsigned long delta, static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce) struct clock_event_device *ce)
{ {
csr_set(sie, SIE_STIE); csr_set(CSR_IE, IE_TIE);
sbi_set_timer(get_cycles64() + delta); if (IS_ENABLED(CONFIG_RISCV_SBI))
sbi_set_timer(get_cycles64() + delta);
else
mmio_set_timer(get_cycles64() + delta);
return 0; return 0;
} }
...@@ -61,13 +76,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu) ...@@ -61,13 +76,13 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
ce->cpumask = cpumask_of(cpu); ce->cpumask = cpumask_of(cpu);
clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff); clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
csr_set(sie, SIE_STIE); csr_set(CSR_IE, IE_TIE);
return 0; return 0;
} }
static int riscv_timer_dying_cpu(unsigned int cpu) static int riscv_timer_dying_cpu(unsigned int cpu)
{ {
csr_clear(sie, SIE_STIE); csr_clear(CSR_IE, IE_TIE);
return 0; return 0;
} }
...@@ -76,7 +91,7 @@ void riscv_timer_interrupt(void) ...@@ -76,7 +91,7 @@ void riscv_timer_interrupt(void)
{ {
struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event); struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
csr_clear(sie, SIE_STIE); csr_clear(CSR_IE, IE_TIE);
evdev->event_handler(evdev); evdev->event_handler(evdev);
} }
......
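writeq_relaxed() comes from io-64-nonatomic-lo-hi.h, so on 32-bit the 64-bit compare value is written as two 32-bit halves. Reading the free-running counter on rv32 instead needs the classic hi/lo/hi retry loop; a hedged sketch of what an MMIO reader would look like, mirroring the CSR-based get_cycles64():

static u64 mmio_get_cycles64(void)
{
	u32 hi, lo;

	do {
		hi = readl_relaxed((u32 __iomem *)riscv_time_val + 1);
		lo = readl_relaxed((u32 __iomem *)riscv_time_val);
	} while (readl_relaxed((u32 __iomem *)riscv_time_val + 1) != hi);

	return ((u64)hi << 32) | lo;
}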
...@@ -181,7 +181,7 @@ static void plic_handle_irq(struct pt_regs *regs) ...@@ -181,7 +181,7 @@ static void plic_handle_irq(struct pt_regs *regs)
WARN_ON_ONCE(!handler->present); WARN_ON_ONCE(!handler->present);
csr_clear(sie, SIE_SEIE); csr_clear(CSR_IE, IE_EIE);
while ((hwirq = readl(claim))) { while ((hwirq = readl(claim))) {
int irq = irq_find_mapping(plic_irqdomain, hwirq); int irq = irq_find_mapping(plic_irqdomain, hwirq);
...@@ -191,7 +191,7 @@ static void plic_handle_irq(struct pt_regs *regs) ...@@ -191,7 +191,7 @@ static void plic_handle_irq(struct pt_regs *regs)
else else
generic_handle_irq(irq); generic_handle_irq(irq);
} }
csr_set(sie, SIE_SEIE); csr_set(CSR_IE, IE_EIE);
} }
/* /*
...@@ -252,8 +252,11 @@ static int __init plic_init(struct device_node *node, ...@@ -252,8 +252,11 @@ static int __init plic_init(struct device_node *node,
continue; continue;
} }
/* skip contexts other than supervisor external interrupt */ /*
if (parent.args[0] != IRQ_S_EXT) * Skip contexts other than external interrupts for our
* privilege level.
*/
if (parent.args[0] != IRQ_EXT)
continue; continue;
hartid = plic_find_hart_id(parent.np); hartid = plic_find_hart_id(parent.np);
......
...@@ -89,7 +89,7 @@ config HVC_DCC ...@@ -89,7 +89,7 @@ config HVC_DCC
config HVC_RISCV_SBI config HVC_RISCV_SBI
bool "RISC-V SBI console support" bool "RISC-V SBI console support"
depends on RISCV depends on RISCV_SBI
select HVC_DRIVER select HVC_DRIVER
help help
This enables support for console output via RISC-V SBI calls, which This enables support for console output via RISC-V SBI calls, which
......
...@@ -88,7 +88,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST ...@@ -88,7 +88,7 @@ config SERIAL_EARLYCON_ARM_SEMIHOST
config SERIAL_EARLYCON_RISCV_SBI config SERIAL_EARLYCON_RISCV_SBI
bool "Early console using RISC-V SBI" bool "Early console using RISC-V SBI"
depends on RISCV depends on RISCV_SBI
select SERIAL_CORE select SERIAL_CORE
select SERIAL_CORE_CONSOLE select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON select SERIAL_EARLYCON
......
...@@ -112,6 +112,8 @@ struct seccomp_data { ...@@ -112,6 +112,8 @@ struct seccomp_data {
# define __NR_seccomp 383 # define __NR_seccomp 383
# elif defined(__aarch64__) # elif defined(__aarch64__)
# define __NR_seccomp 277 # define __NR_seccomp 277
# elif defined(__riscv)
# define __NR_seccomp 277
# elif defined(__hppa__) # elif defined(__hppa__)
# define __NR_seccomp 338 # define __NR_seccomp 338
# elif defined(__powerpc__) # elif defined(__powerpc__)
...@@ -1587,6 +1589,10 @@ TEST_F(TRACE_poke, getpid_runs_normally) ...@@ -1587,6 +1589,10 @@ TEST_F(TRACE_poke, getpid_runs_normally)
# define ARCH_REGS struct user_pt_regs # define ARCH_REGS struct user_pt_regs
# define SYSCALL_NUM regs[8] # define SYSCALL_NUM regs[8]
# define SYSCALL_RET regs[0] # define SYSCALL_RET regs[0]
#elif defined(__riscv) && __riscv_xlen == 64
# define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM a7
# define SYSCALL_RET a0
#elif defined(__hppa__) #elif defined(__hppa__)
# define ARCH_REGS struct user_regs_struct # define ARCH_REGS struct user_regs_struct
# define SYSCALL_NUM gr[20] # define SYSCALL_NUM gr[20]
...@@ -1676,7 +1682,7 @@ void change_syscall(struct __test_metadata *_metadata, ...@@ -1676,7 +1682,7 @@ void change_syscall(struct __test_metadata *_metadata,
EXPECT_EQ(0, ret) {} EXPECT_EQ(0, ret) {}
#if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \ #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \
defined(__s390__) || defined(__hppa__) defined(__s390__) || defined(__hppa__) || defined(__riscv)
{ {
regs.SYSCALL_NUM = syscall; regs.SYSCALL_NUM = syscall;
} }
......
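With ARCH_REGS defined as struct user_regs_struct, the harness reads the syscall number via PTRACE_GETREGSET. A hedged sketch of the riscv64 read path (field names per the uapi asm/ptrace.h struct; error handling elided):

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h>

static long read_syscall_nr(pid_t pid)
{
	struct user_regs_struct regs;
	struct iovec iov = {
		.iov_base = &regs,
		.iov_len = sizeof(regs),
	};

	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
	return regs.a7;		/* SYSCALL_NUM, per the #define above */
}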