Commit 4b1d362d authored by Linus Torvalds

Merge tag 'riscv-for-linus-5.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:

 - SPDX comment style fix

 - ignore memory that is unusable

 - avoid setting a kernel text offset for the !MMU kernels, where
   skipping the first page of memory is both unnecessary and costly

 - avoid passing the flag bits in satp to pfn_to_virt()

 - fix __put_kernel_nofault, where we had the arguments to
   __put_user_nocheck reversed

 - workaround for a bug in the FU540 to avoid triggering PMP issues
   during early boot

 - change to how we pull symbols out of the vDSO. The old mechanism was
   removed in binutils-2.35 (a removal that has also been backported to
   Debian's binutils-2.34)

* tag 'riscv-for-linus-5.10-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  RISC-V: Fix the VDSO symbol generation for binutils-2.35+
  RISC-V: Use non-PGD mappings for early DTB access
  riscv: uaccess: fix __put_kernel_nofault()
  riscv: fix pfn_to_virt err in do_page_fault().
  riscv: Set text_offset correctly for M-Mode
  RISC-V: Remove any memblock representing unusable memory area
  risc-v: kernel: ftrace: Fixes improper SPDX comment style
parents 659caaf6 c2c81bb2
@@ -476,7 +476,7 @@ do { \
 do { \
         long __kr_err; \
 \
-        __put_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
+        __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
         if (unlikely(__kr_err)) \
                 goto err_label; \
 } while (0)
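
The bug is easiest to see with the data flow spelled out: __put_kernel_nofault(dst, src, type, err_label) must copy *(type *)src into *(type *)dst, and the old code handed the two pointers to __put_user_nocheck the wrong way around. A minimal userspace sketch of the corrected direction (put_nofault and the variables are made-up stand-ins, not the kernel macro):

#include <stdio.h>

/* Illustration only: mimics the fixed store direction, src -> dst. */
#define put_nofault(dst, src, type)  (*(type *)(dst) = *(type *)(src))

int main(void)
{
        long val = 42, slot = 0;

        put_nofault(&slot, &val, long);  /* writes val into slot */
        printf("%ld\n", slot);           /* prints 42 */
        return 0;
}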
...
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2013 Linaro Limited
  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
...
@@ -35,12 +35,17 @@ ENTRY(_start)
         .word 0
 #endif
         .balign 8
+#ifdef CONFIG_RISCV_M_MODE
+        /* Image load offset (0MB) from start of RAM for M-mode */
+        .dword 0
+#else
 #if __riscv_xlen == 64
         /* Image load offset(2MB) from start of RAM */
         .dword 0x200000
 #else
         /* Image load offset(4MB) from start of RAM */
         .dword 0x400000
 #endif
+#endif
         /* Effective size of kernel image */
         .dword _end - _start
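
For context, these .dword values populate the text_offset field of the RISC-V image header, which firmware or a boot loader adds to the start of RAM when deciding where to place the kernel; an M-mode kernel runs from the very start of RAM, so a zero offset avoids wasting the first 2MB/4MB. A hedged sketch of that consumer-side arithmetic (kernel_load_addr and the RAM base are hypothetical; real boot flows are firmware-specific):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical loader-side use of the header's text_offset field. */
static uint64_t kernel_load_addr(uint64_t ram_base, uint64_t text_offset)
{
        return ram_base + text_offset;
}

int main(void)
{
        uint64_t ram_base = 0x80000000ULL;  /* illustrative RAM base */

        printf("rv64 MMU: 0x%llx\n",
               (unsigned long long)kernel_load_addr(ram_base, 0x200000));
        printf("M-mode:   0x%llx\n",
               (unsigned long long)kernel_load_addr(ram_base, 0));
        return 0;
}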
...
 # SPDX-License-Identifier: GPL-2.0-only
 vdso.lds
 *.tmp
+vdso-syms.S
@@ -43,19 +43,14 @@ $(obj)/vdso.o: $(obj)/vdso.so
 SYSCFLAGS_vdso.so.dbg = $(c_flags)
 $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
         $(call if_changed,vdsold)
+SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+        -Wl,--build-id -Wl,--hash-style=both

 # We also create a special relocatable object that should mirror the symbol
 # table and layout of the linked DSO. With ld --just-symbols we can then
 # refer to these symbols in the kernel code rather than hand-coded addresses.
-SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-        -Wl,--build-id=sha1 -Wl,--hash-style=both
-$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
-        $(call if_changed,vdsold)
-
-LDFLAGS_vdso-syms.o := -r --just-symbols
-$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
-        $(call if_changed,ld)
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+        $(call if_changed,so2s)

 # strip rule for the .so file
 $(obj)/%.so: OBJCOPYFLAGS := -S
@@ -73,6 +68,11 @@ quiet_cmd_vdsold = VDSOLD $@
                 $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
         rm $@.tmp

+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S $@
+      cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
       cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
...
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
+| grep '^\.'
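
To make the transformation concrete: given a line of $(NM) -D output such as the following (the offset is illustrative),

0000000000000800 T __vdso_rt_sigreturn@@LINUX_4.15

the sed expression rewrites it to

.global __vdso_rt_sigreturn
.set __vdso_rt_sigreturn,0x0000000000000800

Lines that don't match pass through sed unchanged and are then discarded by the trailing grep, which keeps only the generated directives (lines starting with a dot).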
@@ -86,6 +86,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
         pmd_t *pmd, *pmd_k;
         pte_t *pte_k;
         int index;
+        unsigned long pfn;

         /* User mode accesses just cause a SIGSEGV */
         if (user_mode(regs))
@@ -100,7 +101,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
          * of a task switch.
          */
         index = pgd_index(addr);
-        pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index;
+        pfn = csr_read(CSR_SATP) & SATP_PPN;
+        pgd = (pgd_t *)pfn_to_virt(pfn) + index;
         pgd_k = init_mm.pgd + index;

         if (!pgd_present(*pgd_k)) {
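
The masking matters because satp is not a bare page-frame number: on RV64 it packs MODE (bits 63:60) and ASID (bits 59:44) above the 44-bit root-table PPN, so feeding the raw CSR value to pfn_to_virt() produced a wild address whenever MODE or ASID bits were set. A self-contained sketch of the field extraction (the mask mirrors the kernel's RV64 SATP_PPN; the satp value here is made up):

#include <stdint.h>
#include <stdio.h>

#define SATP_PPN 0x00000FFFFFFFFFFFULL  /* RV64 PPN field, bits 43:0 */

int main(void)
{
        /* Sv39 mode (8) in bits 63:60, ASID 1 in bits 59:44, PPN 0x80201. */
        uint64_t satp = (8ULL << 60) | (1ULL << 44) | 0x80201ULL;
        uint64_t pfn  = satp & SATP_PPN;  /* strips MODE and ASID */

        printf("pfn = 0x%llx\n", (unsigned long long)pfn);  /* 0x80201 */
        return 0;
}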
...
@@ -154,9 +154,8 @@ static void __init setup_initrd(void)
 void __init setup_bootmem(void)
 {
-        phys_addr_t mem_size = 0;
-        phys_addr_t total_mem = 0;
-        phys_addr_t mem_start, start, end = 0;
+        phys_addr_t mem_start = 0;
+        phys_addr_t start, end = 0;
         phys_addr_t vmlinux_end = __pa_symbol(&_end);
         phys_addr_t vmlinux_start = __pa_symbol(&_start);
         u64 i;
@@ -164,21 +163,18 @@
         /* Find the memory region containing the kernel */
         for_each_mem_range(i, &start, &end) {
                 phys_addr_t size = end - start;
-                if (!total_mem)
+                if (!mem_start)
                         mem_start = start;
                 if (start <= vmlinux_start && vmlinux_end <= end)
                         BUG_ON(size == 0);
-                total_mem = total_mem + size;
         }

         /*
-         * Remove memblock from the end of usable area to the
-         * end of region
+         * The maximal physical memory size is -PAGE_OFFSET.
+         * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed
+         * as it is unusable by kernel.
          */
-        mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET);
-        if (mem_start + mem_size < end)
-                memblock_remove(mem_start + mem_size,
-                                end - mem_start - mem_size);
+        memblock_enforce_memory_limit(mem_start - PAGE_OFFSET);

         /* Reserve from the start of the kernel to the end of the kernel */
         memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
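
The limit follows from the layout of the linear map: it starts at PAGE_OFFSET and can extend at most to the top of the virtual address space, so only -PAGE_OFFSET bytes (computed in unsigned arithmetic, i.e. 2^64 - PAGE_OFFSET on rv64) of physical memory are mappable, and memblock_enforce_memory_limit() drops everything beyond mem_start plus that budget. A worked example, assuming the default rv64/Sv39 PAGE_OFFSET (the value illustrates one common configuration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t page_offset = 0xffffffe000000000ULL;  /* default rv64/Sv39 */
        uint64_t budget = -page_offset;                /* 2^64 - PAGE_OFFSET */

        printf("linear-map budget: %llu GiB\n",
               (unsigned long long)(budget >> 30));    /* 128 GiB */
        return 0;
}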
@@ -297,6 +293,7 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE)
 #endif
 pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE);
+pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);

 static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
 {
@@ -494,6 +491,18 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
                            load_pa + (va - PAGE_OFFSET),
                            map_size, PAGE_KERNEL_EXEC);

+#ifndef __PAGETABLE_PMD_FOLDED
+        /* Setup early PMD for DTB */
+        create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+                           (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
+        /* Create two consecutive PMD mappings for FDT early scan */
+        pa = dtb_pa & ~(PMD_SIZE - 1);
+        create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+                           pa, PMD_SIZE, PAGE_KERNEL);
+        create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+                           pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+        dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else
         /* Create two consecutive PGD mappings for FDT early scan */
         pa = dtb_pa & ~(PGDIR_SIZE - 1);
         create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
@@ -501,6 +510,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
         create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
                            pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
         dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
+#endif
         dtb_early_pa = dtb_pa;

         /*
...
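
Two consecutive PMD (or PGD) mappings are needed because the DTB is only guaranteed to start inside the first 2MB window, not to fit in it: a blob that begins near the end of that window spills into the next one, and the virtual address handed to the FDT code must keep the DTB's offset within its 2MB page. A sketch of the arithmetic (PMD_SIZE matches rv64; the base VA and DTB address are made up):

#include <stdint.h>
#include <stdio.h>

#define PMD_SIZE           (2ULL << 20)           /* 2MB */
#define DTB_EARLY_BASE_VA  0xffffffc000000000ULL  /* illustrative only */

int main(void)
{
        uint64_t dtb_pa = 0x821ff000ULL;            /* made-up DTB location */
        uint64_t pa = dtb_pa & ~(PMD_SIZE - 1);     /* 2MB-aligned map base */
        uint64_t va = DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));

        /* The two PMDs cover [pa, pa + 4MB), so a DTB of up to 2MB is fully
         * mapped even when it crosses the first 2MB boundary. */
        printf("map PA 0x%llx..+4MB, DTB VA 0x%llx\n",
               (unsigned long long)pa, (unsigned long long)va);
        return 0;
}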