Commit a1084542 authored by Linus Torvalds

Merge tag 'riscv-for-linus-5.6-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Palmer Dabbelt:
 "This contains a handful of patches for this merge window:

   - Support for kasan

   - 32-bit physical addresses on rv32i-based systems

   - Support for CONFIG_DEBUG_VIRTUAL

   - DT entry for the FU540 GPIO controller, which has recently had a
     device driver merged

  These boot a buildroot-based system on QEMU's virt board for me"

* tag 'riscv-for-linus-5.6-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: dts: Add DT support for SiFive FU540 GPIO driver
  riscv: mm: add support for CONFIG_DEBUG_VIRTUAL
  riscv: keep 32-bit kernel to 32-bit phys_addr_t
  kasan: Add riscv to KASAN documentation.
  riscv: Add KASAN support
  kasan: No KASAN's memmove check if archs don't have it.
parents b70a2d6b 61ffb9d2
...@@ -21,8 +21,8 @@ global variables yet. ...@@ -21,8 +21,8 @@ global variables yet.
Tag-based KASAN is only supported in Clang and requires version 7.0.0 or later. Tag-based KASAN is only supported in Clang and requires version 7.0.0 or later.
Currently generic KASAN is supported for the x86_64, arm64, xtensa and s390 Currently generic KASAN is supported for the x86_64, arm64, xtensa, s390 and
architectures, and tag-based KASAN is supported only for arm64. riscv architectures, and tag-based KASAN is supported only for arm64.
Usage Usage
----- -----
......
...@@ -12,8 +12,6 @@ config 32BIT ...@@ -12,8 +12,6 @@ config 32BIT
config RISCV config RISCV
def_bool y def_bool y
# even on 32-bit, physical (and DMA) addresses are > 32-bits
select PHYS_ADDR_T_64BIT
select OF select OF
select OF_EARLY_FLATTREE select OF_EARLY_FLATTREE
select OF_IRQ select OF_IRQ
...@@ -57,6 +55,7 @@ config RISCV ...@@ -57,6 +55,7 @@ config RISCV
select GENERIC_ARCH_TOPOLOGY if SMP select GENERIC_ARCH_TOPOLOGY if SMP
select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MMIOWB select ARCH_HAS_MMIOWB
select ARCH_HAS_DEBUG_VIRTUAL
select HAVE_EBPF_JIT if 64BIT select HAVE_EBPF_JIT if 64BIT
select EDAC_SUPPORT select EDAC_SUPPORT
select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_GIGANTIC_PAGE
...@@ -66,6 +65,7 @@ config RISCV ...@@ -66,6 +65,7 @@ config RISCV
select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_MMAP_RND_BITS if MMU
select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GCOV_PROFILE_ALL
select HAVE_COPY_THREAD_TLS select HAVE_COPY_THREAD_TLS
select HAVE_ARCH_KASAN if MMU && 64BIT
config ARCH_MMAP_RND_BITS_MIN config ARCH_MMAP_RND_BITS_MIN
default 18 if 64BIT default 18 if 64BIT
......
...@@ -268,6 +268,19 @@ l2cache: cache-controller@2010000 { ...@@ -268,6 +268,19 @@ l2cache: cache-controller@2010000 {
interrupts = <1 2 3>; interrupts = <1 2 3>;
reg = <0x0 0x2010000 0x0 0x1000>; reg = <0x0 0x2010000 0x0 0x1000>;
}; };
gpio: gpio@10060000 {
compatible = "sifive,fu540-c000-gpio", "sifive,gpio0";
interrupt-parent = <&plic0>;
interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>,
<14>, <15>, <16>, <17>, <18>, <19>, <20>,
<21>, <22>;
reg = <0x0 0x10060000 0x0 0x1000>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
clocks = <&prci PRCI_CLK_TLCLK>;
status = "disabled";
};
}; };
}; };
...@@ -94,3 +94,7 @@ &pwm0 { ...@@ -94,3 +94,7 @@ &pwm0 {
&pwm1 { &pwm1 {
status = "okay"; status = "okay";
}; };
&gpio {
status = "okay";
};
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019 Andes Technology Corporation */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifndef __ASSEMBLY__

#ifdef CONFIG_KASAN

#include <asm/pgtable.h>

/* Each shadow byte tracks 2^KASAN_SHADOW_SCALE_SHIFT = 8 bytes of memory. */
#define KASAN_SHADOW_SCALE_SHIFT 3

/*
 * The shadow region covers a 2^38-byte span of virtual memory, so it
 * needs 2^(38 - SCALE_SHIFT) bytes of shadow, placed at the top of the
 * kernel address space starting at KASAN_SHADOW_START.
 */
#define KASAN_SHADOW_SIZE (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
#define KASAN_SHADOW_START 0xffffffc000000000 /* 2^64 - 2^38 */
#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)

/*
 * Offset used by the generic shadow translation
 * (shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET),
 * chosen so that KASAN_SHADOW_END maps back from the top of the 64-bit
 * address space.
 */
#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \
	(64 - KASAN_SHADOW_SCALE_SHIFT)))

/* Final shadow setup; called from setup_arch() under CONFIG_KASAN. */
void kasan_init(void);
/* Minimal early shadow setup; called from head.S before start_kernel. */
asmlinkage void kasan_early_init(void);
#endif
#endif
#endif /* __ASM_KASAN_H */
...@@ -100,8 +100,20 @@ extern unsigned long pfn_base; ...@@ -100,8 +100,20 @@ extern unsigned long pfn_base;
extern unsigned long max_low_pfn; extern unsigned long max_low_pfn;
extern unsigned long min_low_pfn; extern unsigned long min_low_pfn;
#define __pa(x) ((unsigned long)(x) - va_pa_offset) #define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
#define __va(x) ((void *)((unsigned long) (x) + va_pa_offset)) #define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x) __va_to_pa_nodebug(x)
#define __phys_addr_symbol(x) __va_to_pa_nodebug(x)
#endif /* CONFIG_DEBUG_VIRTUAL */
#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
#define phys_to_pfn(phys) (PFN_DOWN(phys)) #define phys_to_pfn(phys) (PFN_DOWN(phys))
#define pfn_to_phys(pfn) (PFN_PHYS(pfn)) #define pfn_to_phys(pfn) (PFN_PHYS(pfn))
......
...@@ -58,6 +58,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud) ...@@ -58,6 +58,11 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT); return (unsigned long)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
} }
static inline struct page *pud_page(pud_t pud)
{
return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
}
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
......
...@@ -11,8 +11,17 @@ ...@@ -11,8 +11,17 @@
#define __HAVE_ARCH_MEMSET #define __HAVE_ARCH_MEMSET
extern asmlinkage void *memset(void *, int, size_t); extern asmlinkage void *memset(void *, int, size_t);
extern asmlinkage void *__memset(void *, int, size_t);
#define __HAVE_ARCH_MEMCPY #define __HAVE_ARCH_MEMCPY
extern asmlinkage void *memcpy(void *, const void *, size_t); extern asmlinkage void *memcpy(void *, const void *, size_t);
extern asmlinkage void *__memcpy(void *, const void *, size_t);
/* For those files which don't want to check by kasan. */
#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)
#endif
#endif /* _ASM_RISCV_STRING_H */ #endif /* _ASM_RISCV_STRING_H */
...@@ -121,6 +121,9 @@ clear_bss_done: ...@@ -121,6 +121,9 @@ clear_bss_done:
sw zero, TASK_TI_CPU(tp) sw zero, TASK_TI_CPU(tp)
la sp, init_thread_union + THREAD_SIZE la sp, init_thread_union + THREAD_SIZE
#ifdef CONFIG_KASAN
call kasan_early_init
#endif
/* Start the kernel */ /* Start the kernel */
call parse_dtb call parse_dtb
tail start_kernel tail start_kernel
......
...@@ -11,3 +11,5 @@ ...@@ -11,3 +11,5 @@
*/ */
EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/kasan.h>
#include "head.h" #include "head.h"
...@@ -74,6 +75,10 @@ void __init setup_arch(char **cmdline_p) ...@@ -74,6 +75,10 @@ void __init setup_arch(char **cmdline_p)
swiotlb_init(1); swiotlb_init(1);
#endif #endif
#ifdef CONFIG_KASAN
kasan_init();
#endif
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
setup_smp(); setup_smp();
#endif #endif
......
...@@ -46,6 +46,7 @@ SECTIONS ...@@ -46,6 +46,7 @@ SECTIONS
KPROBES_TEXT KPROBES_TEXT
ENTRY_TEXT ENTRY_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.fixup) *(.fixup)
_etext = .; _etext = .;
} }
......
...@@ -7,7 +7,8 @@ ...@@ -7,7 +7,8 @@
#include <asm/asm.h> #include <asm/asm.h>
/* void *memcpy(void *, const void *, size_t) */ /* void *memcpy(void *, const void *, size_t) */
ENTRY(memcpy) ENTRY(__memcpy)
WEAK(memcpy)
move t6, a0 /* Preserve return value */ move t6, a0 /* Preserve return value */
/* Defer to byte-oriented copy for small sizes */ /* Defer to byte-oriented copy for small sizes */
...@@ -104,4 +105,4 @@ ENTRY(memcpy) ...@@ -104,4 +105,4 @@ ENTRY(memcpy)
bltu a1, a3, 5b bltu a1, a3, 5b
6: 6:
ret ret
END(memcpy) END(__memcpy)
...@@ -8,7 +8,8 @@ ...@@ -8,7 +8,8 @@
#include <asm/asm.h> #include <asm/asm.h>
/* void *memset(void *, int, size_t) */ /* void *memset(void *, int, size_t) */
ENTRY(memset) ENTRY(__memset)
WEAK(memset)
move t0, a0 /* Preserve return value */ move t0, a0 /* Preserve return value */
/* Defer to byte-oriented fill for small sizes */ /* Defer to byte-oriented fill for small sizes */
...@@ -109,4 +110,4 @@ ENTRY(memset) ...@@ -109,4 +110,4 @@ ENTRY(memset)
bltu t0, a3, 5b bltu t0, a3, 5b
6: 6:
ret ret
END(memset) END(__memset)
...@@ -15,3 +15,11 @@ ifeq ($(CONFIG_MMU),y) ...@@ -15,3 +15,11 @@ ifeq ($(CONFIG_MMU),y)
obj-$(CONFIG_SMP) += tlbflush.o obj-$(CONFIG_SMP) += tlbflush.o
endif endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_KASAN) += kasan_init.o
ifdef CONFIG_KASAN
KASAN_SANITIZE_kasan_init.o := n
KASAN_SANITIZE_init.o := n
endif
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation
#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
extern pgd_t early_pg_dir[PTRS_PER_PGD];
/*
 * Point the entire KASAN shadow range at the single zeroed
 * kasan_early_shadow_page so that instrumented code can run before the
 * real shadow is populated by kasan_init().  Invoked from head.S right
 * after the stack is set up, before parse_dtb/start_kernel.
 *
 * The mapping is built bottom-up: every early-shadow PTE points at the
 * shared shadow page, every early-shadow PMD points at that PTE table,
 * and every PGD slot covering [KASAN_SHADOW_START, KASAN_SHADOW_END)
 * points at that PMD table — first in early_pg_dir, then repeated for
 * swapper_pg_dir via pgd_offset_k().
 */
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	/* All PTEs alias the one shared, zero-filled early shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	/* All PMDs reference the single early-shadow PTE table. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	/* Wire the whole shadow range in early_pg_dir, one PGD per PGDIR_SIZE. */
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	/* init for swapper_pg_dir */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
				__pgprot(_PAGE_TABLE)));

	flush_tlb_all();
}
/*
 * Back the shadow virtual range [start, end) with freshly allocated
 * physical pages and its own PMD/PTE tables, replacing the shared
 * early-shadow aliases for that range.  The new shadow memory is
 * zeroed ("no poison") before returning.
 *
 * NOTE(review): memblock_alloc()/memblock_phys_alloc() return values
 * are used unchecked — presumably a boot-time OOM here is considered
 * fatal anyway; confirm against memblock semantics.
 */
static void __init populate(void *start, void *end)
{
	unsigned long i;
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
	/* One PMD table per PTRS_PER_PTE pages, rounded up. */
	unsigned long n_pmds =
		(n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
						n_pages / PTRS_PER_PTE;
	pgd_t *pgd = pgd_offset_k(vaddr);
	pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	/* One private physical page per shadow page. */
	for (i = 0; i < n_pages; i++) {
		phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
		set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
	}

	/* Link each PGD slot to its slice of the new PMD array. */
	for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
				__pgprot(_PAGE_TABLE)));

	/* Link each PMD to its slice of the new PTE array. */
	for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
				__pgprot(_PAGE_TABLE)));

	flush_tlb_all();
	/* Shadow starts out all-zero: everything unpoisoned. */
	memset(start, 0, end - start);
}
/*
 * Build the final KASAN shadow mapping; called from setup_arch().
 *
 * Non-linear kernel ranges keep the read-only zero early shadow
 * (kasan_populate_early_shadow), while every memblock memory region
 * gets real, writable shadow pages via populate().  Finally the early
 * shadow PTEs are remapped read-only and the shared early shadow page
 * is re-zeroed, then init_task.kasan_depth = 0 arms reporting.
 */
void __init kasan_init(void)
{
	struct memblock_region *reg;
	unsigned long i;

	/* Zero shadow for everything below the linear map's shadow. */
	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	for_each_memblock(memory, reg) {
		void *start = (void *)__va(reg->base);
		void *end = (void *)__va(reg->base + reg->size);

		if (start >= end)
			break;

		populate(kasan_mem_to_shadow(start),
			kasan_mem_to_shadow(end));
	}	/* fixed: dropped stray ';' after the loop's closing brace */

	/* Early shadow page becomes read-only: writes to it must fault. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));

	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	/* Enable KASAN reports from here on. */
	init_task.kasan_depth = 0;
}
// SPDX-License-Identifier: GPL-2.0
#include <linux/types.h>
#include <linux/mmdebug.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/sections.h>
/*
 * CONFIG_DEBUG_VIRTUAL implementation of __pa(): translate a linear-map
 * virtual address to physical, warning (once per offending address) when
 * the input is outside the kernel linear mapping.
 */
phys_addr_t __virt_to_phys(unsigned long x)
{
	phys_addr_t y = x - PAGE_OFFSET;

	/*
	 * Boundary checking against the kernel linear mapping space.
	 */
	WARN(y >= KERN_VIRT_SIZE,
	     "virt_to_phys used for non-linear address: %pK (%pS)\n",
	     (void *)x, (void *)x);

	return __va_to_pa_nodebug(x);
}
EXPORT_SYMBOL(__virt_to_phys);
/*
 * CONFIG_DEBUG_VIRTUAL implementation of __pa_symbol(): translate a
 * kernel-image symbol address to physical, BUG-ing (when
 * CONFIG_DEBUG_VIRTUAL is on) if the address lies outside the image.
 */
phys_addr_t __phys_addr_symbol(unsigned long x)
{
	unsigned long kernel_start = (unsigned long)PAGE_OFFSET;
	unsigned long kernel_end = (unsigned long)_end;

	/*
	 * Boundary checking against the kernel image mapping.
	 * __pa_symbol should only be used on kernel symbol addresses.
	 */
	VIRTUAL_BUG_ON(x < kernel_start || x > kernel_end);

	return __va_to_pa_nodebug(x);
}
EXPORT_SYMBOL(__phys_addr_symbol);
...@@ -110,6 +110,7 @@ void *memset(void *addr, int c, size_t len) ...@@ -110,6 +110,7 @@ void *memset(void *addr, int c, size_t len)
return __memset(addr, c, len); return __memset(addr, c, len);
} }
#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove #undef memmove
void *memmove(void *dest, const void *src, size_t len) void *memmove(void *dest, const void *src, size_t len)
{ {
...@@ -118,6 +119,7 @@ void *memmove(void *dest, const void *src, size_t len) ...@@ -118,6 +119,7 @@ void *memmove(void *dest, const void *src, size_t len)
return __memmove(dest, src, len); return __memmove(dest, src, len);
} }
#endif
#undef memcpy #undef memcpy
void *memcpy(void *dest, const void *src, size_t len) void *memcpy(void *dest, const void *src, size_t len)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment