Commit 58d4fafd authored by Linus Torvalds

Merge tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V updates from Paul Walmsley:
 "Add the following new features:

   - Generic CPU topology description support for DT-based platforms,
     including ARM64, ARM and RISC-V.

   - Sparsemem support

   - Perf callchain support

   - SiFive PLIC irqchip modifications, in preparation for M-mode Linux

  and clean up the code base:

   - Clean up control and status register (CSR) manipulation code, IPIs, TLB
     flushing, and the RISC-V CPU-local timer code

   - Kbuild cleanup from one of the Kbuild maintainers"

[ The CPU topology parts came in through the arm64 tree with a shared
  branch   - Linus ]

* tag 'riscv/for-v5.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  irqchip/sifive-plic: set max threshold for ignored handlers
  riscv: move the TLB flush logic out of line
  riscv: don't use the rdtime(h) pseudo-instructions
  riscv: cleanup riscv_cpuid_to_hartid_mask
  riscv: optimize send_ipi_single
  riscv: cleanup send_ipi_mask
  riscv: refactor the IPI code
  riscv: Add support for libdw
  riscv: Add support for perf registers sampling
  riscv: Add perf callchain support
  riscv: add arch/riscv/Kbuild
  RISC-V: Implement sparsemem
  riscv: Using CSR numbers to access CSRs
parents dbcda58a 9ce06497
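[Editorial note: many hunks below replace symbolic CSR names (sstatus, sptbr) with numeric CSR_* constants (CSR_SSTATUS, CSR_SATP, CSR_TIME, ...), which is what lets the same accessors later target M-mode CSRs as well. A simplified sketch of the csr_read() pattern those call sites go through — illustrative only; the real macros live in arch/riscv/include/asm/csr.h:

	/* Simplified: the CSR number is a compile-time macro argument. */
	#define csr_read(csr)						\
	({								\
		unsigned long __v;					\
		__asm__ __volatile__ ("csrr %0, " __ASM_STR(csr)	\
				      : "=r" (__v) : : "memory");	\
		__v;							\
	})
]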
# SPDX-License-Identifier: GPL-2.0-only
obj-y += kernel/ mm/ net/
......@@ -35,6 +35,8 @@ config RISCV
select HAVE_DMA_CONTIGUOUS
select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
select SPARSE_IRQ
......@@ -55,6 +57,7 @@ config RISCV
select EDAC_SUPPORT
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select SPARSEMEM_STATIC if 32BIT
config MMU
def_bool y
......@@ -63,12 +66,32 @@ config ZONE_DMA32
bool
default y if 64BIT
config VA_BITS
int
default 32 if 32BIT
default 39 if 64BIT
config PA_BITS
int
default 34 if 32BIT
default 56 if 64BIT
config PAGE_OFFSET
hex
default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
config ARCH_FLATMEM_ENABLE
def_bool y
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_SELECT_MEMORY_MODEL
def_bool ARCH_SPARSEMEM_ENABLE
config ARCH_WANT_GENERAL_HUGETLB
def_bool y
......
......@@ -54,6 +54,9 @@ endif
ifeq ($(CONFIG_MODULE_SECTIONS),y)
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/riscv/kernel/module.lds
endif
ifeq ($(CONFIG_PERF_EVENTS),y)
KBUILD_CFLAGS += -fno-omit-frame-pointer
endif
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
......@@ -72,7 +75,7 @@ KBUILD_IMAGE := $(boot)/Image.gz
head-y := arch/riscv/kernel/head.o
-core-y += arch/riscv/kernel/ arch/riscv/mm/ arch/riscv/net/
+core-y += arch/riscv/
libs-y += arch/riscv/lib/
......
......@@ -110,8 +110,10 @@ extern unsigned long min_low_pfn;
#define page_to_bus(page) (page_to_phys(page))
#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
#ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) \
(((pfn) >= pfn_base) && (((pfn)-pfn_base) < max_mapnr))
#endif
#define ARCH_PFN_OFFSET (pfn_base)
......
......@@ -83,6 +83,19 @@ extern pgd_t swapper_pg_dir[];
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
/*
* Roughly size the vmemmap space to be large enough to fit enough
* struct pages to map half the virtual address space. Then
* position vmemmap directly below the VMALLOC region.
*/
#define VMEMMAP_SHIFT \
(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END (VMALLOC_START - 1)
#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
#define vmemmap ((struct page *)VMEMMAP_START)
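[Editorial note: to make the sizing concrete for the 64-bit defaults in the Kconfig hunk above (CONFIG_VA_BITS=39, PAGE_SHIFT=12), and assuming STRUCT_PAGE_MAX_SHIFT=6, i.e. a 64-byte struct page — the actual value is config-dependent:

	/* VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32                                    */
	/* VMEMMAP_SIZE  = BIT(32)         = 4 GiB                                 */
	/* VMEMMAP_START = VMALLOC_START - 4 GiB; VMEMMAP_END = VMALLOC_START - 1  */
	/* That is enough struct pages to map 2^38 bytes, half the 39-bit space.   */
]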
/*
* ZERO_PAGE is a global shared page that is always zero,
* used for zero-mapped memory areas, etc.
......
......@@ -61,11 +61,5 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
return boot_cpu_hartid;
}
-static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
-struct cpumask *out)
-{
-cpumask_set_cpu(cpuid_to_hartid_map(0), out);
-}
#endif /* CONFIG_SMP */
#endif /* _ASM_RISCV_SMP_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SPARSEMEM_H
#define __ASM_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
#define MAX_PHYSMEM_BITS CONFIG_PA_BITS
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_SPARSEMEM */
#endif /* __ASM_SPARSEMEM_H */
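[Editorial note on scale, using the rv64 default CONFIG_PA_BITS=56 from the Kconfig hunk above:

	/* SECTION_SIZE_BITS = 27 -> each sparsemem section spans 2^27 B = 128 MiB */
	/* MAX_PHYSMEM_BITS  = 56 -> at most 2^(56 - 27) = 2^29 sections, of which */
	/*                           only the present ones are actually tracked    */
]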
......@@ -6,43 +6,41 @@
#ifndef _ASM_RISCV_TIMEX_H
#define _ASM_RISCV_TIMEX_H
#include <asm/param.h>
+#include <asm/csr.h>
typedef unsigned long cycles_t;
-static inline cycles_t get_cycles_inline(void)
+static inline cycles_t get_cycles(void)
{
-cycles_t n;
-__asm__ __volatile__ (
-"rdtime %0"
-: "=r" (n));
-return n;
+return csr_read(CSR_TIME);
}
-#define get_cycles get_cycles_inline
+#define get_cycles get_cycles
#ifdef CONFIG_64BIT
-static inline uint64_t get_cycles64(void)
+static inline u64 get_cycles64(void)
+{
+return get_cycles();
+}
+#else /* CONFIG_64BIT */
+static inline u32 get_cycles_hi(void)
{
-return get_cycles();
+return csr_read(CSR_TIMEH);
}
-#else
-static inline uint64_t get_cycles64(void)
+static inline u64 get_cycles64(void)
{
-u32 lo, hi, tmp;
-__asm__ __volatile__ (
-"1:\n"
-"rdtimeh %0\n"
-"rdtime %1\n"
-"rdtimeh %2\n"
-"bne %0, %2, 1b"
-: "=&r" (hi), "=&r" (lo), "=&r" (tmp));
+u32 hi, lo;
+do {
+hi = get_cycles_hi();
+lo = get_cycles();
+} while (hi != get_cycles_hi());
return ((u64)hi << 32) | lo;
}
-#endif
+#endif /* CONFIG_64BIT */
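[Editorial note: the do/while in the new 32-bit get_cycles64() guards against the low half wrapping between the two CSR reads; an illustration with hypothetical counter values:

	/* counter at 0x00000001_ffffffff, about to tick:                    */
	/*   hi = get_cycles_hi();  -> 0x00000001                            */
	/*   (counter rolls over to 0x00000002_00000000)                     */
	/*   lo = get_cycles();     -> 0x00000000                            */
	/* combining naively gives 0x00000001_00000000, off by 2^32 ticks;   */
	/* the re-read sees hi == 0x00000002 != 0x00000001 and retries.      */

The removed asm loop (rdtimeh/rdtime/rdtimeh + bne) implemented the same check directly in assembly.]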
#define ARCH_HAS_READ_CURRENT_TIMER
static inline int read_current_timer(unsigned long *timer_val)
{
*timer_val = get_cycles();
......
......@@ -25,8 +25,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
}
-#ifndef CONFIG_SMP
+#ifdef CONFIG_SMP
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+unsigned long end);
+#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
......@@ -37,35 +42,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
}
#define flush_tlb_mm(mm) flush_tlb_all()
-#else /* CONFIG_SMP */
-#include <asm/sbi.h>
-static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
-unsigned long size)
-{
-struct cpumask hmask;
-cpumask_clear(&hmask);
-riscv_cpuid_to_hartid_mask(cmask, &hmask);
-sbi_remote_sfence_vma(hmask.bits, start, size);
-}
-#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
-#define flush_tlb_range(vma, start, end) \
-remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
-static inline void flush_tlb_page(struct vm_area_struct *vma,
-unsigned long addr)
-{
-flush_tlb_range(vma, addr, addr + PAGE_SIZE);
-}
-#define flush_tlb_mm(mm) \
-remote_sfence_vma(mm_cpumask(mm), 0, -1)
#endif /* CONFIG_SMP */
/* Flush a range of kernel pages */
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#ifndef _ASM_RISCV_PERF_REGS_H
#define _ASM_RISCV_PERF_REGS_H
enum perf_event_riscv_regs {
PERF_REG_RISCV_PC,
PERF_REG_RISCV_RA,
PERF_REG_RISCV_SP,
PERF_REG_RISCV_GP,
PERF_REG_RISCV_TP,
PERF_REG_RISCV_T0,
PERF_REG_RISCV_T1,
PERF_REG_RISCV_T2,
PERF_REG_RISCV_S0,
PERF_REG_RISCV_S1,
PERF_REG_RISCV_A0,
PERF_REG_RISCV_A1,
PERF_REG_RISCV_A2,
PERF_REG_RISCV_A3,
PERF_REG_RISCV_A4,
PERF_REG_RISCV_A5,
PERF_REG_RISCV_A6,
PERF_REG_RISCV_A7,
PERF_REG_RISCV_S2,
PERF_REG_RISCV_S3,
PERF_REG_RISCV_S4,
PERF_REG_RISCV_S5,
PERF_REG_RISCV_S6,
PERF_REG_RISCV_S7,
PERF_REG_RISCV_S8,
PERF_REG_RISCV_S9,
PERF_REG_RISCV_S10,
PERF_REG_RISCV_S11,
PERF_REG_RISCV_T3,
PERF_REG_RISCV_T4,
PERF_REG_RISCV_T5,
PERF_REG_RISCV_T6,
PERF_REG_RISCV_MAX,
};
#endif /* _ASM_RISCV_PERF_REGS_H */
......@@ -38,6 +38,8 @@ obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
clean:
......@@ -167,7 +167,7 @@ ENTRY(handle_exception)
tail do_IRQ
1:
/* Exceptions run with interrupts enabled */
-csrs sstatus, SR_SIE
+csrs CSR_SSTATUS, SR_SIE
/* Handle syscalls */
li t0, EXC_SYSCALL
......@@ -222,7 +222,7 @@ ret_from_syscall:
ret_from_exception:
REG_L s0, PT_SSTATUS(sp)
-csrc sstatus, SR_SIE
+csrc CSR_SSTATUS, SR_SIE
andi s0, s0, SR_SPP
bnez s0, resume_kernel
......@@ -265,7 +265,7 @@ work_pending:
bnez s1, work_resched
work_notifysig:
/* Handle pending signals and notify-resume requests */
-csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
+csrs CSR_SSTATUS, SR_SIE /* Enable interrupts for do_notify_resume() */
move a0, sp /* pt_regs */
move a1, s0 /* current_thread_info->flags */
tail do_notify_resume
......
......@@ -23,7 +23,7 @@ ENTRY(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
-csrs sstatus, t1
+csrs CSR_SSTATUS, t1
frcsr t0
fsd f0, TASK_THREAD_F0_F0(a0)
fsd f1, TASK_THREAD_F1_F0(a0)
......@@ -58,7 +58,7 @@ ENTRY(__fstate_save)
fsd f30, TASK_THREAD_F30_F0(a0)
fsd f31, TASK_THREAD_F31_F0(a0)
sw t0, TASK_THREAD_FCSR_F0(a0)
-csrc sstatus, t1
+csrc CSR_SSTATUS, t1
ret
ENDPROC(__fstate_save)
......@@ -67,7 +67,7 @@ ENTRY(__fstate_restore)
add a0, a0, a2
li t1, SR_FS
lw t0, TASK_THREAD_FCSR_F0(a0)
-csrs sstatus, t1
+csrs CSR_SSTATUS, t1
fld f0, TASK_THREAD_F0_F0(a0)
fld f1, TASK_THREAD_F1_F0(a0)
fld f2, TASK_THREAD_F2_F0(a0)
......@@ -101,6 +101,6 @@ ENTRY(__fstate_restore)
fld f30, TASK_THREAD_F30_F0(a0)
fld f31, TASK_THREAD_F31_F0(a0)
fscsr t0
-csrc sstatus, t1
+csrc CSR_SSTATUS, t1
ret
ENDPROC(__fstate_restore)
......@@ -61,7 +61,7 @@ _start_kernel:
* floating point in kernel space
*/
li t0, SR_FS
-csrc sstatus, t0
+csrc CSR_SSTATUS, t0
/* Pick one hart to run the main boot sequence */
la a3, hart_lottery
......
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#include <linux/perf_event.h>
#include <linux/uaccess.h>
/* Kernel callchain */
struct stackframe {
unsigned long fp;
unsigned long ra;
};
/*
* Get the return address for a single stackframe and return a pointer to the
* next frame tail.
*/
static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
unsigned long fp, unsigned long reg_ra)
{
struct stackframe buftail;
unsigned long ra = 0;
unsigned long *user_frame_tail =
(unsigned long *)(fp - sizeof(struct stackframe));
/* Check accessibility of one struct frame_tail beyond */
if (!access_ok(user_frame_tail, sizeof(buftail)))
return 0;
if (__copy_from_user_inatomic(&buftail, user_frame_tail,
sizeof(buftail)))
return 0;
if (reg_ra != 0)
ra = reg_ra;
else
ra = buftail.ra;
fp = buftail.fp;
if (ra != 0)
perf_callchain_store(entry, ra);
else
return 0;
return fp;
}
/*
* This will be called when the target is in user mode
* This function will only be called when we use
* "PERF_SAMPLE_CALLCHAIN" in
* kernel/events/core.c:perf_prepare_sample()
*
* How to trigger perf_callchain_[user/kernel] :
* $ perf record -e cpu-clock --call-graph fp ./program
* $ perf report --call-graph
*
* On RISC-V platform, the program being sampled and the C library
* need to be compiled with -fno-omit-frame-pointer, otherwise
* the user stack will not contain function frame.
*/
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
unsigned long fp = 0;
/* RISC-V does not support perf in guest mode. */
if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
return;
fp = regs->s0;
perf_callchain_store(entry, regs->sepc);
fp = user_backtrace(entry, fp, regs->ra);
while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
fp = user_backtrace(entry, fp, 0);
}
bool fill_callchain(unsigned long pc, void *entry)
{
return perf_callchain_store(entry, pc);
}
void notrace walk_stackframe(struct task_struct *task,
struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg);
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
/* RISC-V does not support perf in guest mode. */
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
pr_warn("RISC-V does not support perf in guest mode!");
return;
}
walk_stackframe(NULL, regs, fill_callchain, entry);
}
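[Editorial note tying the comment in perf_callchain_user() together: both the sampled program and its C library need frame pointers for the fp walk above to terminate usefully. A typical end-to-end session — the compiler invocation and program name are illustrative:

	$ gcc -O2 -fno-omit-frame-pointer -o program program.c
	$ perf record -e cpu-clock --call-graph fp ./program
	$ perf report --call-graph
]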
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <asm/perf_regs.h>
#include <asm/ptrace.h>
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
if (WARN_ON_ONCE((u32)idx >= PERF_REG_RISCV_MAX))
return 0;
return ((unsigned long *)regs)[idx];
}
#define REG_RESERVED (~((1ULL << PERF_REG_RISCV_MAX) - 1))
int perf_reg_validate(u64 mask)
{
if (!mask || mask & REG_RESERVED)
return -EINVAL;
return 0;
}
u64 perf_reg_abi(struct task_struct *task)
{
#if __riscv_xlen == 64
return PERF_SAMPLE_REGS_ABI_64;
#else
return PERF_SAMPLE_REGS_ABI_32;
#endif
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs,
struct pt_regs *regs_user_copy)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}
......@@ -56,6 +56,7 @@ void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
{
int cpu;
+cpumask_clear(out);
for_each_cpu(cpu, in)
cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
}
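[Editorial note: logical CPU numbers and hart IDs are distinct namespaces (the boot CPU becomes CPU 0 regardless of its hart ID), which is why the senders below translate masks before calling into the SBI. A toy illustration with a hypothetical mapping:

	/* Hypothetical mapping: cpu0 -> hart1, cpu1 -> hart3   */
	/*   in  (CPU mask):  0b0011  (cpu0, cpu1)              */
	/*   out (hart mask): 0b1010  (hart1, hart3)            */
]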
......@@ -78,13 +79,42 @@ static void ipi_stop(void)
wait_for_interrupt();
}
+static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+struct cpumask hartid_mask;
+int cpu;
+smp_mb__before_atomic();
+for_each_cpu(cpu, mask)
+set_bit(op, &ipi_data[cpu].bits);
+smp_mb__after_atomic();
+riscv_cpuid_to_hartid_mask(mask, &hartid_mask);
+sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+static void send_ipi_single(int cpu, enum ipi_message_type op)
+{
+int hartid = cpuid_to_hartid_map(cpu);
+smp_mb__before_atomic();
+set_bit(op, &ipi_data[cpu].bits);
+smp_mb__after_atomic();
+sbi_send_ipi(cpumask_bits(cpumask_of(hartid)));
+}
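[Editorial sketch of the barrier pairing above: the op bit must be visible before the SBI doorbell rings, so the receiving hart cannot take the software interrupt and find its ipi_data bits still clear:

	/* sender:                                                         */
	/*   smp_mb__before_atomic();                                      */
	/*   set_bit(op, &ipi_data[cpu].bits);   publish the request...    */
	/*   smp_mb__after_atomic();             ...ordered before...      */
	/*   sbi_send_ipi(...);                  ...the interrupt that     */
	/*                                       will read it              */
]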
+static inline void clear_ipi(void)
+{
+csr_clear(CSR_SIP, SIE_SSIE);
+}
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
unsigned long *stats = ipi_data[smp_processor_id()].stats;
-/* Clear pending IPI */
-csr_clear(CSR_SIP, SIE_SSIE);
+clear_ipi();
while (true) {
unsigned long ops;
......@@ -118,23 +148,6 @@ void riscv_software_interrupt(void)
}
}
-static void
-send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
-{
-int cpuid, hartid;
-struct cpumask hartid_mask;
-cpumask_clear(&hartid_mask);
-mb();
-for_each_cpu(cpuid, to_whom) {
-set_bit(operation, &ipi_data[cpuid].bits);
-hartid = cpuid_to_hartid_map(cpuid);
-cpumask_set_cpu(hartid, &hartid_mask);
-}
-mb();
-sbi_send_ipi(cpumask_bits(&hartid_mask));
-}
static const char * const ipi_names[] = {
[IPI_RESCHEDULE] = "Rescheduling interrupts",
[IPI_CALL_FUNC] = "Function call interrupts",
......@@ -156,12 +169,12 @@ void show_ipi_stats(struct seq_file *p, int prec)
void arch_send_call_function_ipi_mask(struct cpumask *mask)
{
-send_ipi_message(mask, IPI_CALL_FUNC);
+send_ipi_mask(mask, IPI_CALL_FUNC);
}
void arch_send_call_function_single_ipi(int cpu)
{
-send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
+send_ipi_single(cpu, IPI_CALL_FUNC);
}
void smp_send_stop(void)
......@@ -176,7 +189,7 @@ void smp_send_stop(void)
if (system_state <= SYSTEM_RUNNING)
pr_crit("SMP: stopping secondary CPUs\n");
-send_ipi_message(&mask, IPI_CPU_STOP);
+send_ipi_mask(&mask, IPI_CPU_STOP);
}
/* Wait up to one second for other CPUs to stop */
......@@ -191,6 +204,5 @@ void smp_send_stop(void)
void smp_send_reschedule(int cpu)
{
-send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+send_ipi_single(cpu, IPI_RESCHEDULE);
}
......@@ -19,8 +19,8 @@ struct stackframe {
unsigned long ra;
};
-static void notrace walk_stackframe(struct task_struct *task,
-struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+bool (*fn)(unsigned long, void *), void *arg)
{
unsigned long fp, sp, pc;
......
......@@ -18,7 +18,7 @@ ENTRY(__asm_copy_from_user)
/* Enable access to user memory */
li t6, SR_SUM
-csrs sstatus, t6
+csrs CSR_SSTATUS, t6
add a3, a1, a2
/* Use word-oriented copy only if low-order bits match */
......@@ -47,7 +47,7 @@ ENTRY(__asm_copy_from_user)
3:
/* Disable access to user memory */
-csrc sstatus, t6
+csrc CSR_SSTATUS, t6
li a0, 0
ret
4: /* Edge case: unalignment */
......@@ -72,7 +72,7 @@ ENTRY(__clear_user)
/* Enable access to user memory */
li t6, SR_SUM
-csrs sstatus, t6
+csrs CSR_SSTATUS, t6
add a3, a0, a1
addi t0, a0, SZREG-1
......@@ -94,7 +94,7 @@ ENTRY(__clear_user)
3:
/* Disable access to user memory */
-csrc sstatus, t6
+csrc CSR_SSTATUS, t6
li a0, 0
ret
4: /* Edge case: unalignment */
......@@ -114,11 +114,11 @@ ENDPROC(__clear_user)
/* Fixup code for __copy_user(10) and __clear_user(11) */
10:
/* Disable access to user memory */
-csrs sstatus, t6
+csrs CSR_SSTATUS, t6
mv a0, a2
ret
11:
-csrs sstatus, t6
+csrs CSR_SSTATUS, t6
mv a0, a1
ret
.previous
......@@ -13,4 +13,7 @@ obj-y += cacheflush.o
obj-y += context.o
obj-y += sifive_l2_cache.o
ifeq ($(CONFIG_MMU),y)
obj-$(CONFIG_SMP) += tlbflush.o
endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
......@@ -47,7 +47,6 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
if (mm != current->active_mm || !local) {
-cpumask_clear(&hmask);
riscv_cpuid_to_hartid_mask(&others, &hmask);
sbi_remote_fence_i(hmask.bits);
} else {
......
......@@ -57,12 +57,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
-/*
-* Use the old spbtr name instead of using the current satp
-* name to support binutils 2.29 which doesn't know about the
-* privileged ISA 1.10 yet.
-*/
-csr_write(sptbr, virt_to_pfn(next->pgd) | SATP_MODE);
+csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
local_flush_tlb_all();
flush_icache_deferred(next);
......
......@@ -435,13 +435,23 @@ static void __init setup_vm_final(void)
clear_fixmap(FIX_PMD);
/* Move to swapper page table */
-csr_write(sptbr, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
+csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
local_flush_tlb_all();
}
void __init paging_init(void)
{
setup_vm_final();
memblocks_present();
sparse_init();
setup_zero_page();
zone_sizes_init();
}
#ifdef CONFIG_SPARSEMEM
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
return vmemmap_populate_basepages(start, end, node);
}
#endif
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/smp.h>
#include <asm/sbi.h>
void flush_tlb_all(void)
{
sbi_remote_sfence_vma(NULL, 0, -1);
}
static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
unsigned long size)
{
struct cpumask hmask;
riscv_cpuid_to_hartid_mask(cmask, &hmask);
sbi_remote_sfence_vma(hmask.bits, start, size);
}
void flush_tlb_mm(struct mm_struct *mm)
{
__sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
__sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
}
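[Editorial note: taken together, the new out-of-line helpers give every SMP TLB flush a single funnel; the resulting flow through the code above, roughly:

	/* flush_tlb_page(vma, addr)                                              */
	/*   -> __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE)    */
	/*        riscv_cpuid_to_hartid_mask(): logical CPU mask -> hart ID mask  */
	/*        sbi_remote_sfence_vma():      SBI call asking firmware to run   */
	/*                                      sfence.vma on the targeted harts  */
]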
......@@ -2,6 +2,10 @@
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
+*
+* All RISC-V systems have a timer attached to every hart. These timers can be
+* read from the "time" and "timeh" CSRs, and can use the SBI to setup
+* events.
*/
#include <linux/clocksource.h>
#include <linux/clockchips.h>
......@@ -12,19 +16,6 @@
#include <asm/smp.h>
#include <asm/sbi.h>
-/*
-* All RISC-V systems have a timer attached to every hart. These timers can be
-* read by the 'rdcycle' pseudo instruction, and can use the SBI to setup
-* events. In order to abstract the architecture-specific timer reading and
-* setting functions away from the clock event insertion code, we provide
-* function pointers to the clockevent subsystem that perform two basic
-* operations: rdtime() reads the timer on the current CPU, and
-* next_event(delta) sets the next timer event to 'delta' cycles in the future.
-* As the timers are inherently a per-cpu resource, these callbacks perform
-* operations on the current hart. There is guaranteed to be exactly one timer
-* per hart on all RISC-V systems.
-*/
static int riscv_clock_next_event(unsigned long delta,
struct clock_event_device *ce)
{
......
......@@ -244,6 +244,7 @@ static int __init plic_init(struct device_node *node,
struct plic_handler *handler;
irq_hw_number_t hwirq;
int cpu, hartid;
+u32 threshold = 0;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
......@@ -266,10 +267,16 @@ static int __init plic_init(struct device_node *node,
continue;
}
+/*
+* When running in M-mode we need to ignore the S-mode handler.
+* Here we assume it always comes later, but that might be a
+* little fragile.
+*/
handler = per_cpu_ptr(&plic_handlers, cpu);
if (handler->present) {
pr_warn("handler already present for context %d.\n", i);
-continue;
+threshold = 0xffffffff;
+goto done;
}
handler->present = true;
......@@ -279,8 +286,9 @@ static int __init plic_init(struct device_node *node,
handler->enable_base =
plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
+done:
/* priority must be > threshold to trigger an interrupt */
-writel(0, handler->hart_base + CONTEXT_THRESHOLD);
+writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
nr_handlers++;
......
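[Editorial note: the max-threshold trick is just the delivery rule from the comment above ("priority must be > threshold to trigger an interrupt") taken to its limit:

	/* deliver_to_context = (source_priority > context_threshold)            */
	/* threshold 0          : every nonzero-priority source can interrupt    */
	/* threshold 0xffffffff : nothing can be greater, context stays masked   */
]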
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#ifndef _ASM_RISCV_PERF_REGS_H
#define _ASM_RISCV_PERF_REGS_H
enum perf_event_riscv_regs {
PERF_REG_RISCV_PC,
PERF_REG_RISCV_RA,
PERF_REG_RISCV_SP,
PERF_REG_RISCV_GP,
PERF_REG_RISCV_TP,
PERF_REG_RISCV_T0,
PERF_REG_RISCV_T1,
PERF_REG_RISCV_T2,
PERF_REG_RISCV_S0,
PERF_REG_RISCV_S1,
PERF_REG_RISCV_A0,
PERF_REG_RISCV_A1,
PERF_REG_RISCV_A2,
PERF_REG_RISCV_A3,
PERF_REG_RISCV_A4,
PERF_REG_RISCV_A5,
PERF_REG_RISCV_A6,
PERF_REG_RISCV_A7,
PERF_REG_RISCV_S2,
PERF_REG_RISCV_S3,
PERF_REG_RISCV_S4,
PERF_REG_RISCV_S5,
PERF_REG_RISCV_S6,
PERF_REG_RISCV_S7,
PERF_REG_RISCV_S8,
PERF_REG_RISCV_S9,
PERF_REG_RISCV_S10,
PERF_REG_RISCV_S11,
PERF_REG_RISCV_T3,
PERF_REG_RISCV_T4,
PERF_REG_RISCV_T5,
PERF_REG_RISCV_T6,
PERF_REG_RISCV_MAX,
};
#endif /* _ASM_RISCV_PERF_REGS_H */
......@@ -60,6 +60,10 @@ ifeq ($(SRCARCH),arm64)
LIBUNWIND_LIBS = -lunwind -lunwind-aarch64
endif
ifeq ($(SRCARCH),riscv)
NO_PERF_REGS := 0
endif
ifeq ($(SRCARCH),csky)
NO_PERF_REGS := 0
endif
......@@ -82,7 +86,7 @@ endif
# Disable it on all other architectures in case libdw unwind
# support is detected in system. Add supported architectures
# to the check.
-ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc s390 csky))
+ifneq ($(SRCARCH),$(filter $(SRCARCH),x86 arm arm64 powerpc s390 csky riscv))
NO_LIBDW_DWARF_UNWIND := 1
endif
......
ifndef NO_DWARF
PERF_HAVE_DWARF_REGS := 1
endif
PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#ifndef ARCH_PERF_REGS_H
#define ARCH_PERF_REGS_H
#include <stdlib.h>
#include <linux/types.h>
#include <asm/perf_regs.h>
#define PERF_REGS_MASK ((1ULL << PERF_REG_RISCV_MAX) - 1)
#define PERF_REGS_MAX PERF_REG_RISCV_MAX
#if __riscv_xlen == 64
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_64
#else
#define PERF_SAMPLE_REGS_ABI PERF_SAMPLE_REGS_ABI_32
#endif
#define PERF_REG_IP PERF_REG_RISCV_PC
#define PERF_REG_SP PERF_REG_RISCV_SP
static inline const char *perf_reg_name(int id)
{
switch (id) {
case PERF_REG_RISCV_PC:
return "pc";
case PERF_REG_RISCV_RA:
return "ra";
case PERF_REG_RISCV_SP:
return "sp";
case PERF_REG_RISCV_GP:
return "gp";
case PERF_REG_RISCV_TP:
return "tp";
case PERF_REG_RISCV_T0:
return "t0";
case PERF_REG_RISCV_T1:
return "t1";
case PERF_REG_RISCV_T2:
return "t2";
case PERF_REG_RISCV_S0:
return "s0";
case PERF_REG_RISCV_S1:
return "s1";
case PERF_REG_RISCV_A0:
return "a0";
case PERF_REG_RISCV_A1:
return "a1";
case PERF_REG_RISCV_A2:
return "a2";
case PERF_REG_RISCV_A3:
return "a3";
case PERF_REG_RISCV_A4:
return "a4";
case PERF_REG_RISCV_A5:
return "a5";
case PERF_REG_RISCV_A6:
return "a6";
case PERF_REG_RISCV_A7:
return "a7";
case PERF_REG_RISCV_S2:
return "s2";
case PERF_REG_RISCV_S3:
return "s3";
case PERF_REG_RISCV_S4:
return "s4";
case PERF_REG_RISCV_S5:
return "s5";
case PERF_REG_RISCV_S6:
return "s6";
case PERF_REG_RISCV_S7:
return "s7";
case PERF_REG_RISCV_S8:
return "s8";
case PERF_REG_RISCV_S9:
return "s9";
case PERF_REG_RISCV_S10:
return "s10";
case PERF_REG_RISCV_S11:
return "s11";
case PERF_REG_RISCV_T3:
return "t3";
case PERF_REG_RISCV_T4:
return "t4";
case PERF_REG_RISCV_T5:
return "t5";
case PERF_REG_RISCV_T6:
return "t6";
default:
return NULL;
}
return NULL;
}
#endif /* ARCH_PERF_REGS_H */
perf-$(CONFIG_DWARF) += dwarf-regs.o
perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
* Mapping of DWARF debug register numbers into register names.
*/
#include <stddef.h>
#include <errno.h> /* for EINVAL */
#include <string.h> /* for strcmp */
#include <dwarf-regs.h>
struct pt_regs_dwarfnum {
const char *name;
unsigned int dwarfnum;
};
#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
struct pt_regs_dwarfnum riscv_dwarf_regs_table[] = {
REG_DWARFNUM_NAME("%zero", 0),
REG_DWARFNUM_NAME("%ra", 1),
REG_DWARFNUM_NAME("%sp", 2),
REG_DWARFNUM_NAME("%gp", 3),
REG_DWARFNUM_NAME("%tp", 4),
REG_DWARFNUM_NAME("%t0", 5),
REG_DWARFNUM_NAME("%t1", 6),
REG_DWARFNUM_NAME("%t2", 7),
REG_DWARFNUM_NAME("%s0", 8),
REG_DWARFNUM_NAME("%s1", 9),
REG_DWARFNUM_NAME("%a0", 10),
REG_DWARFNUM_NAME("%a1", 11),
REG_DWARFNUM_NAME("%a2", 12),
REG_DWARFNUM_NAME("%a3", 13),
REG_DWARFNUM_NAME("%a4", 14),
REG_DWARFNUM_NAME("%a5", 15),
REG_DWARFNUM_NAME("%a6", 16),
REG_DWARFNUM_NAME("%a7", 17),
REG_DWARFNUM_NAME("%s2", 18),
REG_DWARFNUM_NAME("%s3", 19),
REG_DWARFNUM_NAME("%s4", 20),
REG_DWARFNUM_NAME("%s5", 21),
REG_DWARFNUM_NAME("%s6", 22),
REG_DWARFNUM_NAME("%s7", 23),
REG_DWARFNUM_NAME("%s8", 24),
REG_DWARFNUM_NAME("%s9", 25),
REG_DWARFNUM_NAME("%s10", 26),
REG_DWARFNUM_NAME("%s11", 27),
REG_DWARFNUM_NAME("%t3", 28),
REG_DWARFNUM_NAME("%t4", 29),
REG_DWARFNUM_NAME("%t5", 30),
REG_DWARFNUM_NAME("%t6", 31),
REG_DWARFNUM_END,
};
#define RISCV_MAX_REGS ((sizeof(riscv_dwarf_regs_table) / \
sizeof(riscv_dwarf_regs_table[0])) - 1)
const char *get_arch_regstr(unsigned int n)
{
return (n < RISCV_MAX_REGS) ? riscv_dwarf_regs_table[n].name : NULL;
}
int regs_query_register_offset(const char *name)
{
const struct pt_regs_dwarfnum *roff;
for (roff = riscv_dwarf_regs_table; roff->name; roff++)
if (!strcmp(roff->name, name))
return roff->dwarfnum;
return -EINVAL;
}
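[Editorial note: a quick sanity check of the two lookups above, using values straight from the table:

	/* get_arch_regstr(10)                 -> "%a0"                     */
	/* regs_query_register_offset("%sp")   -> 2                         */
	/* regs_query_register_offset("%nope") -> -EINVAL (not in table)    */
]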
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
#include <elfutils/libdwfl.h>
#include "../../util/unwind-libdw.h"
#include "../../util/perf_regs.h"
#include "../../util/event.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
struct unwind_info *ui = arg;
struct regs_dump *user_regs = &ui->sample->user_regs;
Dwarf_Word dwarf_regs[32];
#define REG(r) ({ \
Dwarf_Word val = 0; \
perf_reg_value(&val, user_regs, PERF_REG_RISCV_##r); \
val; \
})
dwarf_regs[0] = 0;
dwarf_regs[1] = REG(RA);
dwarf_regs[2] = REG(SP);
dwarf_regs[3] = REG(GP);
dwarf_regs[4] = REG(TP);
dwarf_regs[5] = REG(T0);
dwarf_regs[6] = REG(T1);
dwarf_regs[7] = REG(T2);
dwarf_regs[8] = REG(S0);
dwarf_regs[9] = REG(S1);
dwarf_regs[10] = REG(A0);
dwarf_regs[11] = REG(A1);
dwarf_regs[12] = REG(A2);
dwarf_regs[13] = REG(A3);
dwarf_regs[14] = REG(A4);
dwarf_regs[15] = REG(A5);
dwarf_regs[16] = REG(A6);
dwarf_regs[17] = REG(A7);
dwarf_regs[18] = REG(S2);
dwarf_regs[19] = REG(S3);
dwarf_regs[20] = REG(S4);
dwarf_regs[21] = REG(S5);
dwarf_regs[22] = REG(S6);
dwarf_regs[23] = REG(S7);
dwarf_regs[24] = REG(S8);
dwarf_regs[25] = REG(S9);
dwarf_regs[26] = REG(S10);
dwarf_regs[27] = REG(S11);
dwarf_regs[28] = REG(T3);
dwarf_regs[29] = REG(T4);
dwarf_regs[30] = REG(T5);
dwarf_regs[31] = REG(T6);
dwfl_thread_state_register_pc(thread, REG(PC));
return dwfl_thread_state_registers(thread, 0, PERF_REG_RISCV_MAX,
dwarf_regs);
}