Commit 7db91e57 authored by Palmer Dabbelt

RISC-V: Task implementation

This patch contains the implementation of tasks on RISC-V, most of which
is concerned with task switching.
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
parent 2129a235
#include <generated/asm-offsets.h>
/*
* Based on arm/arm64/include/asm/current.h
*
* Copyright (C) 2016 ARM
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ASM_CURRENT_H
#define __ASM_CURRENT_H
#include <linux/bug.h>
#include <linux/compiler.h>
#ifndef __ASSEMBLY__
struct task_struct;
/*
* This only works because "struct thread_info" is at offset 0 from "struct
* task_struct". This constraint seems to be necessary on other architectures
* as well, but __switch_to enforces it. We can't check TASK_TI here because
* <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
* task_struct" here due to some header ordering problems.
*/
static __always_inline struct task_struct *get_current(void)
{
register struct task_struct *tp __asm__("tp");
return tp;
}
#define current get_current()
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CURRENT_H */
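get_current() above only works because struct thread_info sits at offset 0 in struct task_struct, and the comment notes that the check cannot live in this header. As a hedged illustration only (not part of this patch; the function name is hypothetical and it assumes THREAD_INFO_IN_TASK, so task_struct has a thread_info member), such a build-time assertion could live in an ordinary C file:

/* Illustrative sketch, not part of this patch. */
#include <linux/bug.h>
#include <linux/stddef.h>
#include <linux/sched.h>

static inline void check_current_layout(void)
{
	/* get_current() casts the tp register straight to a task_struct
	 * pointer, so thread_info must be the first member. */
	BUILD_BUG_ON(offsetof(struct task_struct, thread_info) != 0);
}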
/*
* Copied from arch/arm64/include/asm/kprobes.h
*
* Copyright (C) 2013 Linaro Limited
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#ifndef _RISCV_KPROBES_H
#define _RISCV_KPROBES_H
#include <asm-generic/kprobes.h>
#endif /* _RISCV_KPROBES_H */
/*
* Copyright (C) 2012 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_PROCESSOR_H
#define _ASM_RISCV_PROCESSOR_H
#include <linux/const.h>
#include <asm/ptrace.h>
/*
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
#define STACK_ALIGN 16
#ifndef __ASSEMBLY__
struct task_struct;
struct pt_regs;
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
*/
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
/* CPU-specific state of a task */
struct thread_struct {
/* Callee-saved registers */
unsigned long ra;
unsigned long sp; /* Kernel mode stack */
unsigned long s[12]; /* s[0]: frame pointer */
struct __riscv_d_ext_state fstate;
};
#define INIT_THREAD { \
.sp = sizeof(init_stack) + (long)&init_stack, \
}
#define task_pt_regs(tsk) \
((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
- ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->sepc)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
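/*
 * Illustrative sketch, not part of this patch: task_pt_regs() above locates
 * the saved user register frame at the top of the task's kernel stack,
 * aligned down to STACK_ALIGN.  Lowest address first:
 *
 *   task_stack_page(tsk)                  start of the stack area; the
 *                                         kernel stack grows down toward it
 *   task_pt_regs(tsk)                     struct pt_regs, STACK_ALIGN-aligned
 *   task_stack_page(tsk) + THREAD_SIZE    one past the end of the stack area
 */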
/* Do necessary setup to start up a newly executed thread. */
extern void start_thread(struct pt_regs *regs,
unsigned long pc, unsigned long sp);
/* Free all resources held by a thread. */
static inline void release_thread(struct task_struct *dead_task)
{
}
extern unsigned long get_wchan(struct task_struct *p);
static inline void cpu_relax(void)
{
#ifdef __riscv_muldiv
int dummy;
/* In lieu of a halt instruction, induce a long-latency stall. */
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
barrier();
}
static inline void wait_for_interrupt(void)
{
__asm__ __volatile__ ("wfi");
}
struct device_node;
extern int riscv_of_processor_hart(struct device_node *node);
extern void riscv_fill_hwcap(void);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_PROCESSOR_H */
/*
* Copyright (C) 2012 Regents of the University of California
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_SWITCH_TO_H
#define _ASM_RISCV_SWITCH_TO_H
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
extern void __fstate_save(struct task_struct *save_to);
extern void __fstate_restore(struct task_struct *restore_from);
static inline void __fstate_clean(struct pt_regs *regs)
{
regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
}
static inline void fstate_save(struct task_struct *task,
struct pt_regs *regs)
{
if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
__fstate_save(task);
__fstate_clean(regs);
}
}
static inline void fstate_restore(struct task_struct *task,
struct pt_regs *regs)
{
if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
__fstate_restore(task);
__fstate_clean(regs);
}
}
static inline void __switch_to_aux(struct task_struct *prev,
struct task_struct *next)
{
struct pt_regs *regs;
regs = task_pt_regs(prev);
if (unlikely(regs->sstatus & SR_SD))
fstate_save(prev, regs);
fstate_restore(next, task_pt_regs(next));
}
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
#define switch_to(prev, next, last) \
do { \
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
__switch_to_aux(__prev, __next); \
((last) = __switch_to(__prev, __next)); \
} while (0)
#endif /* _ASM_RISCV_SWITCH_TO_H */
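switch_to() above is what the generic scheduler invokes to change tasks; the following sketch (function name hypothetical, not part of this patch) shows the prev/next/last convention it implements:

/* Illustrative sketch, not part of this patch. */
static inline struct task_struct *example_switch(struct task_struct *prev,
						 struct task_struct *next)
{
	struct task_struct *last;

	/* __switch_to_aux() lazily saves prev's dirty FP state and restores
	 * next's; the assembly __switch_to() then swaps the callee-saved
	 * registers and kernel stacks. */
	switch_to(prev, next, last);

	/* Execution continues here only when "prev" is scheduled in again;
	 * "last" then names the task the CPU ran just before that. */
	return last;
}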
/*
* Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _ASM_RISCV_THREAD_INFO_H
#define _ASM_RISCV_THREAD_INFO_H
#include <asm/page.h>
#include <linux/const.h>
/* thread information allocation */
#define THREAD_SIZE_ORDER (1)
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/csr.h>
typedef unsigned long mm_segment_t;
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - if the members of this struct change, the assembly constants
* in asm-offsets.c must be updated accordingly
* - thread_info is included in task_struct at an offset of 0. This means that
* tp points to both thread_info and task_struct.
*/
struct thread_info {
unsigned long flags; /* low level flags */
int preempt_count; /* 0=>preemptible, <0=>BUG */
mm_segment_t addr_limit;
/*
* These stack pointers are overwritten on every system call or
* exception. SP is also saved to the stack so it can be recovered when
* it is overwritten.
*/
long kernel_sp; /* Kernel stack pointer */
long user_sp; /* User stack pointer */
int cpu;
};
/*
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
#define INIT_THREAD_INFO(tsk) \
{ \
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
}
#define init_stack (init_thread_union.stack)
#endif /* !__ASSEMBLY__ */
/*
* thread information flags
* - these are process state flags that various assembly files may need to
* access
* - pending work-to-be-done flags are in lowest half-word
* - other flags in upper half-word(s)
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_WORK_MASK \
(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
#endif /* _ASM_RISCV_THREAD_INFO_H */
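_TIF_WORK_MASK above collects the flags the return-to-user path has to act on before leaving the kernel. A minimal sketch of dispatching on those bits from C (the function name is hypothetical, schedule() is assumed from <linux/sched.h>, and this is not part of the patch):

/* Illustrative sketch, not part of this patch. */
static void example_work_pending(struct pt_regs *regs, unsigned long flags)
{
	if (flags & _TIF_NEED_RESCHED)
		schedule();	/* give up the CPU before returning to user */
	if (flags & _TIF_SIGPENDING)
		;		/* deliver pending signals using "regs" */
	if (flags & _TIF_NOTIFY_RESUME)
		;		/* run deferred resume-time callbacks */
}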
/*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017 SiFive
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <asm/unistd.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/csr.h>
#include <asm/string.h>
#include <asm/switch_to.h>
extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);
void arch_cpu_idle(void)
{
wait_for_interrupt();
local_irq_enable();
}
void show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
pr_cont("sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
regs->sepc, regs->ra, regs->sp);
pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
regs->gp, regs->tp, regs->t0);
pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
regs->t1, regs->t2, regs->s0);
pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
regs->s1, regs->a0, regs->a1);
pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
regs->a2, regs->a3, regs->a4);
pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
regs->a5, regs->a6, regs->a7);
pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
regs->s2, regs->s3, regs->s4);
pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
regs->s5, regs->s6, regs->s7);
pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
regs->s8, regs->s9, regs->s10);
pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
regs->s11, regs->t3, regs->t4);
pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
regs->t5, regs->t6);
pr_cont("sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
regs->sstatus, regs->sbadaddr, regs->scause);
}
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
regs->sepc = pc;
regs->sp = sp;
set_fs(USER_DS);
}
void flush_thread(void)
{
/*
* Reset FPU context
* frm: round to nearest, ties to even (IEEE default)
* fflags: accrued exceptions cleared
*/
memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
fstate_save(src, task_pt_regs(src));
*dst = *src;
return 0;
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs = task_pt_regs(p);
/* p->thread holds context to be restored by __switch_to() */
if (unlikely(p->flags & PF_KTHREAD)) {
/* Kernel thread */
const register unsigned long gp __asm__ ("gp");
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp;
childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
p->thread.ra = (unsigned long)ret_from_kernel_thread;
p->thread.s[0] = usp; /* fn */
p->thread.s[1] = arg;
} else {
*childregs = *(current_pt_regs());
if (usp) /* User fork */
childregs->sp = usp;
if (clone_flags & CLONE_SETTLS)
childregs->tp = childregs->a5;
childregs->a0 = 0; /* Return value of fork() */
p->thread.ra = (unsigned long)ret_from_fork;
}
p->thread.sp = (unsigned long)childregs; /* kernel sp */
return 0;
}
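For a kernel thread, copy_thread() above stashes the thread function in s[0] and its argument in s[1]; the assembly stub ret_from_kernel_thread consumes them roughly as in this C-level sketch (function name hypothetical, not part of the patch):

/* Illustrative sketch, not part of this patch. */
static void example_kernel_thread_start(struct task_struct *p)
{
	int (*fn)(void *) = (int (*)(void *))p->thread.s[0];
	void *arg = (void *)p->thread.s[1];

	fn(arg);	/* run the kernel-thread body */
	/* If fn() returns, the real stub continues into the normal
	 * exception-return path (used when a kernel thread execs a
	 * user program). */
}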