Commit f183d269 authored by Linus Torvalds

Merge tag 'csky-for-linus-5.7-rc1' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:

 - Add kprobes/uprobes support

 - Add lockdep, rseq, gcov support

 - Fixup init_fpu

 - Fixup ftrace_modify deadlock

 - Fixup speculative execution on IO area

* tag 'csky-for-linus-5.7-rc1' of git://github.com/c-sky/csky-linux:
  csky: Fixup cpu speculative execution to IO area
  csky: Add uprobes support
  csky: Add kprobes supported
  csky: Enable LOCKDEP_SUPPORT
  csky: Enable the gcov function
  csky: Fixup get wrong psr value from phyical reg
  csky/ftrace: Fixup ftrace_modify_code deadlock without CPU_HAS_ICACHE_INS
  csky: Implement ftrace with regs
  csky: Add support for restartable sequence
  csky: Implement ptrace regs and stack API
  csky: Fixup init_fpu compile warning with __init
parents b6ff1070 aefd9461
@@ -3,6 +3,7 @@ config CSKY
def_bool y
select ARCH_32BIT_OFF_T
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_GCOV_PROFILE_ALL
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
select ARCH_USE_BUILTIN_BSWAP
@@ -38,16 +39,22 @@ config CSKY
select HAVE_ARCH_AUDITSYSCALL
select HAVE_COPY_THREAD_TLS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
select HAVE_KPROBES if !CPU_CK610
select HAVE_KPROBES_ON_FTRACE if !CPU_CK610
select HAVE_KRETPROBES if !CPU_CK610
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_DMA_CONTIGUOUS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select MAY_HAVE_SPARSE_IRQ
@@ -65,6 +72,12 @@ config CSKY
select PCI_SYSCALL if PCI
select PCI_MSI if PCI
config LOCKDEP_SUPPORT
def_bool y
config ARCH_SUPPORTS_UPROBES
def_bool y if !CPU_CK610
config CPU_HAS_CACHEV2
bool
......
@@ -172,10 +172,7 @@
addi r6, 0xe
cpwcr r6, cpcr30
-	lsri	r6, 28
-	addi	r6, 2
-	lsli	r6, 28
-	addi	r6, 0xe
+	movi	r6, 0
cpwcr r6, cpcr31
.endm
......
@@ -10,11 +10,6 @@
#define MTCR_DIST 0xC0006420
#define MFCR_DIST 0xC0006020
-void __init init_fpu(void)
-{
-	mtcr("cr<1, 2>", 0);
-}
/*
* fpu_libc_helper() is to help libc to execute:
* - mfcr %a, cr<1, 2>
......
@@ -100,6 +100,66 @@
rte
.endm
.macro SAVE_REGS_FTRACE
subi sp, 152
stw tls, (sp, 0)
stw lr, (sp, 4)
mfcr lr, psr
stw lr, (sp, 12)
addi lr, sp, 152
stw lr, (sp, 16)
stw a0, (sp, 20)
stw a0, (sp, 24)
stw a1, (sp, 28)
stw a2, (sp, 32)
stw a3, (sp, 36)
addi sp, 40
stm r4-r13, (sp)
addi sp, 40
stm r16-r30, (sp)
#ifdef CONFIG_CPU_HAS_HILO
mfhi lr
stw lr, (sp, 60)
mflo lr
stw lr, (sp, 64)
mfcr lr, cr14
stw lr, (sp, 68)
#endif
subi sp, 80
.endm
.macro RESTORE_REGS_FTRACE
ldw tls, (sp, 0)
ldw a0, (sp, 16)
mtcr a0, ss0
#ifdef CONFIG_CPU_HAS_HILO
ldw a0, (sp, 140)
mthi a0
ldw a0, (sp, 144)
mtlo a0
ldw a0, (sp, 148)
mtcr a0, cr14
#endif
ldw a0, (sp, 24)
ldw a1, (sp, 28)
ldw a2, (sp, 32)
ldw a3, (sp, 36)
addi sp, 40
ldm r4-r13, (sp)
addi sp, 40
ldm r16-r30, (sp)
addi sp, 72
mfcr sp, ss0
.endm
.macro SAVE_SWITCH_STACK
subi sp, 64
stm r4-r11, (sp)
@@ -230,11 +290,8 @@
addi r6, 0x1ce
mtcr r6, cr<30, 15> /* Set MSA0 */
-	lsri	r6, 28
-	addi	r6, 2
-	lsli	r6, 28
-	addi	r6, 0x1ce
-	mtcr	r6, cr<31, 15> /* Set MSA1 */
+	movi	r6, 0
+	mtcr	r6, cr<31, 15> /* Clr MSA1 */
/* enable MMU */
mfcr r6, cr18
......
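For orientation: the 152-byte frame that SAVE_REGS_FTRACE builds above is sized to match struct pt_regs in the abiv2 layout (arch/csky/include/uapi/asm/ptrace.h), which is also where asm-offsets.c gets PT_FRAME_SIZE below. An annotated sketch of how the store offsets line up with the structure fields (not part of the patch):

struct pt_regs {
	unsigned long tls;		/* (sp, 0) */
	unsigned long lr;		/* (sp, 4) */
	unsigned long pc;		/* (sp, 8) */
	unsigned long sr;		/* (sp, 12), saved psr */
	unsigned long usp;		/* (sp, 16), sp before the subi */
	unsigned long orig_a0;		/* (sp, 20) */
	unsigned long a0, a1, a2, a3;	/* (sp, 24) .. (sp, 36) */
	unsigned long regs[10];		/* r4-r13, (sp, 40) .. (sp, 76) */
	unsigned long exregs[15];	/* r16-r30, (sp, 80) .. (sp, 136) */
	unsigned long rhi, rlo, dcsr;	/* (sp, 140) .. (sp, 148) */
};	/* sizeof(struct pt_regs) == 152 == PT_FRAME_SIZE */

This is why the regs-caller paths can hand the stack pointer straight to C code as a struct pt_regs *.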
@@ -9,7 +9,8 @@
int fpu_libc_helper(struct pt_regs *regs);
void fpu_fpe(struct pt_regs *regs);
-void __init init_fpu(void);
+static inline void init_fpu(void) { mtcr("cr<1, 2>", 0); }
void save_to_user_fp(struct user_fp *user_fp);
void restore_from_user_fp(struct user_fp *user_fp);
......
@@ -3,6 +3,8 @@
#include <linux/linkage.h>
#include <asm/ftrace.h>
#include <abi/entry.h>
#include <asm/asm-offsets.h>
/*
* csky-gcc with -pg will put the following asm after prologue:
@@ -44,6 +46,22 @@
jmp t1
.endm
.macro mcount_enter_regs
subi sp, 8
stw lr, (sp, 0)
stw r8, (sp, 4)
SAVE_REGS_FTRACE
.endm
.macro mcount_exit_regs
RESTORE_REGS_FTRACE
ldw t1, (sp, 0)
ldw r8, (sp, 4)
ldw lr, (sp, 8)
addi sp, 12
jmp t1
.endm
.macro save_return_regs
subi sp, 16
stw a0, (sp, 0)
@@ -122,6 +140,8 @@ ENTRY(ftrace_caller)
ldw a0, (sp, 16)
subi a0, 4
ldw a1, (sp, 24)
lrw a2, function_trace_op
ldw a2, (a2, 0)
nop
GLOBAL(ftrace_call)
@@ -157,3 +177,31 @@ ENTRY(return_to_handler)
jmp lr
END(return_to_handler)
#endif
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
ENTRY(ftrace_regs_caller)
mcount_enter_regs
lrw t1, PT_FRAME_SIZE
add t1, sp
ldw a0, (t1, 0)
subi a0, 4
ldw a1, (t1, 8)
lrw a2, function_trace_op
ldw a2, (a2, 0)
mov a3, sp
nop
GLOBAL(ftrace_regs_call)
nop32_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
nop
GLOBAL(ftrace_graph_regs_call)
nop32_stub
#endif
mcount_exit_regs
ENDPROC(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
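With DYNAMIC_FTRACE_WITH_REGS wired up, any ftrace_ops that sets FTRACE_OPS_FL_SAVE_REGS is routed through ftrace_regs_caller above and receives the full register set. A minimal sketch of such a consumer (illustrative module fragment, not part of this series; callback signature as of v5.7):

#include <linux/ftrace.h>

static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *ops, struct pt_regs *regs)
{
	/* regs is non-NULL here because we asked for SAVE_REGS */
	if (regs)
		pr_info("0x%lx called from 0x%lx\n", ip, parent_ip);
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

/* register_ftrace_function(&my_ops); */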
@@ -10,6 +10,8 @@
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define MCOUNT_ADDR ((unsigned long)_mcount)
#ifndef __ASSEMBLY__
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CSKY_KPROBES_H
#define __ASM_CSKY_KPROBES_H
#include <asm-generic/kprobes.h>
#ifdef CONFIG_KPROBES
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 1
#define flush_insn_slot(p) do { } while (0)
#define kretprobe_blacklist_size 0
#include <asm/probes.h>
struct prev_kprobe {
struct kprobe *kp;
unsigned int status;
};
/* Single step context for kprobe */
struct kprobe_step_ctx {
unsigned long ss_pending;
unsigned long match_addr;
};
/* per-cpu kprobe control block */
struct kprobe_ctlblk {
unsigned int kprobe_status;
unsigned long saved_sr;
struct prev_kprobe prev_kprobe;
struct kprobe_step_ctx ss_ctx;
};
void arch_remove_kprobe(struct kprobe *p);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
int kprobe_breakpoint_handler(struct pt_regs *regs);
int kprobe_single_step_handler(struct pt_regs *regs);
void kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
#endif /* __ASM_CSKY_KPROBES_H */
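These arch hooks slot under the generic kprobes core, so the usual registration interface now works on csky (except CK610). A minimal sketch, with a placeholder symbol name:

#include <linux/kprobes.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at pc=0x%lx\n", instruction_pointer(regs));
	return 0;	/* continue: single-step or simulate the insn */
}

static struct kprobe my_kp = {
	.symbol_name	= "_do_fork",	/* example symbol */
	.pre_handler	= my_pre,
};

/* register_kprobe(&my_kp); ... unregister_kprobe(&my_kp); */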
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_CSKY_PROBES_H
#define __ASM_CSKY_PROBES_H
typedef u32 probe_opcode_t;
typedef void (probes_handler_t) (u32 opcode, long addr, struct pt_regs *);
/* architecture specific copy of original instruction */
struct arch_probe_insn {
probe_opcode_t *insn;
probes_handler_t *handler;
/* restore address after simulation */
unsigned long restore;
};
#ifdef CONFIG_KPROBES
typedef u32 kprobe_opcode_t;
struct arch_specific_insn {
struct arch_probe_insn api;
};
#endif
#endif /* __ASM_CSKY_PROBES_H */
@@ -43,6 +43,7 @@ extern struct cpuinfo_csky cpu_data[];
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long sr; /* saved status register */
unsigned long trap_no; /* saved trap number */
/* FPU regs */
struct user_fp __aligned(16) user_fp;
......
@@ -7,11 +7,14 @@
#include <uapi/asm/ptrace.h>
#include <asm/traps.h>
#include <linux/types.h>
#include <linux/compiler.h>
#ifndef __ASSEMBLY__
#define PS_S 0x80000000 /* Supervisor Mode */
#define USR_BKPT 0x1464
#define arch_has_single_step() (1)
#define current_pt_regs() \
({ (struct pt_regs *)((char *)current_thread_info() + THREAD_SIZE) - 1; })
@@ -22,6 +25,18 @@
#define instruction_pointer(regs) ((regs)->pc)
#define profile_pc(regs) instruction_pointer(regs)
static inline void instruction_pointer_set(struct pt_regs *regs,
unsigned long val)
{
regs->pc = val;
}
#if defined(__CSKYABIV2__)
#define MAX_REG_OFFSET offsetof(struct pt_regs, dcsr)
#else
#define MAX_REG_OFFSET offsetof(struct pt_regs, regs[9])
#endif
static inline bool in_syscall(struct pt_regs const *regs)
{
return ((regs->sr >> 16) & 0xff) == VEC_TRAP0;
@@ -37,5 +52,33 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->a0;
}
/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->usp;
}
extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
unsigned int n);
/*
* regs_get_register() - get register value from its offset
* @regs: pt_regs from which register value is gotten
* @offset: offset of the register.
*
* regs_get_register() returns the value of the register whose offset in
* struct pt_regs is @offset.
* If @offset is bigger than MAX_REG_OFFSET, this returns 0.
*/
static inline unsigned long regs_get_register(struct pt_regs *regs,
unsigned int offset)
{
if (unlikely(offset > MAX_REG_OFFSET))
return 0;
return *(unsigned long *)((unsigned long)regs + offset);
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_CSKY_PTRACE_H */
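The pair regs_query_register_offset()/regs_get_register() (implemented in kernel/ptrace.c below) lets generic code such as the kprobe event parser read a register by name; roughly:

/* Illustrative helper, not part of the patch. */
static unsigned long read_reg_by_name(struct pt_regs *regs, const char *name)
{
	int offset = regs_query_register_offset(name);

	if (offset < 0)
		return 0;	/* unknown register name */
	return regs_get_register(regs, offset);
}

/* e.g. read_reg_by_name(regs, "lr"); */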
@@ -57,6 +57,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_TRACE 3 /* syscall trace active */
#define TIF_SYSCALL_TRACEPOINT 4 /* syscall tracepoint instrumentation */
#define TIF_SYSCALL_AUDIT 5 /* syscall auditing */
#define TIF_UPROBE 6 /* uprobe breakpoint or singlestep */
#define TIF_POLLING_NRFLAG 16 /* poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
@@ -68,6 +69,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_MEMDIE (1 << TIF_MEMDIE)
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
......
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_CSKY_UPROBES_H
#define __ASM_CSKY_UPROBES_H
#include <asm/probes.h>
#define MAX_UINSN_BYTES 4
#define UPROBE_SWBP_INSN USR_BKPT
#define UPROBE_SWBP_INSN_SIZE 2
#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
typedef u32 uprobe_opcode_t;
struct arch_uprobe_task {
unsigned long saved_trap_no;
};
struct arch_uprobe {
union {
u8 insn[MAX_UINSN_BYTES];
u8 ixol[MAX_UINSN_BYTES];
};
struct arch_probe_insn api;
unsigned long insn_size;
bool simulate;
};
int uprobe_breakpoint_handler(struct pt_regs *regs);
int uprobe_single_step_handler(struct pt_regs *regs);
#endif /* __ASM_CSKY_UPROBES_H */
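The kernel-side consumer of these hooks is the generic uprobes core. Direct in-kernel registration looks roughly like this (sketch; the inode and offset identify the probed instruction in the target ELF file, and most users go through the perf/tracefs uprobe interface instead):

#include <linux/uprobes.h>

static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
{
	pr_info("uprobe hit, pc=0x%lx\n", instruction_pointer(regs));
	return 0;
}

static struct uprobe_consumer my_uc = {
	.handler = my_handler,
};

/* uprobe_register(inode, offset, &my_uc); */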
@@ -4,6 +4,7 @@ extra-y := head.o vmlinux.lds
obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
obj-y += power.o syscall.o syscall_table.o setup.o
obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
obj-y += probes/
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
......
@@ -72,6 +72,7 @@ int main(void)
DEFINE(PT_RLO, offsetof(struct pt_regs, rlo));
#endif
DEFINE(PT_USP, offsetof(struct pt_regs, usp));
DEFINE(PT_FRAME_SIZE, sizeof(struct pt_regs));
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t,
......
@@ -128,7 +128,10 @@ tlbop_end 1
ENTRY(csky_systemcall)
SAVE_ALL TRAP0_SIZE
zero_fp
#ifdef CONFIG_DEBUG_RSEQ
mov a0, sp
jbsr rseq_syscall
#endif
psrset ee, ie
lrw r11, __NR_syscalls
@@ -218,10 +221,17 @@ ret_from_exception:
andn r9, r10
ldw r12, (r9, TINFO_FLAGS)
-	andi	r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED)
+	andi	r12, (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | _TIF_UPROBE)
cmpnei r12, 0
bt exit_work
1:
#ifdef CONFIG_TRACE_IRQFLAGS
ld r10, (sp, LSAVE_PSR)
btsti r10, 6
bf 2f
jbsr trace_hardirqs_on
2:
#endif
RESTORE_ALL
exit_work:
@@ -277,6 +287,10 @@ ENTRY(csky_irq)
zero_fp
psrset ee
#ifdef CONFIG_TRACE_IRQFLAGS
jbsr trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPTION
mov r9, sp /* Get current stack pointer */
bmaski r10, THREAD_SHIFT
......
@@ -3,6 +3,7 @@
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -126,6 +127,9 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
{
int ret = ftrace_modify_code((unsigned long)&ftrace_call,
(unsigned long)func, true, true);
if (!ret)
ret = ftrace_modify_code((unsigned long)&ftrace_regs_call,
(unsigned long)func, true, true);
return ret;
}
@@ -135,6 +139,14 @@ int __init ftrace_dyn_arch_init(void)
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
return ftrace_modify_code(rec->ip, addr, true, true);
}
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
@@ -190,5 +202,35 @@ int ftrace_disable_ftrace_graph_caller(void)
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#ifndef CONFIG_CPU_HAS_ICACHE_INS
struct ftrace_modify_param {
int command;
atomic_t cpu_count;
};
static int __ftrace_modify_code(void *data)
{
struct ftrace_modify_param *param = data;
if (atomic_inc_return(&param->cpu_count) == 1) {
ftrace_modify_all_code(param->command);
atomic_inc(&param->cpu_count);
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
local_icache_inv_all(NULL);
}
return 0;
}
void arch_ftrace_update_code(int command)
{
struct ftrace_modify_param param = { command, ATOMIC_INIT(0) };
stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
}
#endif
/* _mcount is defined in abi's mcount.S */
EXPORT_SYMBOL(_mcount);
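Context for the stop_machine() override above: without CPU_HAS_ICACHE_INS, icache_inv_range() falls back to a cross-CPU broadcast (see mm/cachev2.c below), and issuing that from the generic ftrace update path deadlocks, because the other CPUs are already spinning inside stop_machine() with interrupts off. This is the "ftrace_modify deadlock" named in the merge summary; having each CPU invalidate its own icache at the rendezvous removes the cross-call. For contrast, the weak generic version being overridden is roughly (kernel/trace/ftrace.c, v5.7):

void __weak arch_ftrace_update_code(int command)
{
	ftrace_run_stop_machine(command);
}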
@@ -21,6 +21,11 @@ END(_start)
ENTRY(_start_smp_secondary)
SETUP_MMU
/* copy msa1 from CPU0 */
lrw r6, secondary_msa1
ld.w r6, (r6, 0)
mtcr r6, cr<31, 15>
/* set stack point */
lrw r6, secondary_stack
ld.w r6, (r6, 0)
......
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o simulate-insn.o
obj-$(CONFIG_KPROBES) += kprobes_trampoline.o
obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o simulate-insn.o
CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <asm/sections.h>
#include "decode-insn.h"
#include "simulate-insn.h"
/* Return:
* INSN_REJECTED If the instruction is one that cannot be kprobed,
* INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
*/
enum probe_insn __kprobes
csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *api)
{
probe_opcode_t insn = le32_to_cpu(*addr);
CSKY_INSN_SET_SIMULATE(br16, insn);
CSKY_INSN_SET_SIMULATE(bt16, insn);
CSKY_INSN_SET_SIMULATE(bf16, insn);
CSKY_INSN_SET_SIMULATE(jmp16, insn);
CSKY_INSN_SET_SIMULATE(jsr16, insn);
CSKY_INSN_SET_SIMULATE(lrw16, insn);
CSKY_INSN_SET_SIMULATE(pop16, insn);
CSKY_INSN_SET_SIMULATE(br32, insn);
CSKY_INSN_SET_SIMULATE(bt32, insn);
CSKY_INSN_SET_SIMULATE(bf32, insn);
CSKY_INSN_SET_SIMULATE(jmp32, insn);
CSKY_INSN_SET_SIMULATE(jsr32, insn);
CSKY_INSN_SET_SIMULATE(lrw32, insn);
CSKY_INSN_SET_SIMULATE(pop32, insn);
CSKY_INSN_SET_SIMULATE(bez32, insn);
CSKY_INSN_SET_SIMULATE(bnez32, insn);
CSKY_INSN_SET_SIMULATE(bnezad32, insn);
CSKY_INSN_SET_SIMULATE(bhsz32, insn);
CSKY_INSN_SET_SIMULATE(bhz32, insn);
CSKY_INSN_SET_SIMULATE(blsz32, insn);
CSKY_INSN_SET_SIMULATE(blz32, insn);
CSKY_INSN_SET_SIMULATE(bsr32, insn);
CSKY_INSN_SET_SIMULATE(jmpi32, insn);
CSKY_INSN_SET_SIMULATE(jsri32, insn);
return INSN_GOOD;
}
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __CSKY_KERNEL_KPROBES_DECODE_INSN_H
#define __CSKY_KERNEL_KPROBES_DECODE_INSN_H
#include <asm/sections.h>
#include <asm/kprobes.h>
enum probe_insn {
INSN_REJECTED,
INSN_GOOD_NO_SLOT,
INSN_GOOD,
};
#define is_insn32(insn) ((insn & 0xc000) == 0xc000)
enum probe_insn __kprobes
csky_probe_decode_insn(probe_opcode_t *addr, struct arch_probe_insn *asi);
#endif /* __CSKY_KERNEL_KPROBES_DECODE_INSN_H */
// SPDX-License-Identifier: GPL-2.0
#include <linux/kprobes.h>
int arch_check_ftrace_location(struct kprobe *p)
{
if (ftrace_location((unsigned long)p->addr))
p->flags |= KPROBE_FLAG_FTRACE;
return 0;
}
/* Ftrace callback handler for kprobes -- called with preemption disabled */
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
bool lr_saver = false;
struct kprobe *p;
struct kprobe_ctlblk *kcb;
/* Preempt is disabled by ftrace */
p = get_kprobe((kprobe_opcode_t *)ip);
if (!p) {
p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
if (unlikely(!p) || kprobe_disabled(p))
return;
lr_saver = true;
}
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
unsigned long orig_ip = instruction_pointer(regs);
if (lr_saver)
ip -= MCOUNT_INSN_SIZE;
instruction_pointer_set(regs, ip);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) {
/*
* Emulate singlestep (and also recover regs->pc)
* as if there is a nop
*/
instruction_pointer_set(regs,
(unsigned long)p->addr + MCOUNT_INSN_SIZE);
if (unlikely(p->post_handler)) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
instruction_pointer_set(regs, orig_ip);
}
/*
* If pre_handler returns !0, it changes regs->pc. We have to
* skip emulating post_handler.
*/
__this_cpu_write(current_kprobe, NULL);
}
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.api.insn = NULL;
return 0;
}
// SPDX-License-Identifier: GPL-2.0+
#include <linux/kprobes.h>
#include <linux/extable.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <asm/ptrace.h>
#include <linux/uaccess.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include "decode-insn.h"
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
struct csky_insn_patch {
kprobe_opcode_t *addr;
u32 opcode;
atomic_t cpu_count;
};
static int __kprobes patch_text_cb(void *priv)
{
struct csky_insn_patch *param = priv;
unsigned int addr = (unsigned int)param->addr;
if (atomic_inc_return(&param->cpu_count) == 1) {
*(u16 *) addr = cpu_to_le16(param->opcode);
dcache_wb_range(addr, addr + 2);
atomic_inc(&param->cpu_count);
} else {
while (atomic_read(&param->cpu_count) <= num_online_cpus())
cpu_relax();
}
icache_inv_range(addr, addr + 2);
return 0;
}
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
struct csky_insn_patch param = { addr, opcode, ATOMIC_INIT(0) };
return stop_machine_cpuslocked(patch_text_cb, &param, cpu_online_mask);
}
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
p->ainsn.api.restore = (unsigned long)p->addr + offset;
patch_text(p->ainsn.api.insn, p->opcode);
}
static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
p->ainsn.api.restore = 0;
}
static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (p->ainsn.api.handler)
p->ainsn.api.handler((u32)p->opcode, (long)p->addr, regs);
post_kprobe_handler(kcb, regs);
}
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
unsigned long probe_addr = (unsigned long)p->addr;
if (probe_addr & 0x1) {
pr_warn("Address not aligned.\n");
return -EINVAL;
}
/* copy instruction */
p->opcode = le32_to_cpu(*p->addr);
/* decode instruction */
switch (csky_probe_decode_insn(p->addr, &p->ainsn.api)) {
case INSN_REJECTED: /* insn not supported */
return -EINVAL;
case INSN_GOOD_NO_SLOT: /* insn need simulation */
p->ainsn.api.insn = NULL;
break;
case INSN_GOOD: /* instruction uses slot */
p->ainsn.api.insn = get_insn_slot();
if (!p->ainsn.api.insn)
return -ENOMEM;
break;
}
/* prepare the instruction */
if (p->ainsn.api.insn)
arch_prepare_ss_slot(p);
else
arch_prepare_simulate(p);
return 0;
}
/* install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
patch_text(p->addr, USR_BKPT);
}
/* remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
patch_text(p->addr, p->opcode);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
static void __kprobes set_current_kprobe(struct kprobe *p)
{
__this_cpu_write(current_kprobe, p);
}
/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupts on the local CPU, an interrupt could arrive
* in the window between the exception return and the start of the
* out-of-line single step, which would result in wrongly single-stepping
* into the interrupt handler.
*/
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
kcb->saved_sr = regs->sr;
regs->sr &= ~BIT(6);
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
regs->sr = kcb->saved_sr;
}
static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p)
{
unsigned long offset = is_insn32(p->opcode) ? 4 : 2;
kcb->ss_ctx.ss_pending = true;
kcb->ss_ctx.match_addr = addr + offset;
}
static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
kcb->ss_ctx.ss_pending = false;
kcb->ss_ctx.match_addr = 0;
}
#define TRACE_MODE_SI BIT(14)
#define TRACE_MODE_MASK ~(0x3 << 14)
#define TRACE_MODE_RUN 0
static void __kprobes setup_singlestep(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb, int reenter)
{
unsigned long slot;
if (reenter) {
save_previous_kprobe(kcb);
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_REENTER;
} else {
kcb->kprobe_status = KPROBE_HIT_SS;
}
if (p->ainsn.api.insn) {
/* prepare for single stepping */
slot = (unsigned long)p->ainsn.api.insn;
set_ss_context(kcb, slot, p); /* mark pending ss */
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
instruction_pointer_set(regs, slot);
} else {
/* insn simulation */
arch_simulate_insn(p, regs);
}
}
static int __kprobes reenter_kprobe(struct kprobe *p,
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
switch (kcb->kprobe_status) {
case KPROBE_HIT_SSDONE:
case KPROBE_HIT_ACTIVE:
kprobes_inc_nmissed_count(p);
setup_singlestep(p, regs, kcb, 1);
break;
case KPROBE_HIT_SS:
case KPROBE_REENTER:
pr_warn("Unrecoverable kprobe detected.\n");
dump_kprobe(p);
BUG();
break;
default:
WARN_ON(1);
return 0;
}
return 1;
}
static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
if (!cur)
return;
/* return addr restore if non-branching insn */
if (cur->ainsn.api.restore != 0)
regs->pc = cur->ainsn.api.restore;
/* restore back original saved kprobe variables and continue */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
return;
}
/* call post handler */
kcb->kprobe_status = KPROBE_HIT_SSDONE;
if (cur->post_handler) {
/* post_handler can hit breakpoint and single step
* again, so we enable D-flag for recursive exception.
*/
cur->post_handler(cur, regs, 0);
}
reset_current_kprobe();
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
switch (kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe and the ip points back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->pc = (unsigned long) cur->addr;
if (!instruction_pointer(regs))
BUG();
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting,
* we can also use npre/npostfault count for accounting
* these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
if (fixup_exception(regs))
return 1;
}
return 0;
}
int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs)
{
struct kprobe *p, *cur_kprobe;
struct kprobe_ctlblk *kcb;
unsigned long addr = instruction_pointer(regs);
kcb = get_kprobe_ctlblk();
cur_kprobe = kprobe_running();
p = get_kprobe((kprobe_opcode_t *) addr);
if (p) {
if (cur_kprobe) {
if (reenter_kprobe(p, regs, kcb))
return 1;
} else {
/* Probe hit */
set_current_kprobe(p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
/*
* If we have no pre-handler or it returned 0, we
* continue with normal processing. If we have a
* pre-handler and it returned non-zero, it will
* modify the execution path and no need to single
* stepping. Let's just reset current kprobe and exit.
*
* pre_handler can hit a breakpoint and can step thru
* before return.
*/
if (!p->pre_handler || !p->pre_handler(p, regs))
setup_singlestep(p, regs, kcb, 0);
else
reset_current_kprobe();
}
return 1;
}
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
* Return back to original instruction, and continue.
*/
return 0;
}
int __kprobes
kprobe_single_step_handler(struct pt_regs *regs)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if ((kcb->ss_ctx.ss_pending)
&& (kcb->ss_ctx.match_addr == instruction_pointer(regs))) {
clear_ss_context(kcb); /* clear pending ss */
kprobes_restore_local_irqflag(kcb, regs);
regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
post_kprobe_handler(kcb, regs);
return 1;
}
return 0;
}
/*
* Provide a blacklist of symbols identifying ranges which cannot be kprobed.
* This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
*/
int __init arch_populate_kprobe_blacklist(void)
{
int ret;
ret = kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
(unsigned long)__irqentry_text_end);
return ret;
}
void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =
(unsigned long)&kretprobe_trampoline;
kprobe_opcode_t *correct_ret_addr = NULL;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path have
* return probes installed on them, and/or more than one
* return probe was registered for a target function.
*
* We can handle this because:
* - instances are always pushed into the head of the list
* - when multiple return probes are registered for the same
* function, the (chronologically) first instance's ret_addr
* will be the real return address, and all the rest will
* point to kretprobe_trampoline.
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
correct_ret_addr = ri->ret_addr;
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
orig_ret_address = (unsigned long)ri->ret_addr;
if (ri->rp && ri->rp->handler) {
__this_cpu_write(current_kprobe, &ri->rp->kp);
get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
ri->ret_addr = correct_ret_addr;
ri->rp->handler(ri, regs);
__this_cpu_write(current_kprobe, NULL);
}
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_hash_unlock(current, &flags);
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
return (void *)orig_ret_address;
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)regs->lr;
regs->lr = (unsigned long) &kretprobe_trampoline;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
return 0;
}
int __init arch_init_kprobes(void)
{
return 0;
}
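trampoline_probe_handler() above is invoked from kretprobe_trampoline (next file) and returns the recovered return address. Using it goes through the standard kretprobe API; a sketch with a placeholder symbol:

#include <linux/kprobes.h>

static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	pr_info("return value = %lu\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe my_rp = {
	.kp.symbol_name	= "_do_fork",	/* example symbol */
	.handler	= my_ret,
	.maxactive	= 20,
};

/* register_kretprobe(&my_rp); */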
/* SPDX-License-Identifier: GPL-2.0+ */
#include <linux/linkage.h>
#include <abi/entry.h>
ENTRY(kretprobe_trampoline)
SAVE_REGS_FTRACE
mov a0, sp /* pt_regs */
jbsr trampoline_probe_handler
/* use the result as the return-address */
mov lr, a0
RESTORE_REGS_FTRACE
rts
ENDPROC(kretprobe_trampoline)
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include "decode-insn.h"
#include "simulate-insn.h"
static inline bool csky_insn_reg_get_val(struct pt_regs *regs,
unsigned long index,
unsigned long *ptr)
{
if (index < 14)
*ptr = *(&regs->a0 + index);
if (index > 15 && index < 31)
*ptr = *(&regs->exregs[0] + index - 16);
switch (index) {
case 14:
*ptr = regs->usp;
break;
case 15:
*ptr = regs->lr;
break;
case 31:
*ptr = regs->tls;
break;
default:
goto fail;
}
return true;
fail:
return false;
}
static inline bool csky_insn_reg_set_val(struct pt_regs *regs,
unsigned long index,
unsigned long val)
{
if (index < 14)
*(&regs->a0 + index) = val;
if (index > 15 && index < 31)
*(&regs->exregs[0] + index - 16) = val;
switch (index) {
case 14:
regs->usp = val;
break;
case 15:
regs->lr = val;
break;
case 31:
regs->tls = val;
break;
default:
goto fail;
}
return true;
fail:
return false;
}
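/*
 * Index mapping used by the two helpers above (ABIv2 register file):
 *   0-13  -> regs->a0..a3, then regs->regs[0..9]  (r0-r13)
 *   14    -> regs->usp                            (r14, stack pointer)
 *   15    -> regs->lr                             (r15, link register)
 *   16-30 -> regs->exregs[0..14]                  (r16-r30)
 *   31    -> regs->tls
 */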
void __kprobes
simulate_br16(u32 opcode, long addr, struct pt_regs *regs)
{
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0x3ff) << 1, 9));
}
void __kprobes
simulate_br32(u32 opcode, long addr, struct pt_regs *regs)
{
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
}
void __kprobes
simulate_bt16(u32 opcode, long addr, struct pt_regs *regs)
{
if (regs->sr & 1)
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0x3ff) << 1, 9));
else
instruction_pointer_set(regs, addr + 2);
}
void __kprobes
simulate_bt32(u32 opcode, long addr, struct pt_regs *regs)
{
if (regs->sr & 1)
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_bf16(u32 opcode, long addr, struct pt_regs *regs)
{
if (!(regs->sr & 1))
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0x3ff) << 1, 9));
else
instruction_pointer_set(regs, addr + 2);
}
void __kprobes
simulate_bf32(u32 opcode, long addr, struct pt_regs *regs)
{
if (!(regs->sr & 1))
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_jmp16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = (opcode >> 2) & 0xf;
csky_insn_reg_get_val(regs, tmp, &tmp);
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_jmp32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_jsr16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = (opcode >> 2) & 0xf;
csky_insn_reg_get_val(regs, tmp, &tmp);
regs->lr = addr + 2;
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_jsr32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
regs->lr = addr + 4;
instruction_pointer_set(regs, tmp & 0xfffffffe);
}
void __kprobes
simulate_lrw16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long tmp = (opcode & 0x300) >> 3;
unsigned long offset = ((opcode & 0x1f) | tmp) << 2;
tmp = (opcode & 0xe0) >> 5;
val = *(unsigned int *)(instruction_pointer(regs) + offset);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_lrw32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long offset = (opcode & 0xffff0000) >> 14;
unsigned long tmp = opcode & 0x0000001f;
val = *(unsigned int *)
((instruction_pointer(regs) + offset) & 0xfffffffc);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_pop16(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long *tmp = (unsigned long *)regs->usp;
int i;
for (i = 0; i < (opcode & 0xf); i++) {
csky_insn_reg_set_val(regs, i + 4, *tmp);
tmp += 1;
}
if (opcode & 0x10) {
csky_insn_reg_set_val(regs, 15, *tmp);
tmp += 1;
}
regs->usp = (unsigned long)tmp;
instruction_pointer_set(regs, regs->lr);
}
void __kprobes
simulate_pop32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long *tmp = (unsigned long *)regs->usp;
int i;
for (i = 0; i < ((opcode & 0xf0000) >> 16); i++) {
csky_insn_reg_set_val(regs, i + 4, *tmp);
tmp += 1;
}
if (opcode & 0x100000) {
csky_insn_reg_set_val(regs, 15, *tmp);
tmp += 1;
}
for (i = 0; i < ((opcode & 0xe00000) >> 21); i++) {
csky_insn_reg_set_val(regs, i + 16, *tmp);
tmp += 1;
}
if (opcode & 0x1000000) {
csky_insn_reg_set_val(regs, 29, *tmp);
tmp += 1;
}
regs->usp = (unsigned long)tmp;
instruction_pointer_set(regs, regs->lr);
}
void __kprobes
simulate_bez32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
if (tmp == 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_bnez32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
csky_insn_reg_get_val(regs, tmp, &tmp);
if (tmp != 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
}
void __kprobes
simulate_bnezad32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
val -= 1;
if (val > 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_bhsz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val >= 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_bhz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val > 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_blsz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val <= 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_blz32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp = opcode & 0x1f;
unsigned long val;
csky_insn_reg_get_val(regs, tmp, &val);
if (val < 0) {
instruction_pointer_set(regs,
addr + sign_extend32((opcode & 0xffff0000) >> 15, 15));
} else
instruction_pointer_set(regs, addr + 4);
csky_insn_reg_set_val(regs, tmp, val);
}
void __kprobes
simulate_bsr32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long tmp;
tmp = (opcode & 0xffff) << 16;
tmp |= (opcode & 0xffff0000) >> 16;
instruction_pointer_set(regs,
addr + sign_extend32((tmp & 0x3ffffff) << 1, 15));
regs->lr = addr + 4;
}
void __kprobes
simulate_jmpi32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long offset = ((opcode & 0xffff0000) >> 14);
val = *(unsigned int *)
((instruction_pointer(regs) + offset) & 0xfffffffc);
instruction_pointer_set(regs, val);
}
void __kprobes
simulate_jsri32(u32 opcode, long addr, struct pt_regs *regs)
{
unsigned long val;
unsigned long offset = ((opcode & 0xffff0000) >> 14);
val = *(unsigned int *)
((instruction_pointer(regs) + offset) & 0xfffffffc);
regs->lr = addr + 4;
instruction_pointer_set(regs, val);
}
/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
#define __CSKY_KERNEL_PROBES_SIMULATE_INSN_H
#define __CSKY_INSN_FUNCS(name, mask, val) \
static __always_inline bool csky_insn_is_##name(probe_opcode_t code) \
{ \
BUILD_BUG_ON(~(mask) & (val)); \
return (code & (mask)) == (val); \
} \
void simulate_##name(u32 opcode, long addr, struct pt_regs *regs);
#define CSKY_INSN_SET_SIMULATE(name, code) \
do { \
if (csky_insn_is_##name(code)) { \
api->handler = simulate_##name; \
return INSN_GOOD_NO_SLOT; \
} \
} while (0)
__CSKY_INSN_FUNCS(br16, 0xfc00, 0x0400)
__CSKY_INSN_FUNCS(bt16, 0xfc00, 0x0800)
__CSKY_INSN_FUNCS(bf16, 0xfc00, 0x0c00)
__CSKY_INSN_FUNCS(jmp16, 0xffc3, 0x7800)
__CSKY_INSN_FUNCS(jsr16, 0xffc3, 0x7801)
__CSKY_INSN_FUNCS(lrw16, 0xfc00, 0x1000)
__CSKY_INSN_FUNCS(pop16, 0xffe0, 0x1480)
__CSKY_INSN_FUNCS(br32, 0x0000ffff, 0x0000e800)
__CSKY_INSN_FUNCS(bt32, 0x0000ffff, 0x0000e860)
__CSKY_INSN_FUNCS(bf32, 0x0000ffff, 0x0000e840)
__CSKY_INSN_FUNCS(jmp32, 0xffffffe0, 0x0000e8c0)
__CSKY_INSN_FUNCS(jsr32, 0xffffffe0, 0x0000e8e0)
__CSKY_INSN_FUNCS(lrw32, 0x0000ffe0, 0x0000ea80)
__CSKY_INSN_FUNCS(pop32, 0xfe00ffff, 0x0000ebc0)
__CSKY_INSN_FUNCS(bez32, 0x0000ffe0, 0x0000e900)
__CSKY_INSN_FUNCS(bnez32, 0x0000ffe0, 0x0000e920)
__CSKY_INSN_FUNCS(bnezad32, 0x0000ffe0, 0x0000e820)
__CSKY_INSN_FUNCS(bhsz32, 0x0000ffe0, 0x0000e9a0)
__CSKY_INSN_FUNCS(bhz32, 0x0000ffe0, 0x0000e940)
__CSKY_INSN_FUNCS(blsz32, 0x0000ffe0, 0x0000e960)
__CSKY_INSN_FUNCS(blz32, 0x0000ffe0, 0x0000e980)
__CSKY_INSN_FUNCS(bsr32, 0x0000fc00, 0x0000e000)
__CSKY_INSN_FUNCS(jmpi32, 0x0000ffff, 0x0000eac0)
__CSKY_INSN_FUNCS(jsri32, 0x0000ffff, 0x0000eae0)
#endif /* __CSKY_KERNEL_PROBES_SIMULATE_INSN_H */
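Each __CSKY_INSN_FUNCS() line above expands into an inline opcode predicate plus the prototype of the matching simulator; the br16 entry, for example, becomes:

static __always_inline bool csky_insn_is_br16(probe_opcode_t code)
{
	BUILD_BUG_ON(~(0xfc00) & (0x0400));	/* mask must cover val */
	return (code & 0xfc00) == 0x0400;
}
void simulate_br16(u32 opcode, long addr, struct pt_regs *regs);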
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014-2016 Pratyush Anand <panand@redhat.com>
*/
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <asm/cacheflush.h>
#include "decode-insn.h"
#define UPROBE_TRAP_NR UINT_MAX
unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs);
}
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
unsigned long addr)
{
probe_opcode_t insn;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
auprobe->insn_size = is_insn32(insn) ? 4 : 2;
switch (csky_probe_decode_insn(&insn, &auprobe->api)) {
case INSN_REJECTED:
return -EINVAL;
case INSN_GOOD_NO_SLOT:
auprobe->simulate = true;
break;
default:
break;
}
return 0;
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
utask->autask.saved_trap_no = current->thread.trap_no;
current->thread.trap_no = UPROBE_TRAP_NR;
instruction_pointer_set(regs, utask->xol_vaddr);
user_enable_single_step(current);
return 0;
}
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
WARN_ON_ONCE(current->thread.trap_no != UPROBE_TRAP_NR);
instruction_pointer_set(regs, utask->vaddr + auprobe->insn_size);
user_disable_single_step(current);
return 0;
}
bool arch_uprobe_xol_was_trapped(struct task_struct *t)
{
if (t->thread.trap_no != UPROBE_TRAP_NR)
return true;
return false;
}
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
probe_opcode_t insn;
unsigned long addr;
if (!auprobe->simulate)
return false;
insn = *(probe_opcode_t *)(&auprobe->insn[0]);
addr = instruction_pointer(regs);
if (auprobe->api.handler)
auprobe->api.handler(insn, addr, regs);
return true;
}
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
/*
* Task has received a fatal signal, so reset back to the probed
* address.
*/
instruction_pointer_set(regs, utask->vaddr);
user_disable_single_step(current);
}
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
if (ctx == RP_CHECK_CHAIN_CALL)
return regs->usp <= ret->stack;
else
return regs->usp < ret->stack;
}
unsigned long
arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
struct pt_regs *regs)
{
unsigned long ra;
ra = regs->lr;
regs->lr = trampoline_vaddr;
return ra;
}
int arch_uprobe_exception_notify(struct notifier_block *self,
unsigned long val, void *data)
{
return NOTIFY_DONE;
}
int uprobe_breakpoint_handler(struct pt_regs *regs)
{
if (uprobe_pre_sstep_notifier(regs))
return 1;
return 0;
}
int uprobe_single_step_handler(struct pt_regs *regs)
{
if (uprobe_post_sstep_notifier(regs))
return 1;
return 0;
}
@@ -193,6 +193,109 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
return &user_csky_view;
}
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(tls),
REG_OFFSET_NAME(lr),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(usp),
REG_OFFSET_NAME(orig_a0),
REG_OFFSET_NAME(a0),
REG_OFFSET_NAME(a1),
REG_OFFSET_NAME(a2),
REG_OFFSET_NAME(a3),
REG_OFFSET_NAME(regs[0]),
REG_OFFSET_NAME(regs[1]),
REG_OFFSET_NAME(regs[2]),
REG_OFFSET_NAME(regs[3]),
REG_OFFSET_NAME(regs[4]),
REG_OFFSET_NAME(regs[5]),
REG_OFFSET_NAME(regs[6]),
REG_OFFSET_NAME(regs[7]),
REG_OFFSET_NAME(regs[8]),
REG_OFFSET_NAME(regs[9]),
#if defined(__CSKYABIV2__)
REG_OFFSET_NAME(exregs[0]),
REG_OFFSET_NAME(exregs[1]),
REG_OFFSET_NAME(exregs[2]),
REG_OFFSET_NAME(exregs[3]),
REG_OFFSET_NAME(exregs[4]),
REG_OFFSET_NAME(exregs[5]),
REG_OFFSET_NAME(exregs[6]),
REG_OFFSET_NAME(exregs[7]),
REG_OFFSET_NAME(exregs[8]),
REG_OFFSET_NAME(exregs[9]),
REG_OFFSET_NAME(exregs[10]),
REG_OFFSET_NAME(exregs[11]),
REG_OFFSET_NAME(exregs[12]),
REG_OFFSET_NAME(exregs[13]),
REG_OFFSET_NAME(exregs[14]),
REG_OFFSET_NAME(rhi),
REG_OFFSET_NAME(rlo),
REG_OFFSET_NAME(dcsr),
#endif
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
* pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return (addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);
......
@@ -24,26 +24,9 @@ struct screen_info screen_info = {
};
#endif
-phys_addr_t __init_memblock memblock_end_of_REG0(void)
-{
-	return (memblock.memory.regions[0].base +
-		memblock.memory.regions[0].size);
-}
-phys_addr_t __init_memblock memblock_start_of_REG1(void)
-{
-	return memblock.memory.regions[1].base;
-}
-size_t __init_memblock memblock_size_of_REG1(void)
-{
-	return memblock.memory.regions[1].size;
-}
static void __init csky_memblock_init(void)
{
unsigned long zone_size[MAX_NR_ZONES];
-	unsigned long zhole_size[MAX_NR_ZONES];
signed long size;
memblock_reserve(__pa(_stext), _end - _stext);
@@ -54,54 +37,36 @@ static void __init csky_memblock_init(void)
memblock_dump_all();
memset(zone_size, 0, sizeof(zone_size));
-	memset(zhole_size, 0, sizeof(zhole_size));
min_low_pfn = PFN_UP(memblock_start_of_DRAM());
-	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
-	max_low_pfn = PFN_UP(memblock_end_of_REG0());
-	if (max_low_pfn == 0)
-		max_low_pfn = max_pfn;
+	max_low_pfn = max_pfn = PFN_DOWN(memblock_end_of_DRAM());
size = max_pfn - min_low_pfn;
-	if (memblock.memory.cnt > 1) {
-		zone_size[ZONE_NORMAL] =
-			PFN_DOWN(memblock_start_of_REG1()) - min_low_pfn;
-		zhole_size[ZONE_NORMAL] =
-			PFN_DOWN(memblock_start_of_REG1()) - max_low_pfn;
-	} else {
-		if (size <= PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET))
-			zone_size[ZONE_NORMAL] = max_pfn - min_low_pfn;
-		else {
-			zone_size[ZONE_NORMAL] =
-				PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
-			max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
-		}
-	}
+	if (size <= PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET))
+		zone_size[ZONE_NORMAL] = size;
+	else if (size < PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET)) {
+		zone_size[ZONE_NORMAL] =
+			PFN_DOWN(SSEG_SIZE - PHYS_OFFSET_OFFSET);
+		max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
+	} else {
+		zone_size[ZONE_NORMAL] =
+			PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
+		max_low_pfn = min_low_pfn + zone_size[ZONE_NORMAL];
+		write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
+	}
#ifdef CONFIG_HIGHMEM
-	size = 0;
-	if (memblock.memory.cnt > 1) {
-		size = PFN_DOWN(memblock_size_of_REG1());
-		highstart_pfn = PFN_DOWN(memblock_start_of_REG1());
-	} else {
-		size = max_pfn - min_low_pfn -
-			PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
-		highstart_pfn = min_low_pfn +
-			PFN_DOWN(LOWMEM_LIMIT - PHYS_OFFSET_OFFSET);
-	}
-	if (size > 0)
-		zone_size[ZONE_HIGHMEM] = size;
-	highend_pfn = max_pfn;
+	zone_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+	highstart_pfn = max_low_pfn;
+	highend_pfn = max_pfn;
#endif
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
dma_contiguous_reserve(0);
-	free_area_init_node(0, zone_size, min_low_pfn, zhole_size);
+	free_area_init_node(0, zone_size, min_low_pfn, NULL);
}
void __init setup_arch(char **cmdline_p)
......
@@ -175,6 +175,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
rseq_signal_deliver(ksig, regs);
/* Are we from a system call? */
if (in_syscall(regs)) {
/* Avoid additional syscall restarting via ret_from_exception */
@@ -251,6 +253,9 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs,
unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
/* Handle pending signal delivery */
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
@@ -258,5 +263,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
rseq_handle_notify_resume(NULL, regs);
}
}
@@ -22,6 +22,9 @@
#include <asm/sections.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif
struct ipi_data_struct {
unsigned long bits ____cacheline_aligned;
@@ -156,6 +159,8 @@ volatile unsigned int secondary_hint;
volatile unsigned int secondary_ccr;
volatile unsigned int secondary_stack;
unsigned long secondary_msa1;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
unsigned long mask = 1 << cpu;
@@ -164,6 +169,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
(unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8;
secondary_hint = mfcr("cr31");
secondary_ccr = mfcr("cr18");
secondary_msa1 = read_mmu_msa1();
/*
* Because other CPUs are in reset status, we must flush data
......
@@ -14,6 +14,7 @@
#include <linux/kallsyms.h>
#include <linux/rtc.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <asm/setup.h>
#include <asm/traps.h>
@@ -109,14 +110,14 @@ void buserr(struct pt_regs *regs)
force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc);
}
-#define USR_BKPT 0x1464
asmlinkage void trap_c(struct pt_regs *regs)
{
int sig;
unsigned long vector;
siginfo_t info;
struct task_struct *tsk = current;
vector = (mfcr("psr") >> 16) & 0xff;
vector = (regs->sr >> 16) & 0xff;
switch (vector) {
case VEC_ZERODIV:
@@ -125,10 +126,27 @@ asmlinkage void trap_c(struct pt_regs *regs)
break;
/* ptrace */
case VEC_TRACE:
#ifdef CONFIG_KPROBES
if (kprobe_single_step_handler(regs))
return;
#endif
#ifdef CONFIG_UPROBES
if (uprobe_single_step_handler(regs))
return;
#endif
info.si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_ILLEGAL:
tsk->thread.trap_no = vector;
#ifdef CONFIG_KPROBES
if (kprobe_breakpoint_handler(regs))
return;
#endif
#ifdef CONFIG_UPROBES
if (uprobe_breakpoint_handler(regs))
return;
#endif
die_if_kernel("Kernel mode ILLEGAL", regs, vector);
#ifndef CONFIG_CPU_NO_USER_BKPT
if (*(uint16_t *)instruction_pointer(regs) != USR_BKPT)
@@ -146,16 +164,20 @@ asmlinkage void trap_c(struct pt_regs *regs)
sig = SIGTRAP;
break;
case VEC_ACCESS:
tsk->thread.trap_no = vector;
return buserr(regs);
#ifdef CONFIG_CPU_NEED_SOFTALIGN
case VEC_ALIGN:
tsk->thread.trap_no = vector;
return csky_alignment(regs);
#endif
#ifdef CONFIG_CPU_HAS_FPU
case VEC_FPE:
tsk->thread.trap_no = vector;
die_if_kernel("Kernel mode FPE", regs, vector);
return fpu_fpe(regs);
case VEC_PRIV:
tsk->thread.trap_no = vector;
die_if_kernel("Kernel mode PRIV", regs, vector);
if (fpu_libc_helper(regs))
return;
@@ -164,5 +186,8 @@ asmlinkage void trap_c(struct pt_regs *regs)
sig = SIGSEGV;
break;
}
tsk->thread.trap_no = vector;
send_sig(sig, current, 0);
}
@@ -7,8 +7,12 @@
#include <asm/cache.h>
#include <asm/barrier.h>
/* for L1-cache */
#define INS_CACHE (1 << 0)
#define DATA_CACHE (1 << 1)
#define CACHE_INV (1 << 4)
#define CACHE_CLR (1 << 5)
#define CACHE_OMS (1 << 6)
void local_icache_inv_all(void *priv)
{
@@ -16,11 +20,6 @@ void local_icache_inv_all(void *priv)
sync_is();
}
-void icache_inv_all(void)
-{
-	on_each_cpu(local_icache_inv_all, NULL, 1);
-}
#ifdef CONFIG_CPU_HAS_ICACHE_INS
void icache_inv_range(unsigned long start, unsigned long end)
{
@@ -31,9 +30,43 @@ void icache_inv_range(unsigned long start, unsigned long end)
sync_is();
}
#else
struct cache_range {
unsigned long start;
unsigned long end;
};
static DEFINE_SPINLOCK(cache_lock);
static inline void cache_op_line(unsigned long i, unsigned int val)
{
mtcr("cr22", i);
mtcr("cr17", val);
}
void local_icache_inv_range(void *priv)
{
struct cache_range *param = priv;
unsigned long i = param->start & ~(L1_CACHE_BYTES - 1);
unsigned long flags;
spin_lock_irqsave(&cache_lock, flags);
for (; i < param->end; i += L1_CACHE_BYTES)
cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS);
spin_unlock_irqrestore(&cache_lock, flags);
sync_is();
}
void icache_inv_range(unsigned long start, unsigned long end)
{
-	icache_inv_all();
+	struct cache_range param = { start, end };
+
+	if (irqs_disabled())
+		local_icache_inv_range(&param);
+	else
+		on_each_cpu(local_icache_inv_range, &param, 1);
}
#endif
......
@@ -18,6 +18,7 @@
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
@@ -53,6 +54,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
int fault;
unsigned long address = mmu_meh & PAGE_MASK;
if (kprobe_page_fault(regs, tsk->thread.trap_no))
return;
si_code = SEGV_MAPERR;
#ifndef CONFIG_CPU_HAS_TLBI
@@ -179,11 +183,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
force_sig_fault(SIGSEGV, si_code, (void __user *)address);
return;
}
no_context:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs))
return;
@@ -198,6 +205,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
die_if_kernel("Oops", regs, write);
out_of_memory:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
/*
* We ran out of memory, call the OOM killer, and return the userspace
* (which will retry the fault, or kill us if we got oom-killed).
@@ -206,6 +215,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
return;
do_sigbus:
tsk->thread.trap_no = (regs->sr >> 16) & 0xff;
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
......