Commit 657480d9 authored by Sven Schnelle, committed by Vasily Gorbik

s390: support KPROBES_ON_FTRACE

Instead of using our own kprobes-on-ftrace handling, convert the
code to support the generic KPROBES_ON_FTRACE infrastructure.
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 5f490a52
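For context (not part of this commit): with KPROBES_ON_FTRACE enabled, a kprobe placed on a function entry that ftrace already manages is backed by the ftrace call site instead of a breakpoint instruction. A minimal, hypothetical test module that exercises such a probe could look like the sketch below; the probed symbol, handler, and module names are placeholders and are not taken from this patch.

#include <linux/module.h>
#include <linux/kprobes.h>

static int pre(struct kprobe *p, struct pt_regs *regs)
{
	/* on s390, instruction_pointer() reads regs->psw.addr */
	pr_info("kprobe hit at %pS\n", (void *)instruction_pointer(regs));
	return 0;	/* let the probed function continue */
}

/* hypothetical probe target; any ftrace-traceable function entry works */
static struct kprobe kp = {
	.symbol_name = "do_sys_open",
	.pre_handler = pre,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");

On a kernel with this patch applied, such a probe no longer needs a breakpoint at the function entry; the generic kprobes code treats it as ftrace-based (visible, for example, as an [FTRACE] tag in /sys/kernel/debug/kprobes/list).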
@@ -24,7 +24,7 @@
     |      parisc: |  ok  |
     |     powerpc: |  ok  |
     |       riscv: | TODO |
-    |        s390: | TODO |
+    |        s390: |  ok  |
     |          sh: | TODO |
     |       sparc: | TODO |
     |          um: | TODO |
...
@@ -156,6 +156,7 @@ config S390
 	select HAVE_KERNEL_UNCOMPRESSED
 	select HAVE_KERNEL_XZ
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_KVM
 	select HAVE_LIVEPATCH
...
@@ -54,7 +54,6 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
-	unsigned int is_ftrace_insn : 1;
 };

 struct prev_kprobe {
...
@@ -72,15 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
 #endif
 }

-static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
-	if (insn->opc == BREAKPOINT_INSTRUCTION)
-		return 1;
-#endif
-	return 0;
-}
-
 static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
 {
 #ifdef CONFIG_KPROBES
@@ -114,16 +105,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
 		/* Initial code replacement */
 		ftrace_generate_orig_insn(&orig);
 		ftrace_generate_nop_insn(&new);
-	} else if (is_kprobe_on_ftrace(&old)) {
-		/*
-		 * If we find a breakpoint instruction, a kprobe has been
-		 * placed at the beginning of the function. We write the
-		 * constant KPROBE_ON_FTRACE_NOP into the remaining four
-		 * bytes of the original instruction so that the kprobes
-		 * handler can execute a nop, if it reaches this breakpoint.
-		 */
-		ftrace_generate_kprobe_call_insn(&orig);
-		ftrace_generate_kprobe_nop_insn(&new);
 	} else {
 		/* Replace ftrace call with a nop. */
 		ftrace_generate_call_insn(&orig, rec->ip);
@@ -142,21 +123,10 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 	if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
 		return -EFAULT;
-	if (is_kprobe_on_ftrace(&old)) {
-		/*
-		 * If we find a breakpoint instruction, a kprobe has been
-		 * placed at the beginning of the function. We write the
-		 * constant KPROBE_ON_FTRACE_CALL into the remaining four
-		 * bytes of the original instruction so that the kprobes
-		 * handler can execute a brasl if it reaches this breakpoint.
-		 */
-		ftrace_generate_kprobe_nop_insn(&orig);
-		ftrace_generate_kprobe_call_insn(&new);
-	} else {
-		/* Replace nop with an ftrace call. */
-		ftrace_generate_nop_insn(&orig);
-		ftrace_generate_call_insn(&new, rec->ip);
-	}
+	/* Replace nop with an ftrace call. */
+	ftrace_generate_nop_insn(&orig);
+	ftrace_generate_call_insn(&new, rec->ip);
 	/* Verify that the to be replaced code matches what we expect. */
 	if (memcmp(&orig, &old, sizeof(old)))
 		return -EINVAL;
@@ -241,3 +211,45 @@ int ftrace_disable_ftrace_graph_caller(void)
 }

 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+		struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe_ctlblk *kcb;
+	struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+
+	if (unlikely(!p) || kprobe_disabled(p))
+		return;
+
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+		return;
+	}
+
+	__this_cpu_write(current_kprobe, p);
+
+	kcb = get_kprobe_ctlblk();
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+	instruction_pointer_set(regs, ip);
+
+	if (!p->pre_handler || !p->pre_handler(p, regs)) {
+
+		instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
+
+		if (unlikely(p->post_handler)) {
+			kcb->kprobe_status = KPROBE_HIT_SSDONE;
+			p->post_handler(p, regs, 0);
+		}
+	}
+	__this_cpu_write(current_kprobe, NULL);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	return 0;
+}
+#endif
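For illustration only (this is not part of the patch, and not verbatim kernel code): once HAVE_KPROBES_ON_FTRACE is selected, the generic kprobes core is the caller of the two hooks added above. Roughly, it registers an ftrace_ops whose handler is kprobe_ftrace_handler() and filters it to the probe address, along the lines of the following simplified sketch; the names mirror kernel/kprobes.c, but the arming logic is reduced to its essentials.

#include <linux/ftrace.h>
#include <linux/kprobes.h>

/* simplified: the real ops and arming logic live in kernel/kprobes.c */
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
	.func	= kprobe_ftrace_handler,	/* the handler added above */
	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int arm_kprobe_ftrace(struct kprobe *p)
{
	int ret;

	/* redirect the ftrace site at p->addr to kprobe_ftrace_handler() */
	ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
				   (unsigned long)p->addr, 0, 0);
	if (ret)
		return ret;
	return register_ftrace_function(&kprobe_ftrace_ops);
}

Because arch_prepare_kprobe_ftrace() leaves p->ainsn.insn NULL, no instruction slot is allocated and no single-stepping takes place for such probes; kprobe_ftrace_handler() drives the pre/post handlers directly from the ftrace callback, adjusting the saved PSW address as if the original instruction had been executed.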
@@ -56,21 +56,10 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = {
 static void copy_instruction(struct kprobe *p)
 {
-	unsigned long ip = (unsigned long) p->addr;
 	s64 disp, new_disp;
 	u64 addr, new_addr;

-	if (ftrace_location(ip) == ip) {
-		/*
-		 * If kprobes patches the instruction that is morphed by
-		 * ftrace make sure that kprobes always sees the branch
-		 * "jg .+24" that skips the mcount block or the "brcl 0,0"
-		 * in case of hotpatch.
-		 */
-		ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
-		p->ainsn.is_ftrace_insn = 1;
-	} else
-		memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
+	memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
 	p->opcode = p->ainsn.insn[0];
 	if (!probe_is_insn_relative_long(p->ainsn.insn))
 		return;
@@ -136,11 +125,6 @@ int arch_prepare_kprobe(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(arch_prepare_kprobe);

-int arch_check_ftrace_location(struct kprobe *p)
-{
-	return 0;
-}
-
 struct swap_insn_args {
 	struct kprobe *p;
 	unsigned int arm_kprobe : 1;
@@ -149,28 +133,11 @@ struct swap_insn_args {
 static int swap_instruction(void *data)
 {
 	struct swap_insn_args *args = data;
-	struct ftrace_insn new_insn, *insn;
 	struct kprobe *p = args->p;
-	size_t len;
+	u16 opc;

-	new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
-	len = sizeof(new_insn.opc);
-	if (!p->ainsn.is_ftrace_insn)
-		goto skip_ftrace;
-	len = sizeof(new_insn);
-	insn = (struct ftrace_insn *) p->addr;
-	if (args->arm_kprobe) {
-		if (is_ftrace_nop(insn))
-			new_insn.disp = KPROBE_ON_FTRACE_NOP;
-		else
-			new_insn.disp = KPROBE_ON_FTRACE_CALL;
-	} else {
-		ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
-		if (insn->disp == KPROBE_ON_FTRACE_NOP)
-			ftrace_generate_nop_insn(&new_insn);
-	}
-skip_ftrace:
-	s390_kernel_write(p->addr, &new_insn, len);
+	opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+	s390_kernel_write(p->addr, &opc, sizeof(opc));
 	return 0;
 }
 NOKPROBE_SYMBOL(swap_instruction);
@@ -464,24 +431,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 	unsigned long ip = regs->psw.addr;
 	int fixup = probe_get_fixup_type(p->ainsn.insn);

-	/* Check if the kprobes location is an enabled ftrace caller */
-	if (p->ainsn.is_ftrace_insn) {
-		struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
-		struct ftrace_insn call_insn;
-
-		ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
-		/*
-		 * A kprobe on an enabled ftrace call site actually single
-		 * stepped an unconditional branch (ftrace nop equivalent).
-		 * Now we need to fixup things and pretend that a brasl r0,...
-		 * was executed instead.
-		 */
-		if (insn->disp == KPROBE_ON_FTRACE_CALL) {
-			ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
-			regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
-		}
-	}
-
 	if (fixup & FIXUP_PSW_NORMAL)
 		ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
...
@@ -42,6 +42,9 @@ ENTRY(ftrace_caller)
 	.globl ftrace_regs_caller
 	.set ftrace_regs_caller,ftrace_caller
 	stg	%r14,(__SF_GPRS+8*8)(%r15)	# save traced function caller
+	lghi	%r14,0				# save condition code
+	ipm	%r14				# don't put any instructions
+	sllg	%r14,%r14,16			# clobbering CC before this point
 	lgr	%r1,%r15
 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
 	aghi	%r0,MCOUNT_RETURN_FIXUP
@@ -54,6 +57,9 @@ ENTRY(ftrace_caller)
 	# allocate pt_regs and stack frame for ftrace_trace_function
 	aghi	%r15,-STACK_FRAME_SIZE
 	stg	%r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+	stg	%r14,(STACK_PTREGS_PSW)(%r15)
+	lg	%r14,(__SF_GPRS+8*8)(%r1)	# restore original return address
+	stosm	(STACK_PTREGS_PSW)(%r15),0
 	aghi	%r1,-TRACED_FUNC_FRAME_SIZE
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	stg	%r0,(STACK_PTREGS_PSW+8)(%r15)
...