Commit c530e2f0 authored by Masami Hiramatsu, committed by Ingo Molnar

powerpc/kprobes: Remove jprobe powerpc implementation

Remove the arch-dependent setjmp/longjmp functions and the
unused jprobes fields in kprobe_ctlblk from arch/powerpc.
This also reverts the commits related to the
__is_active_jprobe() function.
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: linux-arch@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: https://lore.kernel.org/lkml/152942445234.15209.12868722778364739753.stgit@devbox
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 2efb75cd
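
For reference, the jprobe entry-handler use case is covered by an ordinary kprobe pre_handler. Below is a minimal illustrative sketch, not part of this commit; the module and the probed symbol "kernel_clone" are example choices only.

#include <linux/module.h>
#include <linux/kprobes.h>

/* Minimal sketch of the kprobe pre_handler pattern that replaces jprobes. */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        /* On powerpc, regs->nip holds the probed instruction address. */
        pr_info("probe hit at %pS\n", (void *)regs->nip);
        return 0;       /* 0: let the framework single-step the original insn */
}

static struct kprobe example_kp = {
        .symbol_name = "kernel_clone",  /* example target symbol */
        .pre_handler = example_pre_handler,
};

static int __init example_init(void)
{
        return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
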
@@ -88,7 +88,6 @@ struct prev_kprobe {
 struct kprobe_ctlblk {
         unsigned long kprobe_status;
         unsigned long kprobe_saved_msr;
-        struct pt_regs jprobe_saved_regs;
         struct prev_kprobe prev_kprobe;
 };
@@ -104,7 +103,6 @@ extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
 #ifdef CONFIG_KPROBES_ON_FTRACE
-extern int __is_active_jprobe(unsigned long addr);
 extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                            struct kprobe_ctlblk *kcb);
 #else
@@ -25,21 +25,6 @@
 #include <linux/preempt.h>
 #include <linux/ftrace.h>
-/*
- * This is called from ftrace code after invoking registered handlers to
- * disambiguate regs->nip changes done by jprobes and livepatch. We check if
- * there is an active jprobe at the provided address (mcount location).
- */
-int __is_active_jprobe(unsigned long addr)
-{
-        if (!preemptible()) {
-                struct kprobe *p = raw_cpu_read(current_kprobe);
-                return (p && (unsigned long)p->addr == addr) ? 1 : 0;
-        }
-        return 0;
-}
 static nokprobe_inline
 int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
                       struct kprobe_ctlblk *kcb, unsigned long orig_nip)
@@ -611,60 +611,6 @@ unsigned long arch_deref_entry_point(void *entry)
 }
 NOKPROBE_SYMBOL(arch_deref_entry_point);
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-        struct jprobe *jp = container_of(p, struct jprobe, kp);
-        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
-        /* setup return addr to the jprobe handler routine */
-        regs->nip = arch_deref_entry_point(jp->entry);
-#ifdef PPC64_ELF_ABI_v2
-        regs->gpr[12] = (unsigned long)jp->entry;
-#elif defined(PPC64_ELF_ABI_v1)
-        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
-#endif
-        /*
-         * jprobes use jprobe_return() which skips the normal return
-         * path of the function, and this messes up the accounting of the
-         * function graph tracer.
-         *
-         * Pause function graph tracing while performing the jprobe function.
-         */
-        pause_graph_tracing();
-        return 1;
-}
-NOKPROBE_SYMBOL(setjmp_pre_handler);
-void __used jprobe_return(void)
-{
-        asm volatile("jprobe_return_trap:\n"
-                     "trap\n"
-                     ::: "memory");
-}
-NOKPROBE_SYMBOL(jprobe_return);
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-        if (regs->nip != ppc_kallsyms_lookup_name("jprobe_return_trap")) {
-                pr_debug("longjmp_break_handler NIP (0x%lx) does not match jprobe_return_trap (0x%lx)\n",
-                         regs->nip, ppc_kallsyms_lookup_name("jprobe_return_trap"));
-                return 0;
-        }
-        memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
-        /* It's OK to start function graph tracing again */
-        unpause_graph_tracing();
-        preempt_enable_no_resched();
-        return 1;
-}
-NOKPROBE_SYMBOL(longjmp_break_handler);
 static struct kprobe trampoline_p = {
         .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
         .pre_handler = trampoline_probe_handler
@@ -104,39 +104,13 @@ ftrace_regs_call:
         bl      ftrace_stub
         nop
-        /* Load the possibly modified NIP */
-        ld      r15, _NIP(r1)
+        /* Load ctr with the possibly modified NIP */
+        ld      r3, _NIP(r1)
+        mtctr   r3
 #ifdef CONFIG_LIVEPATCH
-        cmpd    r14, r15        /* has NIP been altered? */
+        cmpd    r14, r3         /* has NIP been altered? */
 #endif
-#if defined(CONFIG_LIVEPATCH) && defined(CONFIG_KPROBES_ON_FTRACE)
-        /* NIP has not been altered, skip over further checks */
-        beq     1f
-        /* Check if there is an active jprobe on us */
-        subi    r3, r14, 4
-        bl      __is_active_jprobe
-        nop
-        /*
-         * If r3 == 1, then this is a kprobe/jprobe.
-         * else, this is livepatched function.
-         *
-         * The conditional branch for livepatch_handler below will use the
-         * result of this comparison. For kprobe/jprobe, we just need to branch to
-         * the new NIP, not call livepatch_handler. The branch below is bne, so we
-         * want CR0[EQ] to be true if this is a kprobe/jprobe. Which means we want
-         * CR0[EQ] = (r3 == 1).
-         */
-        cmpdi   r3, 1
-1:
-#endif
-        /* Load CTR with the possibly modified NIP */
-        mtctr   r15
         /* Restore gprs */
         REST_GPR(0,r1)
         REST_10GPRS(2,r1)
@@ -154,10 +128,7 @@ ftrace_regs_call:
         addi r1, r1, SWITCH_FRAME_SIZE
 #ifdef CONFIG_LIVEPATCH
-        /*
-         * Based on the cmpd or cmpdi above, if the NIP was altered and we're
-         * not on a kprobe/jprobe, then handle livepatch.
-         */
+        /* Based on the cmpd above, if the NIP was altered handle livepatch */
         bne-    livepatch_handler
 #endif