Commit f72180cc authored by Naveen N. Rao and committed by Michael Ellerman

powerpc/kprobes: Do not disable interrupts for optprobes and kprobes_on_ftrace

Per Documentation/kprobes.txt, we don't necessarily need to disable
interrupts before invoking the kprobe handlers. Masami submitted
similar changes for x86 via commit a19b2e3d ("kprobes/x86: Remove
IRQ disabling from ftrace-based/optimized kprobes"). Do the same for
powerpc.
Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 8a2d71a3
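
For context, both hunks below converge on the same simplified entry/exit pattern. Here is a minimal sketch of that pattern in kernel-style C (abridged and illustrative, not the exact source; example_handler and the elided dispatch body are placeholders): the per-CPU kprobe bookkeeping reached through get_kprobe() and kprobe_running() only requires that the handler stay on one CPU, so disabling preemption suffices and interrupts can remain enabled.

/*
 * Illustrative sketch only, not the exact kernel source. The probe
 * dispatch body is elided; what matters is the entry/exit pattern:
 * preempt_disable()/preempt_enable_no_resched() with no surrounding
 * local_irq_save()/hard_irq_disable() pair.
 */
void example_handler(unsigned long nip, struct pt_regs *regs)
{
	struct kprobe *p;

	preempt_disable();		/* pin the handler to this CPU */

	p = get_kprobe((kprobe_opcode_t *)nip);
	if (!p || kprobe_disabled(p))
		goto end;

	/* ... run the pre/post handlers, fix up regs->nip ... */
end:
	preempt_enable_no_resched();	/* no local_irq_restore() needed */
}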
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -75,11 +75,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 {
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
-	unsigned long flags;
 
-	/* Disable irq for emulating a breakpoint and avoiding preempt */
-	local_irq_save(flags);
-	hard_irq_disable();
 	preempt_disable();
 
 	p = get_kprobe((kprobe_opcode_t *)nip);
@@ -105,16 +101,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 		else {
 			/*
 			 * If pre_handler returns !0, it sets regs->nip and
-			 * resets current kprobe. In this case, we still need
-			 * to restore irq, but not preemption.
+			 * resets current kprobe. In this case, we should not
+			 * re-enable preemption.
 			 */
-			local_irq_restore(flags);
 			return;
 		}
 	}
 end:
 	preempt_enable_no_resched();
-	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -115,14 +115,10 @@ static unsigned long can_optimize(struct kprobe *p)
 static void optimized_callback(struct optimized_kprobe *op,
 			       struct pt_regs *regs)
 {
-	unsigned long flags;
-
 	/* This is possible if op is under delayed unoptimizing */
 	if (kprobe_disabled(&op->kp))
 		return;
 
-	local_irq_save(flags);
-	hard_irq_disable();
 	preempt_disable();
 
 	if (kprobe_running()) {
@@ -135,13 +131,7 @@ static void optimized_callback(struct optimized_kprobe *op,
 		__this_cpu_write(current_kprobe, NULL);
 	}
 
-	/*
-	 * No need for an explicit __hard_irq_enable() here.
-	 * local_irq_restore() will re-enable interrupts,
-	 * if they were hard disabled.
-	 */
 	preempt_enable_no_resched();
-	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);
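
A note on the design choice, hedged since it goes beyond the diff itself: on powerpc, local_irq_save() only soft-disables interrupts under the lazy-IRQ masking scheme, which is why the old code paired it with hard_irq_disable() to also clear MSR[EE]; the removed comment in optimized_callback() alluded to this by noting that local_irq_restore() re-enables interrupts "if they were hard disabled". Dropping the pair removes both the soft and hard masking from the probe path, leaving preemption disabling as the only protection the handlers need.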