Commit e6584523 authored by Ananth N Mavinakayanahalli, committed by Linus Torvalds

[PATCH] Kprobes: Track kprobe on a per_cpu basis - base changes

Changes to the base kprobe infrastructure to track kprobe execution on a
per-cpu basis: the global curr_kprobe pointer is replaced by a per-cpu
kprobe_instance, and kprobe_running() now returns the struct kprobe *
executing on the current CPU (or NULL if none).
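
As an illustration of the intended use (not part of this patch), here is a
rough sketch of how an architecture's breakpoint handler consumes the new
per-cpu accessors; sample_trap_handler is a placeholder name, and the
arch-specific bookkeeping normally kept in the per-cpu kprobe_ctlblk is
elided:

	/*
	 * Hypothetical sketch; preemption is assumed disabled on entry,
	 * so the __ per-cpu accessors are safe to use.
	 */
	static int sample_trap_handler(struct pt_regs *regs)
	{
		void *addr = (void *)instruction_pointer(regs);
		struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
		struct kprobe *p;

		if (kprobe_running())
			return 0;	/* reentrant hit: arch consults kcb state */

		lock_kprobes();
		p = get_kprobe(addr);
		if (!p) {
			unlock_kprobes();
			return 0;	/* not ours */
		}

		__get_cpu_var(current_kprobe) = p;	/* the arch-side "set" */
		if (p->pre_handler && p->pre_handler(p, regs))
			return 1;	/* pre_handler took over execution */

		/* ... single-step the copied instruction, call post_handler ... */
		reset_current_kprobe();
		unlock_kprobes();
		return 1;
	}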
Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 66ff2d06
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -33,6 +33,7 @@
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/percpu.h>
 #include <asm/kprobes.h>
@@ -106,6 +107,9 @@ struct jprobe {
 	kprobe_opcode_t *entry; /* probe handling code to jump to */
 };
 
+DECLARE_PER_CPU(struct kprobe *, current_kprobe);
+DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
 #ifdef ARCH_SUPPORTS_KRETPROBES
 extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
 #else /* ARCH_SUPPORTS_KRETPROBES */
@@ -146,13 +150,6 @@ struct kretprobe_instance {
 void lock_kprobes(void);
 void unlock_kprobes(void);
 
-/* kprobe running now on this CPU? */
-static inline int kprobe_running(void)
-{
-	extern unsigned int kprobe_cpu;
-	return kprobe_cpu == smp_processor_id();
-}
-
 extern int arch_prepare_kprobe(struct kprobe *p);
 extern void arch_copy_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
@@ -167,6 +164,22 @@ extern void free_insn_slot(kprobe_opcode_t *slot);
 struct kprobe *get_kprobe(void *addr);
 struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 
+/* kprobe_running() will just return the current_kprobe on this CPU */
+static inline struct kprobe *kprobe_running(void)
+{
+	return (__get_cpu_var(current_kprobe));
+}
+
+static inline void reset_current_kprobe(void)
+{
+	__get_cpu_var(current_kprobe) = NULL;
+}
+
+static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
+{
+	return (&__get_cpu_var(kprobe_ctlblk));
+}
+
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
 int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
@@ -183,9 +196,9 @@ void add_rp_inst(struct kretprobe_instance *ri);
 void kprobe_flush_task(struct task_struct *tk);
 void recycle_rp_inst(struct kretprobe_instance *ri);
 #else /* CONFIG_KPROBES */
-static inline int kprobe_running(void)
+static inline struct kprobe *kprobe_running(void)
 {
-	return 0;
+	return NULL;
 }
 static inline int register_kprobe(struct kprobe *p)
 {
...
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -51,7 +51,7 @@ static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -188,6 +188,17 @@ void __kprobes unlock_kprobes(void)
 	local_irq_restore(flags);
 }
 
+/* We have preemption disabled.. so it is safe to use __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
+{
+	__get_cpu_var(kprobe_instance) = kp;
+}
+
+static inline void reset_kprobe_instance(void)
+{
+	__get_cpu_var(kprobe_instance) = NULL;
+}
+
 /* You have to be holding the kprobe_lock */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
@@ -213,11 +224,11 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->pre_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
 		}
-		curr_kprobe = NULL;
+		reset_kprobe_instance();
 	}
 	return 0;
 }
@@ -229,9 +240,9 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->post_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
-			curr_kprobe = NULL;
+			reset_kprobe_instance();
 		}
 	}
 	return;
@@ -240,12 +251,14 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
 	/*
 	 * if we faulted "during" the execution of a user specified
 	 * probe handler, invoke just that probe's fault handler
 	 */
-	if (curr_kprobe && curr_kprobe->fault_handler) {
-		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+	if (cur && cur->fault_handler) {
+		if (cur->fault_handler(cur, regs, trapnr))
 			return 1;
 	}
 	return 0;
@@ -253,15 +266,15 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *kp = curr_kprobe;
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	int ret = 0;
 
-	if (curr_kprobe && kp->break_handler) {
-		if (kp->break_handler(kp, regs)) {
-			curr_kprobe = NULL;
-			return 1;
-		}
-	}
-	curr_kprobe = NULL;
-	return 0;
+	if (cur && cur->break_handler) {
+		if (cur->break_handler(cur, regs))
+			ret = 1;
+	}
+	reset_kprobe_instance();
+	return ret;
 }
 
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
...
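
For context, a minimal (hypothetical) probe module whose handlers the
aggr_* dispatchers above would invoke when several probes share an address;
some_probed_address is a placeholder:

	#include <linux/module.h>
	#include <linux/kprobes.h>

	static struct kprobe kp;

	static int sample_pre(struct kprobe *p, struct pt_regs *regs)
	{
		/* while this runs, the per-cpu kprobe_instance points at p */
		return 0;
	}

	static int sample_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
	{
		/* called if sample_pre (or a post handler) faults */
		return 0;
	}

	static int __init sample_init(void)
	{
		kp.addr = (kprobe_opcode_t *)some_probed_address;	/* placeholder */
		kp.pre_handler = sample_pre;
		kp.fault_handler = sample_fault;
		return register_kprobe(&kp);
	}

	static void __exit sample_exit(void)
	{
		unregister_kprobe(&kp);
	}

	module_init(sample_init);
	module_exit(sample_exit);
	MODULE_LICENSE("GPL");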