Commit ab09e95c authored by Peter Zijlstra, committed by Ingo Molnar

x86/kprobes: Convert to text-patching.h

Convert kprobes to the new text-poke naming.
Tested-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191111132458.103959370@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 38ebd8d1
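
For reference, a minimal sketch of the renaming this patch performs. The old values are taken from the lines removed below; the new definitions mirror the <asm/text-patching.h> naming and are listed here only as an illustration, not copied verbatim from that header:

/* Old, kprobes-private naming (removed by this patch): */
#define BREAKPOINT_INSTRUCTION	0xcc
#define RELATIVEJUMP_OPCODE	0xe9
#define RELATIVEJUMP_SIZE	5
#define RELATIVECALL_OPCODE	0xe8
#define RELATIVE_ADDR_SIZE	4
#define INT3_SIZE		sizeof(kprobe_opcode_t)	/* == 1 */

/* Shared text-patching.h naming used instead: */
#define INT3_INSN_OPCODE	0xCC
#define INT3_INSN_SIZE		1
#define CALL_INSN_OPCODE	0xE8
#define JMP32_INSN_OPCODE	0xE9
#define JMP32_INSN_SIZE		5
#define DISP32_SIZE		4	/* added to text-patching.h by this patch */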
@@ -11,12 +11,11 @@
 #include <asm-generic/kprobes.h>
-#define BREAKPOINT_INSTRUCTION 0xcc
 #ifdef CONFIG_KPROBES
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
+#include <asm/text-patching.h>
 #include <asm/insn.h>
 #define __ARCH_WANT_KPROBES_INSN_SLOT
@@ -25,10 +24,7 @@ struct pt_regs;
 struct kprobe;
 typedef u8 kprobe_opcode_t;
-#define RELATIVEJUMP_OPCODE 0xe9
-#define RELATIVEJUMP_SIZE 5
-#define RELATIVECALL_OPCODE 0xe8
-#define RELATIVE_ADDR_SIZE 4
 #define MAX_STACK_SIZE 64
 #define CUR_STACK_SIZE(ADDR) \
 	(current_top_of_stack() - (unsigned long)(ADDR))
@@ -43,11 +39,11 @@ extern __visible kprobe_opcode_t optprobe_template_entry[];
 extern __visible kprobe_opcode_t optprobe_template_val[];
 extern __visible kprobe_opcode_t optprobe_template_call[];
 extern __visible kprobe_opcode_t optprobe_template_end[];
-#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
+#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + DISP32_SIZE)
 #define MAX_OPTINSN_SIZE \
 	(((unsigned long)optprobe_template_end - \
 	  (unsigned long)optprobe_template_entry) + \
-	 MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE)
+	 MAX_OPTIMIZED_LENGTH + JMP32_INSN_SIZE)
 extern const int kretprobe_blacklist_size;
@@ -73,7 +69,7 @@ struct arch_specific_insn {
 struct arch_optimized_insn {
 	/* copy of the original instructions */
-	kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE];
+	kprobe_opcode_t copied_insn[DISP32_SIZE];
 	/* detour code buffer */
 	kprobe_opcode_t *insn;
 	/* the size of instructions copied to detour code buffer */
...
@@ -61,6 +61,8 @@ extern void text_poke_finish(void);
 #define JMP8_INSN_SIZE 2
 #define JMP8_INSN_OPCODE 0xEB
+#define DISP32_SIZE 4
 static inline int text_opcode_size(u8 opcode)
 {
 	int size = 0;
...
@@ -119,14 +119,14 @@ __synthesize_relative_insn(void *dest, void *from, void *to, u8 op)
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
 void synthesize_reljump(void *dest, void *from, void *to)
 {
-	__synthesize_relative_insn(dest, from, to, RELATIVEJUMP_OPCODE);
+	__synthesize_relative_insn(dest, from, to, JMP32_INSN_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_reljump);
 /* Insert a call instruction at address 'from', which calls address 'to'.*/
 void synthesize_relcall(void *dest, void *from, void *to)
 {
-	__synthesize_relative_insn(dest, from, to, RELATIVECALL_OPCODE);
+	__synthesize_relative_insn(dest, from, to, CALL_INSN_OPCODE);
 }
 NOKPROBE_SYMBOL(synthesize_relcall);
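
Both helpers above funnel into __synthesize_relative_insn(), which emits one opcode byte followed by a 32-bit displacement measured from the end of the resulting 5-byte instruction; only the opcode argument changes with the new names. A minimal, self-contained sketch of that encoding (a standalone illustration, not the kernel implementation):

#include <stdint.h>
#include <string.h>

#define JMP32_INSN_OPCODE	0xE9	/* jmp  rel32 */
#define CALL_INSN_OPCODE	0xE8	/* call rel32 */

/*
 * Emit "op rel32" at 'dest', as if the instruction were located at
 * 'from' and should transfer control to 'to'; the displacement is
 * relative to the first byte after the 5-byte instruction.
 */
static void synthesize_relative_insn(uint8_t *dest, const void *from,
				     const void *to, uint8_t op)
{
	int32_t raddr = (int32_t)((intptr_t)to - ((intptr_t)from + 5));

	dest[0] = op;
	memcpy(dest + 1, &raddr, sizeof(raddr));
}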
@@ -301,7 +301,7 @@ static int can_probe(unsigned long paddr)
 		 * Another debugging subsystem might insert this breakpoint.
 		 * In that case, we can't recover it.
 		 */
-		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
 			return 0;
 		addr += insn.length;
 	}
@@ -356,7 +356,7 @@ int __copy_instruction(u8 *dest, u8 *src, u8 *real, struct insn *insn)
 		return 0;
 	/* Another subsystem puts a breakpoint, failed to recover */
-	if (insn->opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+	if (insn->opcode.bytes[0] == INT3_INSN_OPCODE)
 		return 0;
 	/* We should not singlestep on the exception masking instructions */
@@ -400,14 +400,14 @@ static int prepare_boost(kprobe_opcode_t *buf, struct kprobe *p,
 	int len = insn->length;
 	if (can_boost(insn, p->addr) &&
-	    MAX_INSN_SIZE - len >= RELATIVEJUMP_SIZE) {
+	    MAX_INSN_SIZE - len >= JMP32_INSN_SIZE) {
 		/*
 		 * These instructions can be executed directly if it
 		 * jumps back to correct address.
 		 */
 		synthesize_reljump(buf + len, p->ainsn.insn + len,
 				   p->addr + insn->length);
-		len += RELATIVEJUMP_SIZE;
+		len += JMP32_INSN_SIZE;
 		p->ainsn.boostable = true;
 	} else {
 		p->ainsn.boostable = false;
@@ -501,7 +501,7 @@ int arch_prepare_kprobe(struct kprobe *p)
 void arch_arm_kprobe(struct kprobe *p)
 {
-	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
+	text_poke(p->addr, ((unsigned char []){INT3_INSN_OPCODE}), 1);
 }
 void arch_disarm_kprobe(struct kprobe *p)
@@ -609,7 +609,7 @@ static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
 	regs->flags |= X86_EFLAGS_TF;
 	regs->flags &= ~X86_EFLAGS_IF;
 	/* single step inline if the instruction is an int3 */
-	if (p->opcode == BREAKPOINT_INSTRUCTION)
+	if (p->opcode == INT3_INSN_OPCODE)
 		regs->ip = (unsigned long)p->addr;
 	else
 		regs->ip = (unsigned long)p->ainsn.insn;
@@ -695,7 +695,7 @@ int kprobe_int3_handler(struct pt_regs *regs)
 			reset_current_kprobe();
 			return 1;
 		}
-	} else if (*addr != BREAKPOINT_INSTRUCTION) {
+	} else if (*addr != INT3_INSN_OPCODE) {
 		/*
 		 * The breakpoint instruction was removed right
 		 * after we hit it. Another cpu has removed
...
@@ -38,7 +38,7 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 	long offs;
 	int i;
-	for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
+	for (i = 0; i < JMP32_INSN_SIZE; i++) {
 		kp = get_kprobe((void *)addr - i);
 		/* This function only handles jump-optimized kprobe */
 		if (kp && kprobe_optimized(kp)) {
@@ -62,10 +62,10 @@ unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
 	if (addr == (unsigned long)kp->addr) {
 		buf[0] = kp->opcode;
-		memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+		memcpy(buf + 1, op->optinsn.copied_insn, DISP32_SIZE);
 	} else {
 		offs = addr - (unsigned long)kp->addr - 1;
-		memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
+		memcpy(buf, op->optinsn.copied_insn + offs, DISP32_SIZE - offs);
 	}
 	return (unsigned long)buf;
@@ -141,8 +141,6 @@ STACK_FRAME_NON_STANDARD(optprobe_template_func);
 #define TMPL_END_IDX \
 	((long)optprobe_template_end - (long)optprobe_template_entry)
-#define INT3_SIZE sizeof(kprobe_opcode_t)
 /* Optimized kprobe call back function: called from optinsn */
 static void
 optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
@@ -162,7 +160,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
 		regs->cs |= get_kernel_rpl();
 		regs->gs = 0;
 #endif
-		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
+		regs->ip = (unsigned long)op->kp.addr + INT3_INSN_SIZE;
 		regs->orig_ax = ~0UL;
 		__this_cpu_write(current_kprobe, &op->kp);
@@ -179,7 +177,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
 	struct insn insn;
 	int len = 0, ret;
-	while (len < RELATIVEJUMP_SIZE) {
+	while (len < JMP32_INSN_SIZE) {
 		ret = __copy_instruction(dest + len, src + len, real + len, &insn);
 		if (!ret || !can_boost(&insn, src + len))
 			return -EINVAL;
@@ -271,7 +269,7 @@ static int can_optimize(unsigned long paddr)
 		return 0;
 	/* Check there is enough space for a relative jump. */
-	if (size - offset < RELATIVEJUMP_SIZE)
+	if (size - offset < JMP32_INSN_SIZE)
 		return 0;
 	/* Decode instructions */
@@ -290,15 +288,15 @@ static int can_optimize(unsigned long paddr)
 		kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE);
 		insn_get_length(&insn);
 		/* Another subsystem puts a breakpoint */
-		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+		if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
 			return 0;
 		/* Recover address */
 		insn.kaddr = (void *)addr;
 		insn.next_byte = (void *)(addr + insn.length);
 		/* Check any instructions don't jump into target */
 		if (insn_is_indirect_jump(&insn) ||
-		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
-					 RELATIVE_ADDR_SIZE))
+		    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
+					 DISP32_SIZE))
 			return 0;
 		addr += insn.length;
 	}
@@ -374,7 +372,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 	 * Verify if the address gap is in 2GB range, because this uses
 	 * a relative jump.
 	 */
-	rel = (long)slot - (long)op->kp.addr + RELATIVEJUMP_SIZE;
+	rel = (long)slot - (long)op->kp.addr + JMP32_INSN_SIZE;
 	if (abs(rel) > 0x7fffffff) {
 		ret = -ERANGE;
 		goto err;
@@ -401,7 +399,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 	/* Set returning jmp instruction at the tail of out-of-line buffer */
 	synthesize_reljump(buf + len, slot + len,
 			   (u8 *)op->kp.addr + op->optinsn.size);
-	len += RELATIVEJUMP_SIZE;
+	len += JMP32_INSN_SIZE;
 	/* We have to use text_poke() for instruction buffer because it is RO */
 	text_poke(slot, buf, len);
@@ -422,22 +420,22 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
 void arch_optimize_kprobes(struct list_head *oplist)
 {
 	struct optimized_kprobe *op, *tmp;
-	u8 insn_buff[RELATIVEJUMP_SIZE];
+	u8 insn_buff[JMP32_INSN_SIZE];
 	list_for_each_entry_safe(op, tmp, oplist, list) {
 		s32 rel = (s32)((long)op->optinsn.insn -
-			((long)op->kp.addr + RELATIVEJUMP_SIZE));
+			((long)op->kp.addr + JMP32_INSN_SIZE));
 		WARN_ON(kprobe_disabled(&op->kp));
 		/* Backup instructions which will be replaced by jump address */
-		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
-		       RELATIVE_ADDR_SIZE);
-		insn_buff[0] = RELATIVEJUMP_OPCODE;
+		memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_INSN_SIZE,
+		       DISP32_SIZE);
+		insn_buff[0] = JMP32_INSN_OPCODE;
 		*(s32 *)(&insn_buff[1]) = rel;
-		text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE, NULL);
+		text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE, NULL);
 		list_del_init(&op->list);
 	}
@@ -446,13 +444,13 @@ void arch_optimize_kprobes(struct list_head *oplist)
 /* Replace a relative jump with a breakpoint (int3). */
 void arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
-	u8 insn_buff[RELATIVEJUMP_SIZE];
+	u8 insn_buff[JMP32_INSN_SIZE];
 	/* Set int3 to first byte for kprobes */
-	insn_buff[0] = BREAKPOINT_INSTRUCTION;
-	memcpy(insn_buff + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-	text_poke_bp(op->kp.addr, insn_buff, RELATIVEJUMP_SIZE,
+	insn_buff[0] = INT3_INSN_OPCODE;
+	memcpy(insn_buff + 1, op->optinsn.copied_insn, DISP32_SIZE);
+	text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE,
 		     text_gen_insn(JMP32_INSN_OPCODE, op->kp.addr, op->optinsn.insn));
 }
...
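
The optimize/unoptimize pair above depends on a "jmp rel32" being exactly one opcode byte plus a 32-bit displacement: arch_optimize_kprobes() only backs up the DISP32_SIZE bytes the displacement will overwrite (the byte under the INT3 is already held in kp->opcode), and arch_unoptimize_kprobe() restores them behind a fresh INT3. A small sketch of that invariant, using the constant values from the hunks above (the assertion is illustrative, not part of the patch):

#include <assert.h>

#define INT3_INSN_SIZE	1	/* 0xcc                */
#define DISP32_SIZE	4	/* rel32 displacement  */
#define JMP32_INSN_SIZE	5	/* 0xe9 + rel32        */

static_assert(INT3_INSN_SIZE + DISP32_SIZE == JMP32_INSN_SIZE,
	      "a jmp rel32 is an opcode byte plus a 32-bit displacement");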