Commit 2ae1f49a authored by Oleg Nesterov

uprobes/x86: Add is_64bit_mm(), kill validate_insn_bits()

1. Extract the ->ia32_compat check from the 64-bit validate_insn_bits()
   into a new helper, is_64bit_mm(); it will gain more users.

   TODO: this check is actually wrong if the mm owner is an X32 task;
   we need another fix which changes set_personality_ia32().

   TODO: even worse, the whole 64-or-32-bit logic is very broken,
   and the fix is not simple; we need nontrivial changes in
   the core uprobes code.

2. Kill validate_insn_bits() and change its single caller to use
   uprobe_init_insn(is_64bit_mm(mm)).
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Jim Keniston <jkenisto@us.ibm.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
parent 73175d0d
@@ -231,6 +231,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
 }
 
 #ifdef CONFIG_X86_64
+static inline bool is_64bit_mm(struct mm_struct *mm)
+{
+	return	!config_enabled(CONFIG_IA32_EMULATION) ||
+		!mm->context.ia32_compat;
+}
 /*
  * If arch_uprobe->insn doesn't use rip-relative addressing, return
  * immediately. Otherwise, rewrite the instruction so that it accesses
@@ -355,13 +360,11 @@ handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *
 		*correction += 4;
 	}
 }
-
-static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
+#else /* 32-bit: */
+static inline bool is_64bit_mm(struct mm_struct *mm)
 {
-	bool x86_64 = !mm->context.ia32_compat;
-	return uprobe_init_insn(auprobe, insn, x86_64);
+	return false;
 }
-#else /* 32-bit: */
 /*
  * No RIP-relative addressing on 32-bit
  */
@@ -376,11 +379,6 @@ static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *
 					long *correction)
 {
 }
-
-static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
-{
-	return uprobe_init_insn(auprobe, insn, false);
-}
 #endif /* CONFIG_X86_64 */
 
 struct uprobe_xol_ops {
@@ -625,7 +623,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	bool fix_ip = true, fix_call = false;
 	int ret;
 
-	ret = validate_insn_bits(auprobe, mm, &insn);
+	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));
 	if (ret)
 		return ret;
...
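For readability, here is a minimal sketch of how the affected code reads once the hunks above are applied. It is condensed from the diff itself and relies on the surrounding x86 uprobes code (arch/x86/kernel/uprobes.c); it is not a standalone compilation unit.

#ifdef CONFIG_X86_64
/* With CONFIG_IA32_EMULATION off, every mm is 64-bit; otherwise trust ->ia32_compat. */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!config_enabled(CONFIG_IA32_EMULATION) ||
		!mm->context.ia32_compat;
}
#else /* 32-bit: */
/* On a 32-bit kernel a 64-bit mm cannot exist. */
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif /* CONFIG_X86_64 */

/*
 * The single former validate_insn_bits() caller, arch_uprobe_analyze_insn(),
 * now passes the 64-bit decision straight to uprobe_init_insn():
 */
	ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm));

As the first TODO notes, this check is still wrong when the mm owner is an X32 task; the commit message defers that to a separate fix in set_personality_ia32().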