Commit e6023adc authored by Linus Torvalds

Merge branch 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull core fixes from Thomas Gleixner:

 - A collection of objtool fixes which address recent fallout partially
   exposed by newer toolchains, clang, BPF and general code changes.

 - Force USER_DS for user stack traces

[ Note: the "objtool fixes" are not all to objtool itself, but for
  kernel code that triggers objtool warnings.

  Things like missing function size annotations, or code that confuses
  the unwinder etc.   - Linus]
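
As a concrete illustration of the kind of annotation involved (a minimal
sketch with a hypothetical symbol name, not code from this merge): an asm
stub carries a .type/.size pair, and it is the missing .size that leaves
objtool unable to tell where a function ends.

    /*
     * Sketch only: a hand-written asm stub with the .type/.size pair
     * that ENTRY()/ENDPROC()-style macros emit. Omitting the final
     * .size line leaves a zero-length symbol, which objtool now warns
     * about ("objtool: Warn on zero-length functions").
     */
    asm(".pushsection .text, \"ax\"\n\t"
        ".globl my_stub\n\t"                /* hypothetical symbol */
        ".type my_stub, @function\n\t"
        "my_stub:\n\t"
        "ret\n\t"
        ".size my_stub, .-my_stub\n\t"      /* length = here - start */
        ".popsection");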

* 'core-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  objtool: Support conditional retpolines
  objtool: Convert insn type to enum
  objtool: Fix seg fault on bad switch table entry
  objtool: Support repeated uses of the same C jump table
  objtool: Refactor jump table code
  objtool: Refactor sibling call detection logic
  objtool: Do frame pointer check before dead end check
  objtool: Change dead_end_function() to return boolean
  objtool: Warn on zero-length functions
  objtool: Refactor function alias logic
  objtool: Track original function across branches
  objtool: Add mcsafe_handle_tail() to the uaccess safe list
  bpf: Disable GCC -fgcse optimization for ___bpf_prog_run()
  x86/uaccess: Remove redundant CLACs in getuser/putuser error paths
  x86/uaccess: Don't leak AC flag into fentry from mcsafe_handle_tail()
  x86/uaccess: Remove ELF function annotation from copy_user_handle_tail()
  x86/head/64: Annotate start_cpu0() as non-callable
  x86/entry: Fix thunk function ELF sizes
  x86/kvm: Don't call kvm_spurious_fault() from .fixup
  x86/kvm: Replace vmx_vmenter()'s call to kvm_spurious_fault() with UD2
  ...
parents 4b01f5a4 b68b9907
@@ -12,9 +12,7 @@
 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
 	.macro THUNK name, func, put_ret_addr_in_rdi=0
-	.globl \name
-	.type \name, @function
-\name:
+	ENTRY(\name)
 	pushq %rbp
 	movq %rsp, %rbp
@@ -35,6 +33,7 @@
 	call \func
 	jmp .L_restore
+	ENDPROC(\name)
 	_ASM_NOKPROBE(\name)
 	.endm
......
@@ -1496,25 +1496,29 @@ enum {
 #define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
 #define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
 
+asmlinkage void __noreturn kvm_spurious_fault(void);
+
 /*
  * Hardware virtualization extension instructions may fault if a
  * reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * instead the instruction is ignored.
  */
-asmlinkage void kvm_spurious_fault(void);
-
 #define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
-	"666: " insn "\n\t" \
-	"668: \n\t" \
-	".pushsection .fixup, \"ax\" \n" \
+	"666: \n\t" \
+	insn "\n\t" \
+	"jmp 668f \n\t" \
 	"667: \n\t" \
+	"call kvm_spurious_fault \n\t" \
+	"668: \n\t" \
+	".pushsection .fixup, \"ax\" \n\t" \
+	"700: \n\t" \
 	cleanup_insn "\n\t" \
-	"cmpb $0, kvm_rebooting \n\t" \
-	"jne 668b \n\t" \
-	__ASM_SIZE(push) " $666b \n\t" \
-	"jmp kvm_spurious_fault \n\t" \
+	"cmpb $0, kvm_rebooting\n\t" \
+	"je 667b \n\t" \
+	"jmp 668b \n\t" \
 	".popsection \n\t" \
-	_ASM_EXTABLE(666b, 667b)
+	_ASM_EXTABLE(666b, 700b)
 
 #define __kvm_handle_fault_on_reboot(insn) \
 	____kvm_handle_fault_on_reboot(insn, "")
......
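
To make the control flow of the reworked ____kvm_handle_fault_on_reboot()
above easier to follow, here is a hedged sketch of a caller (the wrapper
name is hypothetical; in-tree users reach the macro through an __ex()-style
alias):

    /*
     * Sketch, assuming a hypothetical caller: the macro expands to
     *
     *   666:  vmxoff            ; may fault while reboot tears down VMX
     *         jmp  668f         ; no fault: skip the error path
     *   667:  call kvm_spurious_fault   ; genuine fault: never returns
     *   668:                    ; continue
     *
     * plus a .fixup fragment at 700 that runs the cleanup insn, then
     * "je 667b" (not rebooting -> report the spurious fault) or
     * "jmp 668b" (rebooting -> ignore the instruction). The extable
     * entry maps 666 -> 700, so the call to kvm_spurious_fault() no
     * longer lives in .fixup, where objtool cannot validate it.
     */
    static inline void vmxoff_safe_example(void)
    {
            asm volatile(__kvm_handle_fault_on_reboot("vmxoff"));
    }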
@@ -746,6 +746,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	PV_RESTORE_ALL_CALLER_REGS \
 	FRAME_END \
 	"ret;" \
+	".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
 	".popsection")
 
 /* Get a reference to a callee-save function */
......
@@ -253,10 +253,10 @@ END(secondary_startup_64)
  * start_secondary() via .Ljump_to_C_code.
  */
 ENTRY(start_cpu0)
-	movq	initial_stack(%rip), %rsp
 	UNWIND_HINT_EMPTY
+	movq	initial_stack(%rip), %rsp
 	jmp	.Ljump_to_C_code
-ENDPROC(start_cpu0)
+END(start_cpu0)
 #endif
 
 /* Both SMP bootup and ACPI suspend change these variables */
......
@@ -838,6 +838,7 @@ asm(
 "cmpb	$0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
 "setne	%al;"
 "ret;"
+".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
 ".popsection");
 
 #endif
......
@@ -312,29 +312,42 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
 
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 
-#define FOP_FUNC(name) \
+#define __FOP_FUNC(name) \
 	".align " __stringify(FASTOP_SIZE) " \n\t" \
 	".type " name ", @function \n\t" \
 	name ":\n\t"
 
-#define FOP_RET   "ret \n\t"
+#define FOP_FUNC(name) \
+	__FOP_FUNC(#name)
+
+#define __FOP_RET(name) \
+	"ret \n\t" \
+	".size " name ", .-" name "\n\t"
+
+#define FOP_RET(name) \
+	__FOP_RET(#name)
 
 #define FOP_START(op) \
 	extern void em_##op(struct fastop *fake); \
 	asm(".pushsection .text, \"ax\" \n\t" \
 	    ".global em_" #op " \n\t" \
-	    FOP_FUNC("em_" #op)
+	    ".align " __stringify(FASTOP_SIZE) " \n\t" \
+	    "em_" #op ":\n\t"
 
 #define FOP_END \
 	".popsection")
 
+#define __FOPNOP(name) \
+	__FOP_FUNC(name) \
+	__FOP_RET(name)
+
 #define FOPNOP() \
-	FOP_FUNC(__stringify(__UNIQUE_ID(nop))) \
-	FOP_RET
+	__FOPNOP(__stringify(__UNIQUE_ID(nop)))
 
 #define FOP1E(op, dst) \
-	FOP_FUNC(#op "_" #dst) \
-	"10: " #op " %" #dst " \n\t" FOP_RET
+	__FOP_FUNC(#op "_" #dst) \
+	"10: " #op " %" #dst " \n\t" \
+	__FOP_RET(#op "_" #dst)
 
 #define FOP1EEX(op, dst) \
 	FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
@@ -366,8 +379,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	FOP_END
 
 #define FOP2E(op, dst, src) \
-	FOP_FUNC(#op "_" #dst "_" #src) \
-	#op " %" #src ", %" #dst " \n\t" FOP_RET
+	__FOP_FUNC(#op "_" #dst "_" #src) \
+	#op " %" #src ", %" #dst " \n\t" \
+	__FOP_RET(#op "_" #dst "_" #src)
 
 #define FASTOP2(op) \
 	FOP_START(op) \
@@ -405,8 +419,9 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	FOP_END
 
 #define FOP3E(op, dst, src, src2) \
-	FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
-	#op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
+	__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
+	#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
+	__FOP_RET(#op "_" #dst "_" #src "_" #src2)
 
 /* 3-operand, word-only, src2=cl */
 #define FASTOP3WCL(op) \
@@ -423,7 +438,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	".type " #op ", @function \n\t" \
 	#op ": \n\t" \
 	#op " %al \n\t" \
-	FOP_RET
+	__FOP_RET(#op)
 
 asm(".pushsection .fixup, \"ax\"\n"
     ".global kvm_fastop_exception \n"
@@ -449,7 +464,10 @@ FOP_SETCC(setle)
 FOP_SETCC(setnle)
 FOP_END;
 
-FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
+FOP_START(salc)
+FOP_FUNC(salc)
+"pushf; sbb %al, %al; popf \n\t"
+FOP_RET(salc)
 FOP_END;
 
 /*
......
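
The fastop stubs above are entered through a function pointer from fastop(),
so each stub needs real ELF function metadata. A hedged sketch of roughly
what __FOP_FUNC()/__FOP_RET() now emit for one 1-byte template, FOP1E(not, al)
(the "10:" exception-table label is omitted here, and the alignment value is
config-dependent):

    asm(".pushsection .text, \"ax\"\n\t"
        ".align 16\n\t"                  /* FASTOP_SIZE varies by config */
        ".type not_al, @function\n\t"
        "not_al:\n\t"
        "not %al\n\t"
        "ret\n\t"
        ".size not_al, .-not_al\n\t"     /* the new __FOP_RET() annotation */
        ".popsection");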
@@ -54,9 +54,9 @@ ENTRY(vmx_vmenter)
 	ret
 
 3:	cmpb	$0, kvm_rebooting
-	jne	4f
-	call	kvm_spurious_fault
-4:	ret
+	je	4f
+	ret
+4:	ud2
 
 	.pushsection .fixup, "ax"
 5:	jmp	3b
......
@@ -239,7 +239,7 @@ copy_user_handle_tail:
 	ret
 
 	_ASM_EXTABLE_UA(1b, 2b)
-ENDPROC(copy_user_handle_tail)
+END(copy_user_handle_tail)
 
 /*
  * copy_user_nocache - Uncached memory copy with exception handling
......
@@ -115,29 +115,29 @@ ENDPROC(__get_user_8)
 EXPORT_SYMBOL(__get_user_8)
 
+bad_get_user_clac:
+	ASM_CLAC
 bad_get_user:
 	xor %edx,%edx
 	mov $(-EFAULT),%_ASM_AX
-	ASM_CLAC
 	ret
-END(bad_get_user)
 
 #ifdef CONFIG_X86_32
+bad_get_user_8_clac:
+	ASM_CLAC
 bad_get_user_8:
 	xor %edx,%edx
 	xor %ecx,%ecx
 	mov $(-EFAULT),%_ASM_AX
-	ASM_CLAC
 	ret
-END(bad_get_user_8)
 #endif
 
-	_ASM_EXTABLE_UA(1b, bad_get_user)
-	_ASM_EXTABLE_UA(2b, bad_get_user)
-	_ASM_EXTABLE_UA(3b, bad_get_user)
+	_ASM_EXTABLE_UA(1b, bad_get_user_clac)
+	_ASM_EXTABLE_UA(2b, bad_get_user_clac)
+	_ASM_EXTABLE_UA(3b, bad_get_user_clac)
 #ifdef CONFIG_X86_64
-	_ASM_EXTABLE_UA(4b, bad_get_user)
+	_ASM_EXTABLE_UA(4b, bad_get_user_clac)
 #else
-	_ASM_EXTABLE_UA(4b, bad_get_user_8)
-	_ASM_EXTABLE_UA(5b, bad_get_user_8)
+	_ASM_EXTABLE_UA(4b, bad_get_user_8_clac)
+	_ASM_EXTABLE_UA(5b, bad_get_user_8_clac)
 #endif
@@ -32,8 +32,6 @@
  */
 
 #define ENTER	mov PER_CPU_VAR(current_task), %_ASM_BX
-#define EXIT	ASM_CLAC ;	\
-	ret
 
 .text
 ENTRY(__put_user_1)
@@ -43,7 +41,8 @@ ENTRY(__put_user_1)
 	ASM_STAC
 1:	movb %al,(%_ASM_CX)
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	ret
 ENDPROC(__put_user_1)
 EXPORT_SYMBOL(__put_user_1)
 
@@ -56,7 +55,8 @@ ENTRY(__put_user_2)
 	ASM_STAC
 2:	movw %ax,(%_ASM_CX)
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	ret
 ENDPROC(__put_user_2)
 EXPORT_SYMBOL(__put_user_2)
 
@@ -69,7 +69,8 @@ ENTRY(__put_user_4)
 	ASM_STAC
 3:	movl %eax,(%_ASM_CX)
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	ret
 ENDPROC(__put_user_4)
 EXPORT_SYMBOL(__put_user_4)
 
@@ -85,19 +86,21 @@ ENTRY(__put_user_8)
 5:	movl %edx,4(%_ASM_CX)
 #endif
 	xor %eax,%eax
-	EXIT
+	ASM_CLAC
+	RET
 ENDPROC(__put_user_8)
 EXPORT_SYMBOL(__put_user_8)
 
+bad_put_user_clac:
+	ASM_CLAC
 bad_put_user:
 	movl $-EFAULT,%eax
-	EXIT
-END(bad_put_user)
+	RET
 
-	_ASM_EXTABLE_UA(1b, bad_put_user)
-	_ASM_EXTABLE_UA(2b, bad_put_user)
-	_ASM_EXTABLE_UA(3b, bad_put_user)
-	_ASM_EXTABLE_UA(4b, bad_put_user)
+	_ASM_EXTABLE_UA(1b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(2b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(3b, bad_put_user_clac)
+	_ASM_EXTABLE_UA(4b, bad_put_user_clac)
 #ifdef CONFIG_X86_32
-	_ASM_EXTABLE_UA(5b, bad_put_user)
+	_ASM_EXTABLE_UA(5b, bad_put_user_clac)
 #endif
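
The getuser/putuser rework above follows one pattern: the exception-table
entries now target a *_clac alias that executes CLAC exactly once and falls
through into the plain error path, so a fault cannot leak the AC flag and the
success path pays no extra CLAC. The generic shape, sketched with hypothetical
labels and a plain clac in place of the ASM_CLAC alternative:

    asm("fixup_clac:\n\t"            /* extable entries point here */
        "clac\n\t"                   /* drop AC exactly once */
        "fixup:\n\t"
        "mov $-14, %eax\n\t"         /* -EFAULT */
        "ret");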
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(clear_user);
  * but reuse __memcpy_mcsafe in case a new read error is encountered.
  * clac() is handled in _copy_to_iter_mcsafe().
  */
-__visible unsigned long
+__visible notrace unsigned long
 mcsafe_handle_tail(char *to, char *from, unsigned len)
 {
 	for (; len; --len, to++, from++) {
......
@@ -170,3 +170,5 @@
 #else
 #define __diag_GCC_8(s)
 #endif
+
+#define __no_fgcse __attribute__((optimize("-fno-gcse")))
@@ -116,9 +116,14 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	".pushsection .discard.unreachable\n\t" \
 	".long 999b - .\n\t" \
 	".popsection\n\t"
+
+/* Annotate a C jump table to allow objtool to follow the code flow */
+#define __annotate_jump_table __section(".rodata..c_jump_table")
+
 #else
 #define annotate_reachable()
 #define annotate_unreachable()
+#define __annotate_jump_table
 #endif
 
 #ifndef ASM_UNREACHABLE
......
@@ -189,6 +189,10 @@ struct ftrace_likely_data {
 #define asm_volatile_goto(x...) asm goto(x)
 #endif
 
+#ifndef __no_fgcse
+# define __no_fgcse
+#endif
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
......
@@ -1295,11 +1295,11 @@ bool bpf_opcode_in_insntable(u8 code)
  *
  * Decode and execute eBPF instructions.
  */
-static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z
-	static const void *jumptable[256] = {
+	static const void * const jumptable[256] __annotate_jump_table = {
 		[0 ... 255] = &&default_label,
 		/* Now overwrite non-defaults ... */
 		BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL),
@@ -1558,7 +1558,6 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 	BUG_ON(1);
 	return 0;
 }
-STACK_FRAME_NON_STANDARD(___bpf_prog_run); /* jump table */
 
 #define PROG_NAME(stack_size) __bpf_prog_run##stack_size
 #define DEFINE_BPF_PROG_RUN(stack_size) \
......
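
A minimal sketch of the pattern these two annotations serve, assuming the
patched headers above are in scope: a computed-goto dispatcher whose jump
table objtool can now follow (__annotate_jump_table places it in
.rodata..c_jump_table), compiled with __no_fgcse so GCC's global CSE pass
does not rewrite the computed gotos in a way objtool cannot track. Toy
opcodes, not the real BPF interpreter:

    static int __no_fgcse run_toy(const unsigned char *ops)
    {
            static const void * const jt[2] __annotate_jump_table = {
                    [0] = &&op_halt,
                    [1] = &&op_inc,
            };
            int pc = 0, acc = 0;

            goto *jt[ops[pc]];              /* dispatch first opcode */
    op_inc:
            acc++;
            pc++;
            goto *jt[ops[pc]];              /* dispatch next opcode */
    op_halt:
            return acc;
    }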
@@ -226,12 +226,17 @@ unsigned int stack_trace_save_user(unsigned long *store, unsigned int size)
 		.store	= store,
 		.size	= size,
 	};
+	mm_segment_t fs;
 
 	/* Trace user stack if not a kernel thread */
 	if (current->flags & PF_KTHREAD)
 		return 0;
 
+	fs = get_fs();
+	set_fs(USER_DS);
 	arch_stack_walk_user(consume_entry, &c, task_pt_regs(current));
+	set_fs(fs);
+
 	return c.len;
 }
 #endif
......
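
The stack_trace_save_user() hunk above is the "Force USER_DS" item from the
merge summary: if the function is entered while the task runs under
KERNEL_DS, the user-copy helpers used by the unwinder are not limited to
user addresses. A hedged sketch of the save/switch/restore idiom, with a
hypothetical walker function standing in for arch_stack_walk_user():

    static unsigned int trace_user_example(void)
    {
            mm_segment_t old_fs = get_fs(); /* remember caller's limit */
            unsigned int n;

            set_fs(USER_DS);                /* constrain to user range */
            n = walk_user_frames();         /* hypothetical walker */
            set_fs(old_fs);                 /* always restore on exit */

            return n;
    }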
@@ -11,22 +11,24 @@
 #include "elf.h"
 #include "cfi.h"
 
-#define INSN_JUMP_CONDITIONAL	1
-#define INSN_JUMP_UNCONDITIONAL	2
-#define INSN_JUMP_DYNAMIC	3
-#define INSN_CALL		4
-#define INSN_CALL_DYNAMIC	5
-#define INSN_RETURN		6
-#define INSN_CONTEXT_SWITCH	7
-#define INSN_STACK		8
-#define INSN_BUG		9
-#define INSN_NOP		10
-#define INSN_STAC		11
-#define INSN_CLAC		12
-#define INSN_STD		13
-#define INSN_CLD		14
-#define INSN_OTHER		15
-#define INSN_LAST		INSN_OTHER
+enum insn_type {
+	INSN_JUMP_CONDITIONAL,
+	INSN_JUMP_UNCONDITIONAL,
+	INSN_JUMP_DYNAMIC,
+	INSN_JUMP_DYNAMIC_CONDITIONAL,
+	INSN_CALL,
+	INSN_CALL_DYNAMIC,
+	INSN_RETURN,
+	INSN_CONTEXT_SWITCH,
+	INSN_STACK,
+	INSN_BUG,
+	INSN_NOP,
+	INSN_STAC,
+	INSN_CLAC,
+	INSN_STD,
+	INSN_CLD,
+	INSN_OTHER,
+};
 
 enum op_dest_type {
 	OP_DEST_REG,
@@ -68,7 +70,7 @@ void arch_initial_func_cfi_state(struct cfi_state *state);
 
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
-			    unsigned int *len, unsigned char *type,
+			    unsigned int *len, enum insn_type *type,
 			    unsigned long *immediate, struct stack_op *op);
 
 bool arch_callee_saved_reg(unsigned char reg);
......
@@ -68,7 +68,7 @@ bool arch_callee_saved_reg(unsigned char reg)
 
 int arch_decode_instruction(struct elf *elf, struct section *sec,
 			    unsigned long offset, unsigned int maxlen,
-			    unsigned int *len, unsigned char *type,
+			    unsigned int *len, enum insn_type *type,
 			    unsigned long *immediate, struct stack_op *op)
 {
 	struct insn insn;
......
This diff is collapsed.
@@ -31,13 +31,14 @@ struct instruction {
 	struct section *sec;
 	unsigned long offset;
 	unsigned int len;
-	unsigned char type;
+	enum insn_type type;
 	unsigned long immediate;
 	bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
 	bool retpoline_safe;
 	struct symbol *call_dest;
 	struct instruction *jump_dest;
 	struct instruction *first_jump_src;
+	struct rela *jump_table;
 	struct list_head alts;
 	struct symbol *func;
 	struct stack_op stack_op;
......
@@ -278,7 +278,7 @@ static int read_symbols(struct elf *elf)
 		}
 
 		if (sym->offset == s->offset) {
-			if (sym->len == s->len && alias == sym)
+			if (sym->len && sym->len == s->len && alias == sym)
 				alias = s;
 
 			if (sym->len >= s->len) {
@@ -385,7 +385,7 @@ static int read_relas(struct elf *elf)
 			rela->offset = rela->rela.r_offset;
 			symndx = GELF_R_SYM(rela->rela.r_info);
 			rela->sym = find_symbol_by_index(elf, symndx);
-			rela->rela_sec = sec;
+			rela->sec = sec;
 			if (!rela->sym) {
 				WARN("can't find rela entry symbol %d for %s",
 				     symndx, sec->name);
@@ -401,7 +401,7 @@ static int read_relas(struct elf *elf)
 	return 0;
 }
 
-struct elf *elf_open(const char *name, int flags)
+struct elf *elf_read(const char *name, int flags)
 {
 	struct elf *elf;
 	Elf_Cmd cmd;
@@ -463,7 +463,7 @@ struct section *elf_create_section(struct elf *elf, const char *name,
 {
 	struct section *sec, *shstrtab;
 	size_t size = entsize * nr;
-	struct Elf_Scn *s;
+	Elf_Scn *s;
 	Elf_Data *data;
 
 	sec = malloc(sizeof(*sec));
......
@@ -57,11 +57,12 @@ struct rela {
 	struct list_head list;
 	struct hlist_node hash;
 	GElf_Rela rela;
-	struct section *rela_sec;
+	struct section *sec;
 	struct symbol *sym;
 	unsigned int type;
 	unsigned long offset;
 	int addend;
+	bool jump_table_start;
 };
 
 struct elf {
@@ -74,7 +75,7 @@ struct elf {
 };
 
-struct elf *elf_open(const char *name, int flags);
+struct elf *elf_read(const char *name, int flags);
 struct section *find_section_by_name(struct elf *elf, const char *name);
 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
 struct symbol *find_symbol_by_name(struct elf *elf, const char *name);
......