Commit 0b86c75d authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching

Pull livepatching updates from Jiri Kosina:

 - removal of our own implementation of architecture-specific relocation
   code, leveraging the existing code in the module loader to perform the
   arch-dependent work instead, from Jessica Yu (a minimal patch-module
   sketch under the new scheme follows this list).

   The relevant patches have been acked by Rusty (for module.c) and
   Heiko (for s390).

 - live patching support for ppc64le, which is joint work by Michael
   Ellerman and Torsten Duwe.  This comes from a topic branch that is
   shared between livepatching.git and the ppc tree.

 - addition of livepatching documentation from Petr Mladek.
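
For orientation, here is a minimal sketch of what a patch module looks like
with this series applied. It loosely follows samples/livepatch/livepatch-sample.c;
the patched vmlinux function do_something() and its replacement are hypothetical,
and the MODULE_INFO(livepatch, "Y") tag at the end is the marker that the
reworked module loader now keys on:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/livepatch.h>

/* Replacement that takes over calls to the (hypothetical) do_something(). */
static void livepatch_do_something(void)
{
	pr_info("patched do_something() called\n");
}

static struct klp_func funcs[] = {
	{
		.old_name = "do_something",	/* hypothetical vmlinux symbol */
		.new_func = livepatch_do_something,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* name == NULL means the patched object is vmlinux itself */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_disable_patch(&patch));
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
MODULE_INFO(livepatch, "Y");	/* mark as a livepatch module for the loader */

Any references the patch module makes to unexported symbols are now carried as
.klp relocation sections built by the patch-generation tooling and resolved via
the module loader's apply_relocate_add(), replacing the removed
klp_write_module_reloc() path (see the kernel/livepatch/core.c and
kernel/module.c hunks below).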

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching:
  livepatch: make object/func-walking helpers more robust
  livepatch: Add some basic livepatch documentation
  powerpc/livepatch: Add live patching support on ppc64le
  powerpc/livepatch: Add livepatch stack to struct thread_info
  powerpc/livepatch: Add livepatch header
  livepatch: Allow architectures to specify an alternate ftrace location
  ftrace: Make ftrace_location_range() global
  livepatch: robustify klp_register_patch() API error checking
  Documentation: livepatch: outline Elf format and requirements for patch modules
  livepatch: reuse module loader code to write relocations
  module: s390: keep mod_arch_specific for livepatch modules
  module: preserve Elf information for livepatch modules
  Elf: add livepatch-specific Elf constants
parents 16bf8348 be69f70e

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6699,6 +6699,7 @@ F:	kernel/livepatch/
 F:	include/linux/livepatch.h
 F:	arch/x86/include/asm/livepatch.h
 F:	arch/x86/kernel/livepatch.c
+F:	Documentation/livepatch/
 F:	Documentation/ABI/testing/sysfs-kernel-livepatch
 F:	samples/livepatch/
 L:	live-patching@vger.kernel.org

--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -160,6 +160,7 @@ config PPC
 	select HAVE_ARCH_SECCOMP_FILTER
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
 	select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
+	select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS

 config GENERIC_CSUM
 	def_bool CPU_LITTLE_ENDIAN
@@ -1107,3 +1108,5 @@ config PPC_LIB_RHEAP
 	bool

 source "arch/powerpc/kvm/Kconfig"
+
+source "kernel/livepatch/Kconfig"

--- a/arch/x86/kernel/livepatch.c
+++ b/arch/powerpc/include/asm/livepatch.h
 /*
- * livepatch.c - x86-specific Kernel Live Patching Core
+ * livepatch.h - powerpc-specific Kernel Live Patching Core
  *
- * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
- * Copyright (C) 2014 SUSE
+ * Copyright (C) 2015-2016, SUSE, IBM Corp.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -17,54 +16,47 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, see <http://www.gnu.org/licenses/>.
  */
+#ifndef _ASM_POWERPC_LIVEPATCH_H
+#define _ASM_POWERPC_LIVEPATCH_H

 #include <linux/module.h>
-#include <linux/uaccess.h>
-#include <asm/elf.h>
-#include <asm/livepatch.h>
+#include <linux/ftrace.h>

-/**
- * klp_write_module_reloc() - write a relocation in a module
- * @mod: module in which the section to be modified is found
- * @type: ELF relocation type (see asm/elf.h)
- * @loc: address that the relocation should be written to
- * @value: relocation value (sym address + addend)
- *
- * This function writes a relocation to the specified location for
- * a particular module.
- */
-int klp_write_module_reloc(struct module *mod, unsigned long type,
-			   unsigned long loc, unsigned long value)
+#ifdef CONFIG_LIVEPATCH
+static inline int klp_check_compiler_support(void)
 {
-	size_t size = 4;
-	unsigned long val;
-	unsigned long core = (unsigned long)mod->core_layout.base;
-	unsigned long core_size = mod->core_layout.size;
-
-	switch (type) {
-	case R_X86_64_NONE:
 	return 0;
-	case R_X86_64_64:
-		val = value;
-		size = 8;
-		break;
-	case R_X86_64_32:
-		val = (u32)value;
-		break;
-	case R_X86_64_32S:
-		val = (s32)value;
-		break;
-	case R_X86_64_PC32:
-		val = (u32)(value - loc);
-		break;
-	default:
-		/* unsupported relocation type */
-		return -EINVAL;
-	}
-
-	if (loc < core || loc >= core + core_size)
-		/* loc does not point to any symbol inside the module */
-		return -EINVAL;
-
-	return probe_kernel_write((void *)loc, &val, size);
+}
+
+static inline int klp_write_module_reloc(struct module *mod, unsigned long
+		type, unsigned long loc, unsigned long value)
+{
+	/* This requires infrastructure changes; we need the loadinfos. */
+	return -ENOSYS;
+}
+
+static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+	regs->nip = ip;
 }
+
+#define klp_get_ftrace_location klp_get_ftrace_location
+static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	/*
+	 * Live patch works only with -mprofile-kernel on PPC. In this case,
+	 * the ftrace location is always within the first 16 bytes.
+	 */
+	return ftrace_location_range(faddr, faddr + 16);
+}
+
+static inline void klp_init_thread_info(struct thread_info *ti)
+{
+	/* + 1 to account for STACK_END_MAGIC */
+	ti->livepatch_sp = (unsigned long *)(ti + 1) + 1;
+}
+#else
+static void klp_init_thread_info(struct thread_info *ti) { }
+#endif /* CONFIG_LIVEPATCH */
+
+#endif /* _ASM_POWERPC_LIVEPATCH_H */

--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -43,7 +43,9 @@ struct thread_info {
 	int		preempt_count;		/* 0 => preemptable,
						   <0 => BUG */
 	unsigned long	local_flags;		/* private flags for thread */
+#ifdef CONFIG_LIVEPATCH
+	unsigned long *livepatch_sp;
+#endif
 	/* low level flags - has atomic operations done on it */
 	unsigned long	flags ____cacheline_aligned_in_smp;
 };

--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -86,6 +86,10 @@ int main(void)
 	DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
 #endif /* CONFIG_PPC64 */

+#ifdef CONFIG_LIVEPATCH
+	DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
+#endif
+
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
 #ifdef CONFIG_BOOKE

--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -20,6 +20,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/magic.h>
 #include <asm/unistd.h>
 #include <asm/processor.h>
 #include <asm/page.h>
@@ -1248,6 +1249,9 @@ _GLOBAL(ftrace_caller)
 	addi	r3,r3,function_trace_op@toc@l
 	ld	r5,0(r3)

+#ifdef CONFIG_LIVEPATCH
+	mr	r14,r7		/* remember old NIP */
+#endif
 	/* Calculate ip from nip-4 into r3 for call below */
 	subi	r3, r7, MCOUNT_INSN_SIZE
@@ -1272,6 +1276,9 @@ ftrace_call:
 	/* Load ctr with the possibly modified NIP */
 	ld	r3, _NIP(r1)
 	mtctr	r3
+#ifdef CONFIG_LIVEPATCH
+	cmpd	r14,r3		/* has NIP been altered? */
+#endif

 	/* Restore gprs */
 	REST_8GPRS(0,r1)
@@ -1289,6 +1296,11 @@ ftrace_call:
 	ld	r0, LRSAVE(r1)
 	mtlr	r0

+#ifdef CONFIG_LIVEPATCH
+	/* Based on the cmpd above, if the NIP was altered handle livepatch */
+	bne-	livepatch_handler
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	stdu	r1, -112(r1)
 .globl ftrace_graph_call
@@ -1305,6 +1317,91 @@ _GLOBAL(ftrace_graph_stub)
 _GLOBAL(ftrace_stub)
 	blr

+#ifdef CONFIG_LIVEPATCH
+	/*
+	 * This function runs in the mcount context, between two functions. As
+	 * such it can only clobber registers which are volatile and used in
+	 * function linkage.
+	 *
+	 * We get here when a function A, calls another function B, but B has
+	 * been live patched with a new function C.
+	 *
+	 * On entry:
+	 *  - we have no stack frame and can not allocate one
+	 *  - LR points back to the original caller (in A)
+	 *  - CTR holds the new NIP in C
+	 *  - r0 & r12 are free
+	 *
+	 * r0 can't be used as the base register for a DS-form load or store, so
+	 * we temporarily shuffle r1 (stack pointer) into r0 and then put it back.
+	 */
+livepatch_handler:
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	/* Allocate 3 x 8 bytes */
+	ld	r1, TI_livepatch_sp(r12)
+	addi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Save toc & real LR on livepatch stack */
+	std	r2, -24(r1)
+	mflr	r12
+	std	r12, -16(r1)
+
+	/* Store stack end marker */
+	lis	r12, STACK_END_MAGIC@h
+	ori	r12, r12, STACK_END_MAGIC@l
+	std	r12, -8(r1)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Put ctr in r12 for global entry and branch there */
+	mfctr	r12
+	bctrl
+
+	/*
+	 * Now we are returning from the patched function to the original
+	 * caller A. We are free to use r0 and r12, and we can use r2 until we
+	 * restore it.
+	 */
+	CURRENT_THREAD_INFO(r12, r1)
+
+	/* Save stack pointer into r0 */
+	mr	r0, r1
+
+	ld	r1, TI_livepatch_sp(r12)
+
+	/* Check stack marker hasn't been trashed */
+	lis	r2, STACK_END_MAGIC@h
+	ori	r2, r2, STACK_END_MAGIC@l
+	ld	r12, -8(r1)
+1:	tdne	r12, r2
+	EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0
+
+	/* Restore LR & toc from livepatch stack */
+	ld	r12, -16(r1)
+	mtlr	r12
+	ld	r2, -24(r1)
+
+	/* Pop livepatch stack frame */
+	CURRENT_THREAD_INFO(r12, r0)
+	subi	r1, r1, 24
+	std	r1, TI_livepatch_sp(r12)
+
+	/* Restore real stack pointer */
+	mr	r1, r0
+
+	/* Return to original caller of live patched function */
+	blr
+#endif
+
 #else
 _GLOBAL_TOC(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
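
Read together with klp_init_thread_info() above, the 24-byte frames work out as
follows; this sketch is my own illustration of the layout, not part of the patch.
The livepatch stack sits just above struct thread_info (skipping the word
reserved for the regular stack's STACK_END_MAGIC) and grows toward higher
addresses, one frame per live-patched call currently in flight:

	lower addresses
	  struct thread_info
	  (word holding the normal stack's STACK_END_MAGIC)
	  1st frame:  saved TOC (r2)            <- initial livepatch_sp
	              saved real LR
	              STACK_END_MAGIC sentinel
	  2nd frame:  ...                       <- livepatch_sp after one push (+24)
	higher addresses

livepatch_handler pushes a frame before branching to the replacement function
(bctrl) and pops it on the way back, trapping via tdne/EMIT_BUG_ENTRY if the
sentinel has been overwritten.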

--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -66,6 +66,7 @@
 #include <asm/udbg.h>
 #include <asm/smp.h>
 #include <asm/debug.h>
+#include <asm/livepatch.h>

 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -607,10 +608,12 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);

 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
+		klp_init_thread_info(tp);
 	}
 }

--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -55,6 +55,8 @@
 #include <asm/firmware.h>
 #endif
 #include <asm/code-patching.h>
+#include <asm/livepatch.h>
+
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
@@ -1400,13 +1402,15 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	extern void ret_from_kernel_thread(void);
 	void (*f)(void);
 	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+	struct thread_info *ti = task_thread_info(p);
+
+	klp_init_thread_info(ti);

 	/* Copy registers */
 	sp -= sizeof(struct pt_regs);
 	childregs = (struct pt_regs *) sp;
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		/* kernel thread */
-		struct thread_info *ti = (void *)task_stack_page(p);
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
 		/* function */

--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -69,6 +69,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/hugetlb.h>
 #include <asm/epapr_hcalls.h>
+#include <asm/livepatch.h>

 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -667,16 +668,16 @@ static void __init emergency_stack_init(void)
 	limit = min(safe_stack_limit(), ppc64_rma_size);

 	for_each_possible_cpu(i) {
-		unsigned long sp;
-		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].emergency_sp = __va(sp);
+		struct thread_info *ti;
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;

 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for machine check exception handling. */
-		sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
-		sp += THREAD_SIZE;
-		paca[i].mc_emergency_sp = __va(sp);
+		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		klp_init_thread_info(ti);
+		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
 }
@@ -700,6 +701,8 @@ void __init setup_arch(char **cmdline_p)
 	if (ppc_md.panic)
 		setup_panic();

+	klp_init_thread_info(&init_thread_info);
+
 	init_mm.start_code = (unsigned long)_stext;
 	init_mm.end_code = (unsigned long) _etext;
 	init_mm.end_data = (unsigned long) _edata;

--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -24,13 +24,6 @@ static inline int klp_check_compiler_support(void)
 	return 0;
 }

-static inline int klp_write_module_reloc(struct module *mod, unsigned long
-		type, unsigned long loc, unsigned long value)
-{
-	/* not supported yet */
-	return -ENOSYS;
-}
-
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {
 	regs->psw.addr = ip;

--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -51,6 +51,10 @@ void *module_alloc(unsigned long size)
 void module_arch_freeing_init(struct module *mod)
 {
+	if (is_livepatch_module(mod) &&
+	    mod->state == MODULE_STATE_LIVE)
+		return;
+
 	vfree(mod->arch.syminfo);
 	mod->arch.syminfo = NULL;
 }
@@ -425,7 +429,5 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    struct module *me)
 {
 	jump_label_apply_nops(me);
-	vfree(me->arch.syminfo);
-	me->arch.syminfo = NULL;
 	return 0;
 }

--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -32,8 +32,6 @@ static inline int klp_check_compiler_support(void)
 #endif
 	return 0;
 }
-int klp_write_module_reloc(struct module *mod, unsigned long type,
-			   unsigned long loc, unsigned long value);
 static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 {

--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -83,7 +83,6 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
 obj-y += apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_LIVEPATCH) += livepatch.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
 obj-$(CONFIG_X86_TSC) += trace_clock.o

--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -455,6 +455,7 @@ int ftrace_update_record(struct dyn_ftrace *rec, int enable);
 int ftrace_test_record(struct dyn_ftrace *rec, int enable);
 void ftrace_run_stop_machine(int command);
 unsigned long ftrace_location(unsigned long ip);
+unsigned long ftrace_location_range(unsigned long start, unsigned long end);
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -64,28 +64,9 @@ struct klp_func {
 	struct list_head stack_node;
 };

-/**
- * struct klp_reloc - relocation structure for live patching
- * @loc: address where the relocation will be written
- * @sympos: position in kallsyms to disambiguate symbols (optional)
- * @type: ELF relocation type
- * @name: name of the referenced symbol (for lookup/verification)
- * @addend: offset from the referenced symbol
- * @external: symbol is either exported or within the live patch module itself
- */
-struct klp_reloc {
-	unsigned long loc;
-	unsigned long sympos;
-	unsigned long type;
-	const char *name;
-	int addend;
-	int external;
-};
-
 /**
  * struct klp_object - kernel object structure for live patching
  * @name: module name (or NULL for vmlinux)
- * @relocs: relocation entries to be applied at load time
  * @funcs: function entries for functions to be patched in the object
  * @kobj: kobject for sysfs resources
  * @mod: kernel module associated with the patched object
@@ -95,7 +76,6 @@ struct klp_reloc {
 struct klp_object {
 	/* external */
 	const char *name;
-	struct klp_reloc *relocs;
 	struct klp_func *funcs;

 	/* internal */
@@ -124,10 +104,12 @@
 };

 #define klp_for_each_object(patch, obj) \
-	for (obj = patch->objs; obj->funcs; obj++)
+	for (obj = patch->objs; obj->funcs || obj->name; obj++)

 #define klp_for_each_func(obj, func) \
-	for (func = obj->funcs; func->old_name; func++)
+	for (func = obj->funcs; \
+	     func->old_name || func->new_func || func->old_sympos; \
+	     func++)

 int klp_register_patch(struct klp_patch *);
 int klp_unregister_patch(struct klp_patch *);

--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -330,6 +330,15 @@ struct mod_kallsyms {
 	char *strtab;
 };

+#ifdef CONFIG_LIVEPATCH
+struct klp_modinfo {
+	Elf_Ehdr hdr;
+	Elf_Shdr *sechdrs;
+	char *secstrings;
+	unsigned int symndx;
+};
+#endif
+
 struct module {
 	enum module_state state;
@@ -456,7 +465,11 @@ struct module {
 #endif

 #ifdef CONFIG_LIVEPATCH
+	bool klp; /* Is this a livepatch module? */
 	bool klp_alive;
+
+	/* Elf information */
+	struct klp_modinfo *klp_info;
 #endif

 #ifdef CONFIG_MODULE_UNLOAD
@@ -630,6 +643,18 @@ static inline bool module_requested_async_probing(struct module *module)
 	return module && module->async_probe_requested;
 }

+#ifdef CONFIG_LIVEPATCH
+static inline bool is_livepatch_module(struct module *mod)
+{
+	return mod->klp;
+}
+#else /* !CONFIG_LIVEPATCH */
+static inline bool is_livepatch_module(struct module *mod)
+{
+	return false;
+}
+#endif /* CONFIG_LIVEPATCH */
+
 #else /* !CONFIG_MODULES... */

 /* Given an address, look for it in the exception tables. */

--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -285,6 +285,7 @@ typedef struct elf64_phdr {
 #define SHF_WRITE		0x1
 #define SHF_ALLOC		0x2
 #define SHF_EXECINSTR		0x4
+#define SHF_RELA_LIVEPATCH	0x00100000
 #define SHF_MASKPROC		0xf0000000

 /* special section indexes */
@@ -292,6 +293,7 @@ typedef struct elf64_phdr {
 #define SHN_LORESERVE	0xff00
 #define SHN_LOPROC	0xff00
 #define SHN_HIPROC	0xff1f
+#define SHN_LIVEPATCH	0xff20
 #define SHN_ABS		0xfff1
 #define SHN_COMMON	0xfff2
 #define SHN_HIRESERVE	0xffff

--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -28,6 +28,8 @@
 #include <linux/list.h>
 #include <linux/kallsyms.h>
 #include <linux/livepatch.h>
+#include <linux/elf.h>
+#include <linux/moduleloader.h>
 #include <asm/cacheflush.h>

 /**
@@ -204,75 +206,109 @@ static int klp_find_object_symbol(const char *objname, const char *name,
 	return -EINVAL;
 }

-/*
- * external symbols are located outside the parent object (where the parent
- * object is either vmlinux or the kmod being patched).
- */
-static int klp_find_external_symbol(struct module *pmod, const char *name,
-				    unsigned long *addr)
-{
-	const struct kernel_symbol *sym;
-
-	/* first, check if it's an exported symbol */
-	preempt_disable();
-	sym = find_symbol(name, NULL, NULL, true, true);
-	if (sym) {
-		*addr = sym->value;
-		preempt_enable();
-		return 0;
-	}
-	preempt_enable();
-
-	/*
-	 * Check if it's in another .o within the patch module. This also
-	 * checks that the external symbol is unique.
-	 */
-	return klp_find_object_symbol(pmod->name, name, 0, addr);
-}
+static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
+{
+	int i, cnt, vmlinux, ret;
+	char objname[MODULE_NAME_LEN];
+	char symname[KSYM_NAME_LEN];
+	char *strtab = pmod->core_kallsyms.strtab;
+	Elf_Rela *relas;
+	Elf_Sym *sym;
+	unsigned long sympos, addr;
+
+	/*
+	 * Since the field widths for objname and symname in the sscanf()
+	 * call are hard-coded and correspond to MODULE_NAME_LEN and
+	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
+	 * and KSYM_NAME_LEN have the values we expect them to have.
+	 *
+	 * Because the value of MODULE_NAME_LEN can differ among architectures,
+	 * we use the smallest/strictest upper bound possible (56, based on
+	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
+	 */
+	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
+
+	relas = (Elf_Rela *) relasec->sh_addr;
+	/* For each rela in this klp relocation section */
+	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
+		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
+		if (sym->st_shndx != SHN_LIVEPATCH) {
+			pr_err("symbol %s is not marked as a livepatch symbol",
+			       strtab + sym->st_name);
+			return -EINVAL;
+		}
+
+		/* Format: .klp.sym.objname.symname,sympos */
+		cnt = sscanf(strtab + sym->st_name,
+			     ".klp.sym.%55[^.].%127[^,],%lu",
+			     objname, symname, &sympos);
+		if (cnt != 3) {
+			pr_err("symbol %s has an incorrectly formatted name",
+			       strtab + sym->st_name);
+			return -EINVAL;
+		}
+
+		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
+		vmlinux = !strcmp(objname, "vmlinux");
+		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
+					     symname, sympos, &addr);
+		if (ret)
+			return ret;
+
+		sym->st_value = addr;
+	}
+
+	return 0;
+}

 static int klp_write_object_relocations(struct module *pmod,
 					struct klp_object *obj)
 {
-	int ret = 0;
-	unsigned long val;
-	struct klp_reloc *reloc;
+	int i, cnt, ret = 0;
+	const char *objname, *secname;
+	char sec_objname[MODULE_NAME_LEN];
+	Elf_Shdr *sec;

 	if (WARN_ON(!klp_is_object_loaded(obj)))
 		return -EINVAL;

-	if (WARN_ON(!obj->relocs))
-		return -EINVAL;
+	objname = klp_is_module(obj) ? obj->name : "vmlinux";

 	module_disable_ro(pmod);
-
-	for (reloc = obj->relocs; reloc->name; reloc++) {
-		/* discover the address of the referenced symbol */
-		if (reloc->external) {
-			if (reloc->sympos > 0) {
-				pr_err("non-zero sympos for external reloc symbol '%s' is not supported\n",
-				       reloc->name);
-				ret = -EINVAL;
-				goto out;
-			}
-			ret = klp_find_external_symbol(pmod, reloc->name, &val);
-		} else
-			ret = klp_find_object_symbol(obj->name,
-						     reloc->name,
-						     reloc->sympos,
-						     &val);
-		if (ret)
-			goto out;
-
-		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
-					     val + reloc->addend);
-		if (ret) {
-			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
-			       reloc->name, val, ret);
-			goto out;
-		}
-	}
-
-out:
+	/* For each klp relocation section */
+	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
+		sec = pmod->klp_info->sechdrs + i;
+		secname = pmod->klp_info->secstrings + sec->sh_name;
+		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
+			continue;
+
+		/*
+		 * Format: .klp.rela.sec_objname.section_name
+		 * See comment in klp_resolve_symbols() for an explanation
+		 * of the selected field width value.
+		 */
+		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
+		if (cnt != 1) {
+			pr_err("section %s has an incorrectly formatted name",
+			       secname);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (strcmp(objname, sec_objname))
+			continue;
+
+		ret = klp_resolve_symbols(sec, pmod);
+		if (ret)
+			break;
+
+		ret = apply_relocate_add(pmod->klp_info->sechdrs,
+					 pmod->core_kallsyms.strtab,
+					 pmod->klp_info->symndx, i, pmod);
+		if (ret)
+			break;
+	}
+
 	module_enable_ro(pmod);
 	return ret;
 }
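
For a concrete feel of the naming convention parsed above, here is a small
stand-alone userspace sketch (not part of the patch; the symbol name in it is
made up) that decomposes a hypothetical .klp.sym entry with the same sscanf()
format:

#include <stdio.h>

int main(void)
{
	/* Hypothetical livepatch symbol name: .klp.sym.objname.symname,sympos */
	const char *name = ".klp.sym.vmlinux.saved_command_line,0";
	char objname[56], symname[128];
	unsigned long sympos;

	if (sscanf(name, ".klp.sym.%55[^.].%127[^,],%lu",
		   objname, symname, &sympos) == 3)
		printf("object=%s symbol=%s sympos=%lu\n",
		       objname, symname, sympos);
	return 0;
}

The matching relocation sections are named .klp.rela.objname.section_name and
carry the SHF_RELA_LIVEPATCH flag, which is why apply_relocations() in the
module loader (see the kernel/module.c hunks below) skips them and leaves them
to klp_write_object_relocations().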
@@ -298,6 +334,19 @@ static void notrace klp_ftrace_handler(unsigned long ip,
 	rcu_read_unlock();
 }

+/*
+ * Convert a function address into the appropriate ftrace location.
+ *
+ * Usually this is just the address of the function, but on some architectures
+ * it's more complicated so allow them to provide a custom behaviour.
+ */
+#ifndef klp_get_ftrace_location
+static unsigned long klp_get_ftrace_location(unsigned long faddr)
+{
+	return faddr;
+}
+#endif
+
 static void klp_disable_func(struct klp_func *func)
 {
 	struct klp_ops *ops;
@@ -312,8 +361,14 @@ static void klp_disable_func(struct klp_func *func)
 		return;

 	if (list_is_singular(&ops->func_stack)) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (WARN_ON(!ftrace_loc))
+			return;
+
 		WARN_ON(unregister_ftrace_function(&ops->fops));
-		WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
+		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

 		list_del_rcu(&func->stack_node);
 		list_del(&ops->node);
@@ -338,6 +393,15 @@ static int klp_enable_func(struct klp_func *func)

 	ops = klp_find_ops(func->old_addr);
 	if (!ops) {
+		unsigned long ftrace_loc;
+
+		ftrace_loc = klp_get_ftrace_location(func->old_addr);
+		if (!ftrace_loc) {
+			pr_err("failed to find location for function '%s'\n",
+				func->old_name);
+			return -EINVAL;
+		}
+
 		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
 		if (!ops)
 			return -ENOMEM;
@@ -352,7 +416,7 @@ static int klp_enable_func(struct klp_func *func)
 		INIT_LIST_HEAD(&ops->func_stack);
 		list_add_rcu(&func->stack_node, &ops->func_stack);

-		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
+		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
 		if (ret) {
 			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
 			       func->old_name, ret);
@@ -363,7 +427,7 @@ static int klp_enable_func(struct klp_func *func)
 		if (ret) {
 			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
 			       func->old_name, ret);
-			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
+			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
 			goto err;
 		}
@@ -683,6 +747,9 @@ static void klp_free_patch(struct klp_patch *patch)

 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
 {
+	if (!func->old_name || !func->new_func)
+		return -EINVAL;
+
 	INIT_LIST_HEAD(&func->stack_node);
 	func->state = KLP_DISABLED;
@@ -703,11 +770,9 @@ static int klp_init_object_loaded(struct klp_patch *patch,
 	struct klp_func *func;
 	int ret;

-	if (obj->relocs) {
-		ret = klp_write_object_relocations(patch->mod, obj);
-		if (ret)
-			return ret;
-	}
+	ret = klp_write_object_relocations(patch->mod, obj);
+	if (ret)
+		return ret;

 	klp_for_each_func(obj, func) {
 		ret = klp_find_object_symbol(obj->name, func->old_name,
@@ -842,12 +907,18 @@ int klp_register_patch(struct klp_patch *patch)
 {
 	int ret;

-	if (!klp_initialized())
-		return -ENODEV;
-
 	if (!patch || !patch->mod)
 		return -EINVAL;

+	if (!is_livepatch_module(patch->mod)) {
+		pr_err("module %s is not marked as a livepatch module",
+		       patch->mod->name);
+		return -EINVAL;
+	}
+
+	if (!klp_initialized())
+		return -ENODEV;
+
 	/*
 	 * A reference is taken on the patch module to prevent it from being
 	 * unloaded. Right now, we don't allow patch modules to unload since

--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1973,6 +1973,83 @@ static void module_enable_nx(const struct module *mod) { }
 static void module_disable_nx(const struct module *mod) { }
 #endif

+#ifdef CONFIG_LIVEPATCH
+/*
+ * Persist Elf information about a module. Copy the Elf header,
+ * section header table, section string table, and symtab section
+ * index from info to mod->klp_info.
+ */
+static int copy_module_elf(struct module *mod, struct load_info *info)
+{
+	unsigned int size, symndx;
+	int ret;
+
+	size = sizeof(*mod->klp_info);
+	mod->klp_info = kmalloc(size, GFP_KERNEL);
+	if (mod->klp_info == NULL)
+		return -ENOMEM;
+
+	/* Elf header */
+	size = sizeof(mod->klp_info->hdr);
+	memcpy(&mod->klp_info->hdr, info->hdr, size);
+
+	/* Elf section header table */
+	size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
+	mod->klp_info->sechdrs = kmalloc(size, GFP_KERNEL);
+	if (mod->klp_info->sechdrs == NULL) {
+		ret = -ENOMEM;
+		goto free_info;
+	}
+	memcpy(mod->klp_info->sechdrs, info->sechdrs, size);
+
+	/* Elf section name string table */
+	size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
+	mod->klp_info->secstrings = kmalloc(size, GFP_KERNEL);
+	if (mod->klp_info->secstrings == NULL) {
+		ret = -ENOMEM;
+		goto free_sechdrs;
+	}
+	memcpy(mod->klp_info->secstrings, info->secstrings, size);
+
+	/* Elf symbol section index */
+	symndx = info->index.sym;
+	mod->klp_info->symndx = symndx;
+
+	/*
+	 * For livepatch modules, core_kallsyms.symtab is a complete
+	 * copy of the original symbol table. Adjust sh_addr to point
+	 * to core_kallsyms.symtab since the copy of the symtab in module
+	 * init memory is freed at the end of do_init_module().
+	 */
+	mod->klp_info->sechdrs[symndx].sh_addr = \
+		(unsigned long) mod->core_kallsyms.symtab;
+
+	return 0;
+
+free_sechdrs:
+	kfree(mod->klp_info->sechdrs);
+free_info:
+	kfree(mod->klp_info);
+	return ret;
+}
+
+static void free_module_elf(struct module *mod)
+{
+	kfree(mod->klp_info->sechdrs);
+	kfree(mod->klp_info->secstrings);
+	kfree(mod->klp_info);
+}
+#else /* !CONFIG_LIVEPATCH */
+static int copy_module_elf(struct module *mod, struct load_info *info)
+{
+	return 0;
+}
+
+static void free_module_elf(struct module *mod)
+{
+}
+#endif /* CONFIG_LIVEPATCH */
+
 void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
@@ -2011,6 +2088,9 @@ static void free_module(struct module *mod)
 	/* Free any allocated parameters. */
 	destroy_params(mod->kp, mod->num_kp);

+	if (is_livepatch_module(mod))
+		free_module_elf(mod);
+
 	/* Now we can delete it from the lists */
 	mutex_lock(&module_mutex);

 	/* Unlink carefully: kallsyms could be walking list. */
@@ -2126,6 +2206,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
 			       (long)sym[i].st_value);
 			break;

+		case SHN_LIVEPATCH:
+			/* Livepatch symbols are resolved by livepatch */
+			break;
+
 		case SHN_UNDEF:
 			ksym = resolve_symbol_wait(mod, info, name);
 			/* Ok if resolved. */
@@ -2174,6 +2258,10 @@ static int apply_relocations(struct module *mod, const struct load_info *info)
 		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
 			continue;

+		/* Livepatch relocation sections are applied by livepatch */
+		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
+			continue;
+
 		if (info->sechdrs[i].sh_type == SHT_REL)
 			err = apply_relocate(info->sechdrs, info->strtab,
 					     info->index.sym, i, mod);
@@ -2469,7 +2557,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
 	/* Compute total space required for the core symbols' strtab. */
 	for (ndst = i = 0; i < nsrc; i++) {
-		if (i == 0 ||
+		if (i == 0 || is_livepatch_module(mod) ||
 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
 				   info->index.pcpu)) {
 			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
@@ -2528,7 +2616,7 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
 	mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
 	src = mod->kallsyms->symtab;
 	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
-		if (i == 0 ||
+		if (i == 0 || is_livepatch_module(mod) ||
 		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
 				   info->index.pcpu)) {
 			dst[ndst] = src[i];
@@ -2667,6 +2755,26 @@ static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned l
 	return 0;
 }

+#ifdef CONFIG_LIVEPATCH
+static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+{
+	mod->klp = get_modinfo(info, "livepatch") ? true : false;
+
+	return 0;
+}
+#else /* !CONFIG_LIVEPATCH */
+static int find_livepatch_modinfo(struct module *mod, struct load_info *info)
+{
+	if (get_modinfo(info, "livepatch")) {
+		pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
+		       mod->name);
+		return -ENOEXEC;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_LIVEPATCH */
+
 /* Sets info->hdr and info->len. */
 static int copy_module_from_user(const void __user *umod, unsigned long len,
 				 struct load_info *info)
@@ -2821,6 +2929,10 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
 		     "is unknown, you have been warned.\n", mod->name);
 	}

+	err = find_livepatch_modinfo(mod, info);
+	if (err)
+		return err;
+
 	/* Set up license info based on the info section */
 	set_license(mod, get_modinfo(info, "license"));
@@ -3494,6 +3606,12 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	if (err < 0)
 		goto coming_cleanup;

+	if (is_livepatch_module(mod)) {
+		err = copy_module_elf(mod, info);
+		if (err < 0)
+			goto sysfs_cleanup;
+	}
+
 	/* Get rid of temporary copy. */
 	free_copy(info);
@@ -3502,11 +3620,12 @@ static int load_module(struct load_info *info, const char __user *uargs,

 	return do_init_module(mod);

+ sysfs_cleanup:
+	mod_sysfs_teardown(mod);
  coming_cleanup:
 	blocking_notifier_call_chain(&module_notify_list,
 				     MODULE_STATE_GOING, mod);
 	klp_module_going(mod);
  bug_cleanup:
 	/* module_bug_cleanup needs module_mutex protection */
 	mutex_lock(&module_mutex);

--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1530,7 +1530,19 @@ static int ftrace_cmp_recs(const void *a, const void *b)
 	return 0;
 }

-static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+/**
+ * ftrace_location_range - return the first address of a traced location
+ *	if it touches the given ip range
+ * @start: start of range to search.
+ * @end: end of range to search (inclusive). @end points to the last byte
+ *	to check.
+ *
+ * Returns rec->ip if the related ftrace location is a least partly within
+ * the given address range. That is, the first address of the instruction
+ * that is either a NOP or call to the function tracer. It checks the ftrace
+ * internal tables to determine if the address belongs or not.
+ */
+unsigned long ftrace_location_range(unsigned long start, unsigned long end)
 {
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;

--- a/samples/livepatch/livepatch-sample.c
+++ b/samples/livepatch/livepatch-sample.c
@@ -89,3 +89,4 @@ static void livepatch_exit(void)
 module_init(livepatch_init);
 module_exit(livepatch_exit);
 MODULE_LICENSE("GPL");
+MODULE_INFO(livepatch, "Y");