Commit 9fb4c525 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching

Pull livepatching updates from Jiri Kosina:

 - simplifications and improvements for issues Peter Zijlstra found
   during his previous work on W^X cleanups.

   This allows us to remove livepatch arch-specific .klp.arch sections
   and add proper support for jump labels in patched code.

   Also, this patchset removes the last module_disable_ro() usage in the
   tree.

   Patches from Josh Poimboeuf and Peter Zijlstra

 - a few other minor cleanups

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching:
  MAINTAINERS: add lib/livepatch to LIVE PATCHING
  livepatch: add arch-specific headers to MAINTAINERS
  livepatch: Make klp_apply_object_relocs static
  MAINTAINERS: adjust to livepatch .klp.arch removal
  module: Make module_enable_ro() static again
  x86/module: Use text_mutex in apply_relocate_add()
  module: Remove module_disable_ro()
  livepatch: Remove module_disable_ro() usage
  x86/module: Use text_poke() for late relocations
  s390/module: Use s390_kernel_write() for late relocations
  s390: Change s390_kernel_write() return type to match memcpy()
  livepatch: Prevent module-specific KLP rela sections from referencing vmlinux symbols
  livepatch: Remove .klp.arch
  livepatch: Apply vmlinux-specific KLP relocations early
  livepatch: Disallow vmlinux.ko
parents a789d5f8 f55d9895
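
The x86 changes below switch late relocations (applied after a module's text is already live) to text_poke() under text_mutex, while early relocations keep using plain memcpy(). As a rough orientation aid only, here is a minimal user-space sketch of that dispatch pattern; fake_text_poke() and the pthread mutex are illustrative stand-ins for the kernel's text_poke() and text_mutex, not code from this series.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for text_poke() and text_mutex. */
static pthread_mutex_t fake_text_mutex = PTHREAD_MUTEX_INITIALIZER;

static void *fake_text_poke(void *dst, const void *src, size_t len)
{
        /* The real text_poke() writes through a temporary mapping;
         * a plain copy keeps this sketch runnable anywhere. */
        return memcpy(dst, src, len);
}

/* Mirror of the wrapper pattern: pick the writer based on whether the
 * target text is already live, and take the lock only for live text. */
static void write_reloc(void *loc, const void *val, size_t len, bool early)
{
        void *(*write)(void *, const void *, size_t) = memcpy;

        if (!early) {
                write = fake_text_poke;
                pthread_mutex_lock(&fake_text_mutex);
        }

        write(loc, val, len);

        if (!early)
                pthread_mutex_unlock(&fake_text_mutex);
}

int main(void)
{
        unsigned int insn = 0;
        unsigned int val = 0xdeadbeef;

        write_reloc(&insn, &val, sizeof(val), false); /* "late" path */
        printf("patched value: %#x\n", insn);
        return 0;
}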
@@ -14,8 +14,7 @@ This document outlines the Elf format requirements that livepatch modules must f
 4. Livepatch symbols
 4.1 A livepatch module's symbol table
 4.2 Livepatch symbol format
-5. Architecture-specific sections
-6. Symbol table and Elf section access
+5. Symbol table and Elf section access
 
 1. Background and motivation
 ============================
@@ -298,17 +297,7 @@ Examples:
 Note that the 'Ndx' (Section index) for these symbols is SHN_LIVEPATCH (0xff20).
 "OS" means OS-specific.
 
-5. Architecture-specific sections
-=================================
-Architectures may override arch_klp_init_object_loaded() to perform
-additional arch-specific tasks when a target module loads, such as applying
-arch-specific sections. On x86 for example, we must apply per-object
-.altinstructions and .parainstructions sections when a target module loads.
-These sections must be prefixed with ".klp.arch.$objname." so that they can
-be easily identified when iterating through a patch module's Elf sections
-(See arch/x86/kernel/livepatch.c for a complete example).
-
-6. Symbol table and Elf section access
+5. Symbol table and Elf section access
 ======================================
 A livepatch module's symbol table is accessible through module->symtab.
......
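
For readers skimming the klp Elf naming conventions referenced above and in kernel/livepatch/core.c below (.klp.rela.<objname>.<secname> sections and .klp.sym.<objname>.<symname>,<sympos> symbols, with sscanf field widths tied to MODULE_NAME_LEN and KSYM_NAME_LEN), a small stand-alone sketch using the same format strings on hypothetical names:

#include <stdio.h>

int main(void)
{
        /* Hypothetical sample names; the format strings match the ones
         * used in kernel/livepatch/core.c in this series. */
        const char *sym = ".klp.sym.vmlinux.printk,0";
        const char *sec = ".klp.rela.ext4.text.unlikely";
        char sym_objname[56], sym_name[128], sec_objname[56];
        unsigned long sympos;

        if (sscanf(sym, ".klp.sym.%55[^.].%127[^,],%lu",
                   sym_objname, sym_name, &sympos) == 3)
                printf("symbol: obj=%s name=%s pos=%lu\n",
                       sym_objname, sym_name, sympos);

        if (sscanf(sec, ".klp.rela.%55[^.]", sec_objname) == 1)
                printf("rela section targets object: %s\n", sec_objname);

        return 0;
}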
@@ -9936,10 +9936,12 @@ S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/livepatching/livepatching.git
 F: Documentation/ABI/testing/sysfs-kernel-livepatch
 F: Documentation/livepatch/
+F: arch/powerpc/include/asm/livepatch.h
+F: arch/s390/include/asm/livepatch.h
 F: arch/x86/include/asm/livepatch.h
-F: arch/x86/kernel/livepatch.c
 F: include/linux/livepatch.h
 F: kernel/livepatch/
+F: lib/livepatch/
 F: samples/livepatch/
 F: tools/testing/selftests/livepatch/
......
@@ -276,6 +276,6 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
 }
 
 int copy_to_user_real(void __user *dest, void *src, unsigned long count);
-void s390_kernel_write(void *dst, const void *src, size_t size);
+void *s390_kernel_write(void *dst, const void *src, size_t size);
 
 #endif /* __S390_UACCESS_H */
@@ -55,19 +55,22 @@ static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t siz
  */
 static DEFINE_SPINLOCK(s390_kernel_write_lock);
 
-void notrace s390_kernel_write(void *dst, const void *src, size_t size)
+notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
 {
+        void *tmp = dst;
         unsigned long flags;
         long copied;
 
         spin_lock_irqsave(&s390_kernel_write_lock, flags);
         while (size) {
-                copied = s390_kernel_write_odd(dst, src, size);
-                dst += copied;
+                copied = s390_kernel_write_odd(tmp, src, size);
+                tmp += copied;
                 src += copied;
                 size -= copied;
         }
         spin_unlock_irqrestore(&s390_kernel_write_lock, flags);
+
+        return dst;
 }
 
 static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
......
@@ -362,3 +362,19 @@ void __init check_bugs(void)
 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
 {
 }
+
+void *text_poke(void *addr, const void *opcode, size_t len)
+{
+        /*
+         * In UML, the only reference to this function is in
+         * apply_relocate_add(), which shouldn't ever actually call this
+         * because UML doesn't have live patching.
+         */
+        WARN_ON(1);
+
+        return memcpy(addr, opcode, len);
+}
+
+void text_poke_sync(void)
+{
+}
@@ -90,7 +90,6 @@ obj-$(CONFIG_X86_MPPARSE) += mpparse.o
 obj-y += apic/
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
 obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_LIVEPATCH) += livepatch.o
 obj-$(CONFIG_FUNCTION_TRACER) += ftrace_$(BITS).o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
......
--- a/arch/x86/kernel/livepatch.c
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * livepatch.c - x86-specific Kernel Live Patching Core
- */
-
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/livepatch.h>
-#include <asm/text-patching.h>
-
-/* Apply per-object alternatives. Based on x86 module_finalize() */
-void arch_klp_init_object_loaded(struct klp_patch *patch,
-                                 struct klp_object *obj)
-{
-        int cnt;
-        struct klp_modinfo *info;
-        Elf_Shdr *s, *alt = NULL, *para = NULL;
-        void *aseg, *pseg;
-        const char *objname;
-        char sec_objname[MODULE_NAME_LEN];
-        char secname[KSYM_NAME_LEN];
-
-        info = patch->mod->klp_info;
-        objname = obj->name ? obj->name : "vmlinux";
-
-        /* See livepatch core code for BUILD_BUG_ON() explanation */
-        BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
-
-        for (s = info->sechdrs; s < info->sechdrs + info->hdr.e_shnum; s++) {
-                /* Apply per-object .klp.arch sections */
-                cnt = sscanf(info->secstrings + s->sh_name,
-                             ".klp.arch.%55[^.].%127s",
-                             sec_objname, secname);
-                if (cnt != 2)
-                        continue;
-
-                if (strcmp(sec_objname, objname))
-                        continue;
-
-                if (!strcmp(".altinstructions", secname))
-                        alt = s;
-
-                if (!strcmp(".parainstructions", secname))
-                        para = s;
-        }
-
-        if (alt) {
-                aseg = (void *) alt->sh_addr;
-                apply_alternatives(aseg, aseg + alt->sh_size);
-        }
-
-        if (para) {
-                pseg = (void *) para->sh_addr;
-                apply_paravirt(pseg, pseg + para->sh_size);
-        }
-}
@@ -18,6 +18,7 @@
 #include <linux/gfp.h>
 #include <linux/jump_label.h>
 #include <linux/random.h>
+#include <linux/memory.h>
 
 #include <asm/text-patching.h>
 #include <asm/page.h>
@@ -126,11 +127,12 @@ int apply_relocate(Elf32_Shdr *sechdrs,
         return 0;
 }
 #else /*X86_64*/
-int apply_relocate_add(Elf64_Shdr *sechdrs,
+static int __apply_relocate_add(Elf64_Shdr *sechdrs,
                    const char *strtab,
                    unsigned int symindex,
                    unsigned int relsec,
-                   struct module *me)
+                   struct module *me,
+                   void *(*write)(void *dest, const void *src, size_t len))
 {
         unsigned int i;
         Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
@@ -162,19 +164,19 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                 case R_X86_64_64:
                         if (*(u64 *)loc != 0)
                                 goto invalid_relocation;
-                        *(u64 *)loc = val;
+                        write(loc, &val, 8);
                         break;
                 case R_X86_64_32:
                         if (*(u32 *)loc != 0)
                                 goto invalid_relocation;
-                        *(u32 *)loc = val;
+                        write(loc, &val, 4);
                         if (val != *(u32 *)loc)
                                 goto overflow;
                         break;
                 case R_X86_64_32S:
                         if (*(s32 *)loc != 0)
                                 goto invalid_relocation;
-                        *(s32 *)loc = val;
+                        write(loc, &val, 4);
                         if ((s64)val != *(s32 *)loc)
                                 goto overflow;
                         break;
@@ -183,7 +185,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                         if (*(u32 *)loc != 0)
                                 goto invalid_relocation;
                         val -= (u64)loc;
-                        *(u32 *)loc = val;
+                        write(loc, &val, 4);
 #if 0
                         if ((s64)val != *(s32 *)loc)
                                 goto overflow;
@@ -193,7 +195,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                         if (*(u64 *)loc != 0)
                                 goto invalid_relocation;
                         val -= (u64)loc;
-                        *(u64 *)loc = val;
+                        write(loc, &val, 8);
                         break;
                 default:
                         pr_err("%s: Unknown rela relocation: %llu\n",
@@ -215,6 +217,33 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                me->name);
         return -ENOEXEC;
 }
+
+int apply_relocate_add(Elf64_Shdr *sechdrs,
+                   const char *strtab,
+                   unsigned int symindex,
+                   unsigned int relsec,
+                   struct module *me)
+{
+        int ret;
+        bool early = me->state == MODULE_STATE_UNFORMED;
+        void *(*write)(void *, const void *, size_t) = memcpy;
+
+        if (!early) {
+                write = text_poke;
+                mutex_lock(&text_mutex);
+        }
+
+        ret = __apply_relocate_add(sechdrs, strtab, symindex, relsec, me,
+                                   write);
+
+        if (!early) {
+                text_poke_sync();
+                mutex_unlock(&text_mutex);
+        }
+
+        return ret;
+}
+
 #endif
 
 int module_finalize(const Elf_Ehdr *hdr,
......
@@ -195,9 +195,6 @@ struct klp_patch {
 
 int klp_enable_patch(struct klp_patch *);
 
-void arch_klp_init_object_loaded(struct klp_patch *patch,
-                                 struct klp_object *obj);
-
 /* Called from the module loader during module coming/going states */
 int klp_module_coming(struct module *mod);
 void klp_module_going(struct module *mod);
@@ -234,6 +231,11 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
 struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
 struct klp_state *klp_get_prev_state(unsigned long id);
 
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+                             const char *shstrtab, const char *strtab,
+                             unsigned int symindex, unsigned int secindex,
+                             const char *objname);
+
 #else /* !CONFIG_LIVEPATCH */
 
 static inline int klp_module_coming(struct module *mod) { return 0; }
@@ -242,6 +244,15 @@ static inline bool klp_patch_pending(struct task_struct *task) { return false; }
 static inline void klp_update_patch_state(struct task_struct *task) {}
 static inline void klp_copy_process(struct task_struct *child) {}
 
+static inline
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+                             const char *shstrtab, const char *strtab,
+                             unsigned int symindex, unsigned int secindex,
+                             const char *objname)
+{
+        return 0;
+}
+
 #endif /* CONFIG_LIVEPATCH */
 
 #endif /* _LINUX_LIVEPATCH_H_ */
@@ -866,14 +866,6 @@ extern int module_sysfs_initialized;
 
 #define __MODULE_STRING(x) __stringify(x)
 
-#ifdef CONFIG_STRICT_MODULE_RWX
-extern void module_enable_ro(const struct module *mod, bool after_init);
-extern void module_disable_ro(const struct module *mod);
-#else
-static inline void module_enable_ro(const struct module *mod, bool after_init) { }
-static inline void module_disable_ro(const struct module *mod) { }
-#endif
-
 #ifdef CONFIG_GENERIC_BUG
 void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *,
                          struct module *);
......
@@ -191,18 +191,21 @@ static int klp_find_object_symbol(const char *objname, const char *name,
         return -EINVAL;
 }
 
-static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
+static int klp_resolve_symbols(Elf64_Shdr *sechdrs, const char *strtab,
+                               unsigned int symndx, Elf_Shdr *relasec,
+                               const char *sec_objname)
 {
-        int i, cnt, vmlinux, ret;
-        char objname[MODULE_NAME_LEN];
-        char symname[KSYM_NAME_LEN];
-        char *strtab = pmod->core_kallsyms.strtab;
+        int i, cnt, ret;
+        char sym_objname[MODULE_NAME_LEN];
+        char sym_name[KSYM_NAME_LEN];
         Elf_Rela *relas;
         Elf_Sym *sym;
         unsigned long sympos, addr;
+        bool sym_vmlinux;
+        bool sec_vmlinux = !strcmp(sec_objname, "vmlinux");
 
         /*
-         * Since the field widths for objname and symname in the sscanf()
+         * Since the field widths for sym_objname and sym_name in the sscanf()
          * call are hard-coded and correspond to MODULE_NAME_LEN and
          * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
          * and KSYM_NAME_LEN have the values we expect them to have.
@@ -216,27 +219,40 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
         relas = (Elf_Rela *) relasec->sh_addr;
         /* For each rela in this klp relocation section */
         for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
-                sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
+                sym = (Elf64_Sym *)sechdrs[symndx].sh_addr + ELF_R_SYM(relas[i].r_info);
                 if (sym->st_shndx != SHN_LIVEPATCH) {
                         pr_err("symbol %s is not marked as a livepatch symbol\n",
                                strtab + sym->st_name);
                         return -EINVAL;
                 }
 
-                /* Format: .klp.sym.objname.symname,sympos */
+                /* Format: .klp.sym.sym_objname.sym_name,sympos */
                 cnt = sscanf(strtab + sym->st_name,
                              ".klp.sym.%55[^.].%127[^,],%lu",
-                             objname, symname, &sympos);
+                             sym_objname, sym_name, &sympos);
                 if (cnt != 3) {
                         pr_err("symbol %s has an incorrectly formatted name\n",
                                strtab + sym->st_name);
                         return -EINVAL;
                 }
 
+                sym_vmlinux = !strcmp(sym_objname, "vmlinux");
+
+                /*
+                 * Prevent module-specific KLP rela sections from referencing
+                 * vmlinux symbols.  This helps prevent ordering issues with
+                 * module special section initializations.  Presumably such
+                 * symbols are exported and normal relas can be used instead.
+                 */
+                if (!sec_vmlinux && sym_vmlinux) {
+                        pr_err("invalid access to vmlinux symbol '%s' from module-specific livepatch relocation section",
+                               sym_name);
+                        return -EINVAL;
+                }
+
                 /* klp_find_object_symbol() treats a NULL objname as vmlinux */
-                vmlinux = !strcmp(objname, "vmlinux");
-                ret = klp_find_object_symbol(vmlinux ? NULL : objname,
-                                             symname, sympos, &addr);
+                ret = klp_find_object_symbol(sym_vmlinux ? NULL : sym_objname,
+                                             sym_name, sympos, &addr);
                 if (ret)
                         return ret;
@@ -246,54 +262,59 @@ static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
         return 0;
 }
 
-static int klp_write_object_relocations(struct module *pmod,
-                                        struct klp_object *obj)
+/*
+ * At a high-level, there are two types of klp relocation sections: those which
+ * reference symbols which live in vmlinux; and those which reference symbols
+ * which live in other modules.  This function is called for both types:
+ *
+ * 1) When a klp module itself loads, the module code calls this function to
+ *    write vmlinux-specific klp relocations (.klp.rela.vmlinux.* sections).
+ *    These relocations are written to the klp module text to allow the patched
+ *    code/data to reference unexported vmlinux symbols.  They're written as
+ *    early as possible to ensure that other module init code (.e.g.,
+ *    jump_label_apply_nops) can access any unexported vmlinux symbols which
+ *    might be referenced by the klp module's special sections.
+ *
+ * 2) When a to-be-patched module loads -- or is already loaded when a
+ *    corresponding klp module loads -- klp code calls this function to write
+ *    module-specific klp relocations (.klp.rela.{module}.* sections).  These
+ *    are written to the klp module text to allow the patched code/data to
+ *    reference symbols which live in the to-be-patched module or one of its
+ *    module dependencies.  Exported symbols are supported, in addition to
+ *    unexported symbols, in order to enable late module patching, which allows
+ *    the to-be-patched module to be loaded and patched sometime *after* the
+ *    klp module is loaded.
+ */
+int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
+                             const char *shstrtab, const char *strtab,
+                             unsigned int symndx, unsigned int secndx,
+                             const char *objname)
 {
-        int i, cnt, ret = 0;
-        const char *objname, *secname;
+        int cnt, ret;
         char sec_objname[MODULE_NAME_LEN];
-        Elf_Shdr *sec;
+        Elf_Shdr *sec = sechdrs + secndx;
 
-        if (WARN_ON(!klp_is_object_loaded(obj)))
-                return -EINVAL;
-
-        objname = klp_is_module(obj) ? obj->name : "vmlinux";
-
-        /* For each klp relocation section */
-        for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
-                sec = pmod->klp_info->sechdrs + i;
-                secname = pmod->klp_info->secstrings + sec->sh_name;
-                if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
-                        continue;
-
-                /*
-                 * Format: .klp.rela.sec_objname.section_name
-                 * See comment in klp_resolve_symbols() for an explanation
-                 * of the selected field width value.
-                 */
-                cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
-                if (cnt != 1) {
-                        pr_err("section %s has an incorrectly formatted name\n",
-                               secname);
-                        ret = -EINVAL;
-                        break;
-                }
-
-                if (strcmp(objname, sec_objname))
-                        continue;
-
-                ret = klp_resolve_symbols(sec, pmod);
-                if (ret)
-                        break;
+        /*
+         * Format: .klp.rela.sec_objname.section_name
+         * See comment in klp_resolve_symbols() for an explanation
+         * of the selected field width value.
+         */
+        cnt = sscanf(shstrtab + sec->sh_name, ".klp.rela.%55[^.]",
+                     sec_objname);
+        if (cnt != 1) {
+                pr_err("section %s has an incorrectly formatted name\n",
+                       shstrtab + sec->sh_name);
+                return -EINVAL;
+        }
 
-                ret = apply_relocate_add(pmod->klp_info->sechdrs,
-                                         pmod->core_kallsyms.strtab,
-                                         pmod->klp_info->symndx, i, pmod);
-                if (ret)
-                        break;
-        }
+        if (strcmp(objname ? objname : "vmlinux", sec_objname))
+                return 0;
 
-        return ret;
+        ret = klp_resolve_symbols(sechdrs, strtab, symndx, sec, sec_objname);
+        if (ret)
+                return ret;
+
+        return apply_relocate_add(sechdrs, strtab, symndx, secndx, pmod);
 }
 
 /*
@@ -724,10 +745,27 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
                             func->old_sympos ? func->old_sympos : 1);
 }
 
-/* Arches may override this to finish any remaining arch-specific tasks */
-void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
-                                        struct klp_object *obj)
+static int klp_apply_object_relocs(struct klp_patch *patch,
+                                   struct klp_object *obj)
 {
+        int i, ret;
+        struct klp_modinfo *info = patch->mod->klp_info;
+
+        for (i = 1; i < info->hdr.e_shnum; i++) {
+                Elf_Shdr *sec = info->sechdrs + i;
+
+                if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
+                        continue;
+
+                ret = klp_apply_section_relocs(patch->mod, info->sechdrs,
+                                               info->secstrings,
+                                               patch->mod->core_kallsyms.strtab,
+                                               info->symndx, i, obj->name);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -737,21 +775,18 @@ static int klp_init_object_loaded(struct klp_patch *patch,
         struct klp_func *func;
         int ret;
 
-        mutex_lock(&text_mutex);
-
-        module_disable_ro(patch->mod);
-        ret = klp_write_object_relocations(patch->mod, obj);
-        if (ret) {
-                module_enable_ro(patch->mod, true);
-                mutex_unlock(&text_mutex);
-                return ret;
+        if (klp_is_module(obj)) {
+                /*
+                 * Only write module-specific relocations here
+                 * (.klp.rela.{module}.*). vmlinux-specific relocations were
+                 * written earlier during the initialization of the klp module
+                 * itself.
+                 */
+                ret = klp_apply_object_relocs(patch, obj);
+                if (ret)
+                        return ret;
         }
 
-        arch_klp_init_object_loaded(patch, obj);
-        module_enable_ro(patch->mod, true);
-        mutex_unlock(&text_mutex);
-
         klp_for_each_func(obj, func) {
                 ret = klp_find_object_symbol(obj->name, func->old_name,
                                              func->old_sympos,
@@ -1139,6 +1174,11 @@ int klp_module_coming(struct module *mod)
         if (WARN_ON(mod->state != MODULE_STATE_COMING))
                 return -EINVAL;
 
+        if (!strcmp(mod->name, "vmlinux")) {
+                pr_err("vmlinux.ko: invalid module name");
+                return -EINVAL;
+        }
+
         mutex_lock(&klp_mutex);
         /*
          * Each module has to know that klp_module_coming()
......
@@ -2000,20 +2000,7 @@ static void frob_writable_data(const struct module_layout *layout,
                    (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
 }
 
-/* livepatching wants to disable read-only so it can frob module. */
-void module_disable_ro(const struct module *mod)
-{
-        if (!rodata_enabled)
-                return;
-
-        frob_text(&mod->core_layout, set_memory_rw);
-        frob_rodata(&mod->core_layout, set_memory_rw);
-        frob_ro_after_init(&mod->core_layout, set_memory_rw);
-        frob_text(&mod->init_layout, set_memory_rw);
-        frob_rodata(&mod->init_layout, set_memory_rw);
-}
-
-void module_enable_ro(const struct module *mod, bool after_init)
+static void module_enable_ro(const struct module *mod, bool after_init)
 {
         if (!rodata_enabled)
                 return;
@@ -2041,6 +2028,7 @@ static void module_enable_nx(const struct module *mod)
 
 #else /* !CONFIG_STRICT_MODULE_RWX */
 static void module_enable_nx(const struct module *mod) { }
+static void module_enable_ro(const struct module *mod, bool after_init) {}
 #endif /* CONFIG_STRICT_MODULE_RWX */
 
 static void module_enable_x(const struct module *mod)
 {
@@ -2337,11 +2325,13 @@ static int apply_relocations(struct module *mod, const struct load_info *info)
                 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
                         continue;
 
-                /* Livepatch relocation sections are applied by livepatch */
                 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
-                        continue;
-
-                if (info->sechdrs[i].sh_type == SHT_REL)
+                        err = klp_apply_section_relocs(mod, info->sechdrs,
+                                                       info->secstrings,
+                                                       info->strtab,
+                                                       info->index.sym, i,
+                                                       NULL);
+                else if (info->sechdrs[i].sh_type == SHT_REL)
                         err = apply_relocate(info->sechdrs, info->strtab,
                                              info->index.sym, i, mod);
                 else if (info->sechdrs[i].sh_type == SHT_RELA)
......