Commit eee8c24f authored by Nadav Amit, committed by Greg Kroah-Hartman

x86/modules: Avoid breaking W^X while loading modules

[ Upstream commit f2c65fb3 ]

When modules and BPF filters are loaded, there is a time window in
which some memory is both writable and executable. An attacker that has
already found another vulnerability (e.g., a dangling pointer) might be
able to exploit this behavior to overwrite kernel code. Prevent having
writable executable PTEs at this stage.

In addition, avoiding W+X mappings also slightly simplifies the
patching of module code on initialization (e.g., by alternatives and
static keys), as the next patch does. This was actually the main
motivation for this patch.

To avoid W+X mappings, set module memory initially as RW (NX), and only
after it has been set RO, set it as X as well. Making the mapping
executable is done as a separate step so that there is never a moment
when one core still has the old PTE cached (hence writable) while
another already sees the updated PTE (executable), which would break
the W^X protection.
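
For illustration, a minimal sketch of the resulting lifecycle (not part
of the patch: load_text() and its callers are hypothetical, while
module_alloc(), set_memory_ro() and set_memory_x() are the real
interfaces touched below):

	#include <linux/moduleloader.h>	/* module_alloc() */
	#include <linux/string.h>	/* memcpy() */
	#include <linux/mm.h>		/* PAGE_ALIGN()/PAGE_SHIFT */
	#include <asm/set_memory.h>	/* set_memory_ro()/set_memory_x() */

	/* Hypothetical helper: install code without ever mapping it W+X. */
	static void *load_text(const void *image, size_t len)
	{
		int npages = PAGE_ALIGN(len) >> PAGE_SHIFT;
		void *text = module_alloc(len);	/* RW + NX after this patch */

		if (!text)
			return NULL;

		memcpy(text, image, len);	/* nothing can execute this yet */

		set_memory_ro((unsigned long)text, npages);	/* drop W first... */
		set_memory_x((unsigned long)text, npages);	/* ...then grant X */

		return text;
	}

The mapping is never simultaneously writable and executable, and X is
granted only after W is gone, matching the ordering module_enable_ro()
uses below.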
Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Suggested-by: Andy Lutomirski <luto@amacapital.net>
Signed-off-by: Nadav Amit <namit@vmware.com>
Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: <akpm@linux-foundation.org>
Cc: <ard.biesheuvel@linaro.org>
Cc: <deneen.t.dock@intel.com>
Cc: <kernel-hardening@lists.openwall.com>
Cc: <kristen@linux.intel.com>
Cc: <linux_dti@icloud.com>
Cc: <will.deacon@arm.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jessica Yu <jeyu@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Link: https://lkml.kernel.org/r/20190426001143.4983-12-namit@vmware.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent f4edac5b
arch/x86/kernel/alternative.c:

@@ -667,15 +667,29 @@ void __init alternative_instructions(void)
  * handlers seeing an inconsistent instruction while you patch.
  */
 void *__init_or_module text_poke_early(void *addr, const void *opcode,
 				       size_t len)
 {
 	unsigned long flags;
-	local_irq_save(flags);
-	memcpy(addr, opcode, len);
-	local_irq_restore(flags);
-	sync_core();
-	/* Could also do a CLFLUSH here to speed up CPU recovery; but
-	   that causes hangs on some VIA CPUs. */
+
+	if (boot_cpu_has(X86_FEATURE_NX) &&
+	    is_module_text_address((unsigned long)addr)) {
+		/*
+		 * Modules text is marked initially as non-executable, so the
+		 * code cannot be running and speculative code-fetches are
+		 * prevented. Just change the code.
+		 */
+		memcpy(addr, opcode, len);
+	} else {
+		local_irq_save(flags);
+		memcpy(addr, opcode, len);
+		local_irq_restore(flags);
+		sync_core();
+
+		/*
+		 * Could also do a CLFLUSH here to speed up CPU recovery; but
+		 * that causes hangs on some VIA CPUs.
+		 */
+	}
 	return addr;
 }
arch/x86/kernel/module.c:

@@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
 	p = __vmalloc_node_range(size, MODULE_ALIGN,
 				    MODULES_VADDR + get_module_load_offset(),
 				    MODULES_END, GFP_KERNEL,
-				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+				    PAGE_KERNEL, 0, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 	if (p && (kasan_module_alloc(p, size) < 0)) {
 		vfree(p);
include/linux/filter.h:

@@ -746,6 +746,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
 	set_memory_ro((unsigned long)hdr, hdr->pages);
+	set_memory_x((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
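
Usage note (illustrative sketch, not from this patch): an arch JIT
fills the image while the buffer returned by bpf_jit_binary_alloc() is
still RW and NX, so the single bpf_jit_binary_lock_ro() call above now
performs the whole RO-then-X transition. Here emit_code(), proglen,
orig_prog and jit_fill_hole stand in for the arch-specific pieces:

	struct bpf_binary_header *hdr;
	u8 *image;

	hdr = bpf_jit_binary_alloc(proglen, &image, 4, jit_fill_hole);
	if (!hdr)
		return orig_prog;

	emit_code(image, prog);		/* write instructions: still RW, NX */

	bpf_jit_binary_lock_ro(hdr);	/* set_memory_ro(), then set_memory_x() */
	prog->bpf_func = (void *)image;
	prog->jited = 1;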
kernel/module.c:

@@ -1950,8 +1950,13 @@ void module_enable_ro(const struct module *mod, bool after_init)
 		return;
 
 	frob_text(&mod->core_layout, set_memory_ro);
+	frob_text(&mod->core_layout, set_memory_x);
+
 	frob_rodata(&mod->core_layout, set_memory_ro);
+
 	frob_text(&mod->init_layout, set_memory_ro);
+	frob_text(&mod->init_layout, set_memory_x);
+
 	frob_rodata(&mod->init_layout, set_memory_ro);
 
 	if (after_init)