Commit ce0c1c92 authored by Linus Torvalds

Merge tag 'modules-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux

Pull modules updates from Luis Chamberlain:
 "Christophe Leroy did most of the work on this release, first with a
  few cleanups on CONFIG_STRICT_KERNEL_RWX and ending with error
  handling for when set_memory_XX() can fail.

  This is part of a larger effort to clean up all these callers which
  can fail, modules is just part of it"
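
The core of the series is making module_set_memory() and the module_enable_*() helpers return int so that failures from set_memory_XX() propagate back to the module loader instead of being silently dropped. A minimal sketch of that caller-side pattern follows; it is illustrative only (example_protect_region() is a made-up helper, not code from this merge), assuming the standard set_memory_ro()/set_memory_nx() kernel API:

/*
 * Illustrative sketch only -- not taken from this merge. It shows the
 * error-handling pattern the series adopts: set_memory_*() calls can
 * fail (for example when page tables cannot be split), so their return
 * values are checked and propagated instead of being ignored.
 * example_protect_region() is a hypothetical helper.
 */
#include <linux/mm.h>
#include <linux/set_memory.h>

static int example_protect_region(unsigned long start, unsigned long size)
{
        int ret;

        /* Make the region read-only; bail out on failure. */
        ret = set_memory_ro(start, size >> PAGE_SHIFT);
        if (ret)
                return ret;

        /* Then drop execute permission, again propagating any error. */
        return set_memory_nx(start, size >> PAGE_SHIFT);
}

Callers higher up (such as the module loader) can then unwind and fail the operation cleanly when a return value is non-zero, which is exactly what the diffs below do.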

* tag 'modules-6.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux:
  module: Don't ignore errors from set_memory_XX()
  lib/test_kmod: fix kernel-doc warnings
  powerpc: Simplify strict_kernel_rwx_enabled()
  modules: Remove #ifdef CONFIG_STRICT_MODULE_RWX around rodata_enabled
  init: Declare rodata_enabled and mark_rodata_ro() at all time
  module: Change module_enable_{nx/x/ro}() to more explicit names
  module: Use set_memory_rox()
parents 70ef6544 d1909c02
@@ -330,17 +330,10 @@ static __always_inline bool early_radix_enabled(void)
 	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
 }
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
 static inline bool strict_kernel_rwx_enabled(void)
 {
-	return rodata_enabled;
+	return IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && rodata_enabled;
 }
-#else
-static inline bool strict_kernel_rwx_enabled(void)
-{
-	return false;
-}
-#endif
 
 static inline bool strict_module_rwx_enabled(void)
 {
...
@@ -168,12 +168,8 @@ extern initcall_entry_t __initcall_end[];
 
 extern struct file_system_type rootfs_fs_type;
 
-#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
 extern bool rodata_enabled;
-#endif
-#ifdef CONFIG_STRICT_KERNEL_RWX
 void mark_rodata_ro(void);
-#endif
 
 extern void (*late_time_init)(void);
...
@@ -1401,10 +1401,9 @@ static int __init set_debug_rodata(char *str)
 early_param("rodata", set_debug_rodata);
 #endif
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
 static void mark_readonly(void)
 {
-	if (rodata_enabled) {
+	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && rodata_enabled) {
 		/*
 		 * load_module() results in W+X mappings, which are cleaned
 		 * up with call_rcu(). Let's make sure that queued work is
@@ -1414,20 +1413,14 @@ static void mark_readonly(void)
 		rcu_barrier();
 		mark_rodata_ro();
 		rodata_test();
-	} else
+	} else if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
 		pr_info("Kernel memory protection disabled.\n");
+	} else if (IS_ENABLED(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)) {
+		pr_warn("Kernel memory protection not selected by kernel config.\n");
+	} else {
+		pr_warn("This architecture does not have kernel memory protection.\n");
+	}
 }
-#elif defined(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)
-static inline void mark_readonly(void)
-{
-	pr_warn("Kernel memory protection not selected by kernel config.\n");
-}
-#else
-static inline void mark_readonly(void)
-{
-	pr_warn("This architecture does not have kernel memory protection.\n");
-}
-#endif
 
 void __weak free_initmem(void)
 {
...
@@ -322,9 +322,9 @@ static inline struct module *mod_find(unsigned long addr, struct mod_tree_root *
 }
 #endif /* CONFIG_MODULES_TREE_LOOKUP */
 
-void module_enable_ro(const struct module *mod, bool after_init);
-void module_enable_nx(const struct module *mod);
-void module_enable_x(const struct module *mod);
+int module_enable_rodata_ro(const struct module *mod, bool after_init);
+int module_enable_data_nx(const struct module *mod);
+int module_enable_text_rox(const struct module *mod);
 int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 				char *secstrings, struct module *mod);
...
@@ -2571,7 +2571,9 @@ static noinline int do_init_module(struct module *mod)
 	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
 	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
-	module_enable_ro(mod, true);
+	ret = module_enable_rodata_ro(mod, true);
+	if (ret)
+		goto fail_mutex_unlock;
 	mod_tree_remove_init(mod);
 	module_arch_freeing_init(mod);
 	for_class_mod_mem_type(type, init) {
@@ -2609,6 +2611,8 @@ static noinline int do_init_module(struct module *mod)
 
 	return 0;
 
+fail_mutex_unlock:
+	mutex_unlock(&module_mutex);
 fail_free_freeinit:
 	kfree(freeinit);
 fail:
...
@@ -2736,9 +2740,15 @@ static int complete_formation(struct module *mod, struct load_info *info)
 	module_bug_finalize(info->hdr, info->sechdrs, mod);
 	module_cfi_finalize(info->hdr, info->sechdrs, mod);
 
-	module_enable_ro(mod, false);
-	module_enable_nx(mod);
-	module_enable_x(mod);
+	err = module_enable_rodata_ro(mod, false);
+	if (err)
+		goto out_strict_rwx;
+	err = module_enable_data_nx(mod);
+	if (err)
+		goto out_strict_rwx;
+	err = module_enable_text_rox(mod);
+	if (err)
+		goto out_strict_rwx;
 
 	/*
 	 * Mark state as coming so strong_try_module_get() ignores us,
@@ -2749,6 +2759,8 @@ static int complete_formation(struct module *mod, struct load_info *info)
 
 	return 0;
 
+out_strict_rwx:
+	module_bug_cleanup(mod);
 out:
 	mutex_unlock(&module_mutex);
 	return err;
...
@@ -11,13 +11,16 @@
 #include <linux/set_memory.h>
 #include "internal.h"
 
-static void module_set_memory(const struct module *mod, enum mod_mem_type type,
+static int module_set_memory(const struct module *mod, enum mod_mem_type type,
 			      int (*set_memory)(unsigned long start, int num_pages))
 {
 	const struct module_memory *mod_mem = &mod->mem[type];
 
+	if (!mod_mem->base)
+		return 0;
+
 	set_vm_flush_reset_perms(mod_mem->base);
-	set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);
+	return set_memory((unsigned long)mod_mem->base, mod_mem->size >> PAGE_SHIFT);
 }
 
 /*
@@ -26,37 +29,53 @@ static void module_set_memory(const struct module *mod, enum mod_mem_type type,
  * CONFIG_STRICT_MODULE_RWX because they are needed regardless of whether we
  * are strict.
  */
-void module_enable_x(const struct module *mod)
+int module_enable_text_rox(const struct module *mod)
 {
-	for_class_mod_mem_type(type, text)
-		module_set_memory(mod, type, set_memory_x);
+	for_class_mod_mem_type(type, text) {
+		int ret;
+
+		if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+			ret = module_set_memory(mod, type, set_memory_rox);
+		else
+			ret = module_set_memory(mod, type, set_memory_x);
+		if (ret)
+			return ret;
+	}
+	return 0;
 }
 
-void module_enable_ro(const struct module *mod, bool after_init)
+int module_enable_rodata_ro(const struct module *mod, bool after_init)
 {
-	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		return;
-#ifdef CONFIG_STRICT_MODULE_RWX
-	if (!rodata_enabled)
-		return;
-#endif
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX) || !rodata_enabled)
+		return 0;
 
-	module_set_memory(mod, MOD_TEXT, set_memory_ro);
-	module_set_memory(mod, MOD_INIT_TEXT, set_memory_ro);
-	module_set_memory(mod, MOD_RODATA, set_memory_ro);
-	module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);
+	ret = module_set_memory(mod, MOD_RODATA, set_memory_ro);
+	if (ret)
+		return ret;
+	ret = module_set_memory(mod, MOD_INIT_RODATA, set_memory_ro);
+	if (ret)
+		return ret;
 
 	if (after_init)
-		module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
+		return module_set_memory(mod, MOD_RO_AFTER_INIT, set_memory_ro);
+
+	return 0;
 }
 
-void module_enable_nx(const struct module *mod)
+int module_enable_data_nx(const struct module *mod)
 {
 	if (!IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
-		return;
+		return 0;
 
-	for_class_mod_mem_type(type, data)
-		module_set_memory(mod, type, set_memory_nx);
+	for_class_mod_mem_type(type, data) {
+		int ret = module_set_memory(mod, type, set_memory_nx);
+
+		if (ret)
+			return ret;
+	}
+	return 0;
 }
 
 int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
...
@@ -58,11 +58,14 @@ static int num_test_devs;
  * @need_mod_put for your tests case.
  */
 
 enum kmod_test_case {
+	/* private: */
 	__TEST_KMOD_INVALID = 0,
+	/* public: */
 
 	TEST_KMOD_DRIVER,
 	TEST_KMOD_FS_TYPE,
 
+	/* private: */
 	__TEST_KMOD_MAX,
 };
@@ -82,6 +85,7 @@ struct kmod_test_device;
  * @ret_sync: return value if request_module() is used, sync request for
  *	@TEST_KMOD_DRIVER
  * @fs_sync: return value of get_fs_type() for @TEST_KMOD_FS_TYPE
+ * @task_sync: kthread's task_struct or %NULL if not running
  * @thread_idx: thread ID
  * @test_dev: test device test is being performed under
  * @need_mod_put: Some tests (get_fs_type() is one) requires putting the module
@@ -108,7 +112,7 @@ struct kmod_test_device_info {
  * @dev: pointer to misc_dev's own struct device
  * @config_mutex: protects configuration of test
  * @trigger_mutex: the test trigger can only be fired once at a time
- * @thread_lock: protects @done count, and the @info per each thread
+ * @thread_mutex: protects @done count, and the @info per each thread
  * @done: number of threads which have completed or failed
  * @test_is_oom: when we run out of memory, use this to halt moving forward
  * @kthreads_done: completion used to signal when all work is done
...