Commit d72b3751 authored by Andi Kleen, committed by Rusty Russell

Remove stop_machine during module load v2

Remove stop_machine during module load v2

module loading currently does a stop_machine on each module load to insert
the module into the global module lists.  Especially on larger systems this
can be quite expensive.

It does that to handle concurrent lock-less module list readers
like kallsyms.

I don't think stop_machine() is actually needed to insert something
into a list though. There are no concurrent writers because the
module mutex is taken. And the RCU list functions know how to insert
a node into a list with the right memory ordering so that concurrent
readers don't go off into the woods.

So remove the stop_machine for the module list insert and just
do a list_add_rcu() instead.

Module removal will still do a stop_machine of course, it needs
that for other reasons.

v2: Revised readers based on Paul's comments. All readers that only
    rely on disabled preemption need to be changed to list_for_each_rcu().
    Done that. The others are ok because they have the modules mutex.
    Also added a possible missing preempt disable for print_modules().

[cc Paul McKenney for review. It's not RCU, but quite similar.]
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
parent 5e458cc0
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include <linux/string.h> #include <linux/string.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/unwind.h> #include <linux/unwind.h>
#include <linux/rculist.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <linux/license.h> #include <linux/license.h>
...@@ -63,7 +64,7 @@ ...@@ -63,7 +64,7 @@
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
/* List of modules, protected by module_mutex or preempt_disable /* List of modules, protected by module_mutex or preempt_disable
* (add/delete uses stop_machine). */ * (delete uses stop_machine/add uses RCU list operations). */
static DEFINE_MUTEX(module_mutex); static DEFINE_MUTEX(module_mutex);
static LIST_HEAD(modules); static LIST_HEAD(modules);
...@@ -241,7 +242,7 @@ static bool each_symbol(bool (*fn)(const struct symsearch *arr, ...@@ -241,7 +242,7 @@ static bool each_symbol(bool (*fn)(const struct symsearch *arr,
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
return true; return true;
list_for_each_entry(mod, &modules, list) { list_for_each_entry_rcu(mod, &modules, list) {
struct symsearch arr[] = { struct symsearch arr[] = {
{ mod->syms, mod->syms + mod->num_syms, mod->crcs, { mod->syms, mod->syms + mod->num_syms, mod->crcs,
NOT_GPL_ONLY, false }, NOT_GPL_ONLY, false },
...@@ -1416,17 +1417,6 @@ static void mod_kobject_remove(struct module *mod) ...@@ -1416,17 +1417,6 @@ static void mod_kobject_remove(struct module *mod)
mod_sysfs_fini(mod); mod_sysfs_fini(mod);
} }
/*
* link the module with the whole machine is stopped with interrupts off
* - this defends against kallsyms not taking locks
*/
static int __link_module(void *_mod)
{
struct module *mod = _mod;
list_add(&mod->list, &modules);
return 0;
}
/* /*
* unlink the module with the whole machine is stopped with interrupts off * unlink the module with the whole machine is stopped with interrupts off
* - this defends against kallsyms not taking locks * - this defends against kallsyms not taking locks
...@@ -2239,9 +2229,13 @@ static noinline struct module *load_module(void __user *umod, ...@@ -2239,9 +2229,13 @@ static noinline struct module *load_module(void __user *umod,
mod->name); mod->name);
/* Now sew it into the lists so we can get lockdep and oops /* Now sew it into the lists so we can get lockdep and oops
* info during argument parsing. Noone should access us, since * info during argument parsing. Noone should access us, since
* strong_try_module_get() will fail. */ * strong_try_module_get() will fail.
stop_machine(__link_module, mod, NULL); * lockdep/oops can run asynchronous, so use the RCU list insertion
* function to insert in a way safe to concurrent readers.
* The mutex protects against concurrent writers.
*/
list_add_rcu(&mod->list, &modules);
err = parse_args(mod->name, mod->args, kp, num_kp, NULL); err = parse_args(mod->name, mod->args, kp, num_kp, NULL);
if (err < 0) if (err < 0)
...@@ -2436,7 +2430,7 @@ const char *module_address_lookup(unsigned long addr, ...@@ -2436,7 +2430,7 @@ const char *module_address_lookup(unsigned long addr,
const char *ret = NULL; const char *ret = NULL;
preempt_disable(); preempt_disable();
list_for_each_entry(mod, &modules, list) { list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) if (within(addr, mod->module_init, mod->init_size)
|| within(addr, mod->module_core, mod->core_size)) { || within(addr, mod->module_core, mod->core_size)) {
if (modname) if (modname)
...@@ -2459,7 +2453,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname) ...@@ -2459,7 +2453,7 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
struct module *mod; struct module *mod;
preempt_disable(); preempt_disable();
list_for_each_entry(mod, &modules, list) { list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) || if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) { within(addr, mod->module_core, mod->core_size)) {
const char *sym; const char *sym;
...@@ -2483,7 +2477,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, ...@@ -2483,7 +2477,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
struct module *mod; struct module *mod;
preempt_disable(); preempt_disable();
list_for_each_entry(mod, &modules, list) { list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) || if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) { within(addr, mod->module_core, mod->core_size)) {
const char *sym; const char *sym;
...@@ -2510,7 +2504,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, ...@@ -2510,7 +2504,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
struct module *mod; struct module *mod;
preempt_disable(); preempt_disable();
list_for_each_entry(mod, &modules, list) { list_for_each_entry_rcu(mod, &modules, list) {
if (symnum < mod->num_symtab) { if (symnum < mod->num_symtab) {
*value = mod->symtab[symnum].st_value; *value = mod->symtab[symnum].st_value;
*type = mod->symtab[symnum].st_info; *type = mod->symtab[symnum].st_info;
...@@ -2553,7 +2547,7 @@ unsigned long module_kallsyms_lookup_name(const char *name) ...@@ -2553,7 +2547,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
ret = mod_find_symname(mod, colon+1); ret = mod_find_symname(mod, colon+1);
*colon = ':'; *colon = ':';
} else { } else {
list_for_each_entry(mod, &modules, list) list_for_each_entry_rcu(mod, &modules, list)
if ((ret = mod_find_symname(mod, name)) != 0) if ((ret = mod_find_symname(mod, name)) != 0)
break; break;
} }
...@@ -2656,7 +2650,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr) ...@@ -2656,7 +2650,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
struct module *mod; struct module *mod;
preempt_disable(); preempt_disable();
list_for_each_entry(mod, &modules, list) { list_for_each_entry_rcu(mod, &modules, list) {
if (mod->num_exentries == 0) if (mod->num_exentries == 0)
continue; continue;
...@@ -2682,7 +2676,7 @@ int is_module_address(unsigned long addr) ...@@ -2682,7 +2676,7 @@ int is_module_address(unsigned long addr)
preempt_disable(); preempt_disable();
list_for_each_entry(mod, &modules, list) { list_for_each_entry_rcu(mod, &modules, list) {
if (within(addr, mod->module_core, mod->core_size)) { if (within(addr, mod->module_core, mod->core_size)) {
preempt_enable(); preempt_enable();
return 1; return 1;
...@@ -2703,7 +2697,7 @@ struct module *__module_text_address(unsigned long addr) ...@@ -2703,7 +2697,7 @@ struct module *__module_text_address(unsigned long addr)
if (addr < module_addr_min || addr > module_addr_max) if (addr < module_addr_min || addr > module_addr_max)
return NULL; return NULL;
list_for_each_entry(mod, &modules, list) list_for_each_entry_rcu(mod, &modules, list)
if (within(addr, mod->module_init, mod->init_text_size) if (within(addr, mod->module_init, mod->init_text_size)
|| within(addr, mod->module_core, mod->core_text_size)) || within(addr, mod->module_core, mod->core_text_size))
return mod; return mod;
...@@ -2728,8 +2722,11 @@ void print_modules(void) ...@@ -2728,8 +2722,11 @@ void print_modules(void)
char buf[8]; char buf[8];
printk("Modules linked in:"); printk("Modules linked in:");
list_for_each_entry(mod, &modules, list) /* Most callers should already have preempt disabled, but make sure */
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list)
printk(" %s%s", mod->name, module_flags(mod, buf)); printk(" %s%s", mod->name, module_flags(mod, buf));
preempt_enable();
if (last_unloaded_module[0]) if (last_unloaded_module[0])
printk(" [last unloaded: %s]", last_unloaded_module); printk(" [last unloaded: %s]", last_unloaded_module);
printk("\n"); printk("\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment