Commit 8d8022e8 authored by Rusty Russell

module: do percpu allocation after uniqueness check. No, really!

v3.8-rc1-5-g1fb9341a was supposed to stop parallel kvm loads from exhausting
percpu memory on large machines:

    Now we have a new state MODULE_STATE_UNFORMED, we can insert the
    module into the list (and thus guarantee its uniqueness) before we
    allocate the per-cpu region.

In my defence, it didn't actually say the patch did this.  Just that
we "can".

This patch actually *does* it.
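
In outline, load_module() now orders the two steps like this (a condensed
sketch of the flow, not the literal kernel code: the list-insertion helper
name is paraphrased, unrelated steps and locking are elided, and
alloc_module_percpu() is the helper added in the diff below):

    static int load_module_sketch(struct load_info *info, int flags)
    {
        struct module *mod;
        int err;

        /* Layout and section setup no longer touch percpu memory. */
        mod = layout_and_allocate(info, flags);
        if (IS_ERR(mod))
            return PTR_ERR(mod);

        /*
         * Step 1: insert into the global module list in
         * MODULE_STATE_UNFORMED.  If a same-named module is already
         * loading, this fails, so duplicate loaders bail out here
         * before touching the percpu allocator.  (Name paraphrased.)
         */
        err = add_unformed_module_sketch(mod);
        if (err)
            goto free_module;

        /* Step 2: only the unique survivor pays for the allocation. */
        err = alloc_module_percpu(mod, info);
        if (err)
            goto unlink_mod;

        /* ... continue: module_unload_init() etc. ... */
        return 0;

    unlink_mod:
        /* take the module back off the list, then fall through */
    free_module:
        module_deallocate(mod, info);
        return err;
    }
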
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Tested-by: Jim Hull <jim.hull@hp.com>
Cc: stable@kernel.org # 3.8
parent 54041d8a
kernel/module.c:

@@ -2940,7 +2940,6 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 {
 	/* Module within temporary copy. */
 	struct module *mod;
-	Elf_Shdr *pcpusec;
 	int err;
 
 	mod = setup_load_info(info, flags);
@@ -2955,17 +2954,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	err = module_frob_arch_sections(info->hdr, info->sechdrs,
 					info->secstrings, mod);
 	if (err < 0)
-		goto out;
+		return ERR_PTR(err);
 
-	pcpusec = &info->sechdrs[info->index.pcpu];
-	if (pcpusec->sh_size) {
-		/* We have a special allocation for this section. */
-		err = percpu_modalloc(mod,
-				      pcpusec->sh_size, pcpusec->sh_addralign);
-		if (err)
-			goto out;
-		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
-	}
+	/* We will do a special allocation for per-cpu sections later. */
+	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
 
 	/* Determine total sizes, and put offsets in sh_entsize.  For now
 	   this is done generically; there doesn't appear to be any
@@ -2976,17 +2968,22 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 	/* Allocate and move to the final place */
 	err = move_module(mod, info);
 	if (err)
-		goto free_percpu;
+		return ERR_PTR(err);
 
 	/* Module has been copied to its final place now: return it. */
 	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
 	kmemleak_load_module(mod, info);
 	return mod;
+}
 
-free_percpu:
-	percpu_modfree(mod);
-out:
-	return ERR_PTR(err);
+static int alloc_module_percpu(struct module *mod, struct load_info *info)
+{
+	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
+
+	if (!pcpusec->sh_size)
+		return 0;
+
+	/* We have a special allocation for this section. */
+	return percpu_modalloc(mod, pcpusec->sh_size, pcpusec->sh_addralign);
 }
 
 /* mod is no longer valid after this! */
@@ -3262,6 +3259,11 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	}
 #endif
 
+	/* To avoid stressing percpu allocator, do this once we're unique. */
+	err = alloc_module_percpu(mod, info);
+	if (err)
+		goto unlink_mod;
+
 	/* Now module is in final location, initialize linked lists, etc. */
 	err = module_unload_init(mod);
 	if (err)