Commit 6eaaaac9 authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus

* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  remove CONFIG_KMOD from core kernel code
  remove CONFIG_KMOD from lib
  remove CONFIG_KMOD from sparc64
  rework try_then_request_module to do less in non-modular kernels
  remove mention of CONFIG_KMOD from documentation
  make CONFIG_KMOD invisible
  modules: Take a shortcut for checking if an address is in a module
  module: turn longs into ints for module sizes
  Shrink struct module: CONFIG_UNUSED_SYMBOLS ifdefs
  module: reorder struct module to save space on 64 bit builds
  module: generic each_symbol iterator function
  module: don't use stop_machine for waiting rmmod
parents 06b8147c a1ef5adb
......@@ -26,11 +26,11 @@ You can simplify mounting by just typing:
this will allocate the first available loopback device (and load loop.o
kernel module if necessary) automatically. If the loopback driver is not
loaded automatically, make sure that your kernel is compiled with kmod
support (CONFIG_KMOD) enabled. Beware that umount will not
deallocate /dev/loopN device if /etc/mtab file on your system is a
symbolic link to /proc/mounts. You will need to do it manually using
"-d" switch of losetup(8). Read losetup(8) manpage for more info.
loaded automatically, make sure that you have compiled the module and
that modprobe is functioning. Beware that umount will not deallocate
/dev/loopN device if /etc/mtab file on your system is a symbolic link to
/proc/mounts. You will need to do it manually using "-d" switch of
losetup(8). Read losetup(8) manpage for more info.
To create the BFS image under UnixWare you need to find out first which
slice contains it. The command prtvtoc(1M) is your friend:
......
......@@ -42,7 +42,7 @@
<sect1><title>Device Components</title>
!Esound/core/device.c
</sect1>
<sect1><title>KMOD and Device File Entries</title>
<sect1><title>Module requests and Device File Entries</title>
!Esound/core/sound.c
</sect1>
<sect1><title>Memory Management Helpers</title>
......
......@@ -305,21 +305,14 @@ driver, like this:
which will result in the needed drivers getting loaded automatically.
g. if you are planning on using kerneld to automatically load the
module for you, then you need to edit /etc/conf.modules and add the
g. if you are planning on having the kernel automatically request
the module for you, then you need to edit /etc/conf.modules and add the
following lines:
options ixj dspio=0x340 xio=0x330 ixjdebug=0
If you do this, then when you execute an application that uses the
module kerneld will load the module for you. Note that to do this,
you need to have your kernel set to support kerneld. You can check
for this by looking at /usr/src/linux/.config and you should see this:
# Loadable module support
#
<snip>
CONFIG_KMOD=y
module the kernel will request that it is loaded.
h. if you want non-root users to be able to read and write to the
ixj devices (this is a good idea!) you should do the following:
......
......@@ -193,9 +193,6 @@ Description: Automatic 'ovcamchip' module loading: 0 disabled, 1 enabled.
loads that module automatically. This action is performed as
soon as the 'w9968cf' module is loaded into memory.
Default: 1
Note: The kernel must be compiled with the CONFIG_KMOD option
enabled for the 'ovcamchip' module to be loaded and for
this parameter to be present.
-------------------------------------------------------------------------------
Name: simcams
Type: int
......
......@@ -873,8 +873,8 @@ config HOTPLUG
plugged into slots found on all modern laptop computers. Another
example, used on modern desktops as well as laptops, is USB.
Enable HOTPLUG and KMOD, and build a modular kernel. Get agent
software (at <http://linux-hotplug.sourceforge.net/>) and install it.
Enable HOTPLUG and build a modular kernel. Get agent software
(from <http://linux-hotplug.sourceforge.net/>) and install it.
Then your kernel will automatically call out to a user mode "policy
agent" (/sbin/hotplug) to load modules and set up software needed
to use devices as you hotplug them.
......
......@@ -683,9 +683,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
((unsigned long) child_sf) - STACK_BIAS;
/* Special case, if we are spawning a kernel thread from
* a userspace task (via KMOD, NFS, or similar) we must
* disable performance counters in the child because the
* address space and protection realm are changing.
* a userspace task (usermode helper, NFS or similar), we
* must disable performance counters in the child because
* the address space and protection realm are changing.
*/
if (t->flags & _TIF_PERFCTR) {
t->user_cntd0 = t->user_cntd1 = NULL;
......
......@@ -36,9 +36,6 @@
#include <asm/processor.h>
#include <asm/timer.h>
#include <asm/head.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif
#include <asm/prom.h>
#include "entry.h"
......
......@@ -194,8 +194,8 @@ config HOTPLUG
plugged into slots found on all modern laptop computers. Another
example, used on modern desktops as well as laptops, is USB.
Enable HOTPLUG and KMOD, and build a modular kernel. Get agent
software (at <http://linux-hotplug.sourceforge.net/>) and install it.
Enable HOTPLUG and build a modular kernel. Get agent software
(from <http://linux-hotplug.sourceforge.net/>) and install it.
Then your kernel will automatically call out to a user mode "policy
agent" (/sbin/hotplug) to load modules and set up software needed
to use devices as you hotplug them.
......
......@@ -25,15 +25,16 @@
#define KMOD_PATH_LEN 256
#ifdef CONFIG_KMOD
#ifdef CONFIG_MODULES
/* modprobe exit status on success, -ve on error. Return value
* usually useless though. */
extern int request_module(const char * name, ...) __attribute__ ((format (printf, 1, 2)));
#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
#else
static inline int request_module(const char * name, ...) { return -ENOSYS; }
#define try_then_request_module(x, mod...) (x)
#endif
#define try_then_request_module(x, mod...) ((x) ?: (request_module(mod), (x)))
struct key;
struct file;
......
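For context on the hunk above: try_then_request_module() is moved out of the CONFIG_KMOD block, so a non-modular kernel now compiles it down to the bare lookup expression while a modular one keeps the request-and-retry behaviour. A minimal caller sketch, using hypothetical names (struct foo_driver and foo_find_driver() are illustration only, not part of this series):

#include <linux/kmod.h>

/*
 * With CONFIG_MODULES, the macro evaluates the lookup once and, only if it
 * returned NULL, runs request_module() and then re-evaluates the lookup.
 * Without CONFIG_MODULES it collapses to the plain lookup expression.
 */
static struct foo_driver *foo_get_driver(const char *name)
{
	return try_then_request_module(foo_find_driver(name),
				       "foo-driver-%s", name);
}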
......@@ -249,27 +249,30 @@ struct module
/* Exported symbols */
const struct kernel_symbol *syms;
unsigned int num_syms;
const unsigned long *crcs;
unsigned int num_syms;
/* GPL-only exported symbols. */
const struct kernel_symbol *gpl_syms;
unsigned int num_gpl_syms;
const struct kernel_symbol *gpl_syms;
const unsigned long *gpl_crcs;
#ifdef CONFIG_UNUSED_SYMBOLS
/* unused exported symbols. */
const struct kernel_symbol *unused_syms;
unsigned int num_unused_syms;
const unsigned long *unused_crcs;
unsigned int num_unused_syms;
/* GPL-only, unused exported symbols. */
const struct kernel_symbol *unused_gpl_syms;
unsigned int num_unused_gpl_syms;
const struct kernel_symbol *unused_gpl_syms;
const unsigned long *unused_gpl_crcs;
#endif
/* symbols that will be GPL-only in the near future. */
const struct kernel_symbol *gpl_future_syms;
unsigned int num_gpl_future_syms;
const unsigned long *gpl_future_crcs;
unsigned int num_gpl_future_syms;
/* Exception table */
unsigned int num_exentries;
......@@ -285,10 +288,10 @@ struct module
void *module_core;
/* Here are the sizes of the init and core sections */
unsigned long init_size, core_size;
unsigned int init_size, core_size;
/* The size of the executable code in each section. */
unsigned long init_text_size, core_text_size;
unsigned int init_text_size, core_text_size;
/* The handle returned from unwind_add_table. */
void *unwind_info;
......@@ -300,29 +303,15 @@ struct module
#ifdef CONFIG_GENERIC_BUG
/* Support for BUG */
unsigned num_bugs;
struct list_head bug_list;
struct bug_entry *bug_table;
unsigned num_bugs;
#endif
#ifdef CONFIG_MODULE_UNLOAD
/* Reference counts */
struct module_ref ref[NR_CPUS];
/* What modules depend on me? */
struct list_head modules_which_use_me;
/* Who is waiting for us to be unloaded */
struct task_struct *waiter;
/* Destruction function. */
void (*exit)(void);
#endif
#ifdef CONFIG_KALLSYMS
/* We keep the symbol and string tables for kallsyms. */
Elf_Sym *symtab;
unsigned long num_symtab;
unsigned int num_symtab;
char *strtab;
/* Section attributes */
......@@ -342,6 +331,21 @@ struct module
struct marker *markers;
unsigned int num_markers;
#endif
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
struct list_head modules_which_use_me;
/* Who is waiting for us to be unloaded */
struct task_struct *waiter;
/* Destruction function. */
void (*exit)(void);
/* Reference counts */
struct module_ref ref[NR_CPUS];
#endif
};
#ifndef MODULE_ARCH_INIT
#define MODULE_ARCH_INIT {}
......
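The struct module reordering above pairs each unsigned int counter with its neighbour so the two share one 8-byte slot on 64-bit builds, instead of each pointer/int alternation dragging 4 bytes of alignment padding behind it (the long-to-unsigned-int size fields shrink things further). A standalone illustration with toy field names, not taken from the patch:

/* On an LP64 target (8-byte pointers, 4-byte ints): */
struct alternating {		/* sizeof == 32 */
	void *syms;
	unsigned int num_syms;	/* 4 bytes of padding follow */
	void *crcs;
	unsigned int num_crcs;	/* 4 bytes of tail padding */
};

struct grouped {		/* sizeof == 24 */
	void *syms;
	void *crcs;
	unsigned int num_syms;	/* packs with num_crcs into one slot */
	unsigned int num_crcs;
};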
......@@ -856,8 +856,8 @@ config MODULE_UNLOAD
help
Without this option you will not be able to unload any
modules (note that some modules may not be unloadable
anyway), which makes your kernel slightly smaller and
simpler. If unsure, say Y.
anyway), which makes your kernel smaller, faster
and simpler. If unsure, say Y.
config MODULE_FORCE_UNLOAD
bool "Forced module unloading"
......@@ -893,16 +893,11 @@ config MODULE_SRCVERSION_ALL
will be created for all modules. If unsure, say N.
config KMOD
bool "Automatic kernel module loading"
def_bool y
depends on MODULES
help
Normally when you have selected some parts of the kernel to
be created as kernel modules, you must load them (using the
"modprobe" command) before you can use them. If you say Y
here, some parts of the kernel will be able to load modules
automatically: when a part of the kernel needs a module, it
runs modprobe with the appropriate arguments, thereby
loading the module if it is available. If unsure, say Y.
This is being removed soon. These days, CONFIG_MODULES
implies CONFIG_KMOD, so use that instead.
config STOP_MACHINE
bool
......
......@@ -65,7 +65,7 @@ lookup_exec_domain(u_long personality)
goto out;
}
#ifdef CONFIG_KMOD
#ifdef CONFIG_MODULES
read_unlock(&exec_domains_lock);
request_module("personality-%ld", pers);
read_lock(&exec_domains_lock);
......
......@@ -42,7 +42,7 @@ extern int max_threads;
static struct workqueue_struct *khelper_wq;
#ifdef CONFIG_KMOD
#ifdef CONFIG_MODULES
/*
modprobe_path is set via /proc/sys.
......
......@@ -70,6 +70,9 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
static BLOCKING_NOTIFIER_HEAD(module_notify_list);
/* Bounds of module allocation, for speeding __module_text_address */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;
int register_module_notifier(struct notifier_block * nb)
{
return blocking_notifier_chain_register(&module_notify_list, nb);
......@@ -134,17 +137,19 @@ extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
......@@ -152,156 +157,186 @@ extern const unsigned long __start___kcrctab_unused_gpl[];
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
const struct kernel_symbol *start,
const struct kernel_symbol *stop)
{
const struct kernel_symbol *ks = start;
for (; ks < stop; ks++)
if (strcmp(ks->name, name) == 0)
return ks;
return NULL;
}
static bool always_ok(bool gplok, bool warn, const char *name)
{
return true;
}
static bool printk_unused_warning(bool gplok, bool warn, const char *name)
{
if (warn) {
printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
"however this module is using it.\n", name);
printk(KERN_WARNING
"This symbol will go away in the future.\n");
printk(KERN_WARNING
"Please evalute if this is the right api to use and if "
"it really is, submit a report the linux kernel "
"mailinglist together with submitting your code for "
"inclusion.\n");
}
return true;
}
static bool gpl_only_unused_warning(bool gplok, bool warn, const char *name)
{
if (!gplok)
return false;
return printk_unused_warning(gplok, warn, name);
}
static bool gpl_only(bool gplok, bool warn, const char *name)
{
return gplok;
}
static bool warn_if_not_gpl(bool gplok, bool warn, const char *name)
{
if (!gplok && warn) {
printk(KERN_WARNING "Symbol %s is being used "
"by a non-GPL module, which will not "
"be allowed in the future\n", name);
printk(KERN_WARNING "Please see the file "
"Documentation/feature-removal-schedule.txt "
"in the kernel source tree for more details.\n");
}
return true;
}
struct symsearch {
const struct kernel_symbol *start, *stop;
const unsigned long *crcs;
bool (*check)(bool gplok, bool warn, const char *name);
enum {
NOT_GPL_ONLY,
GPL_ONLY,
WILL_BE_GPL_ONLY,
} licence;
bool unused;
};
/* Look through this array of symbol tables for a symbol match which
* passes the check function. */
static const struct kernel_symbol *search_symarrays(const struct symsearch *arr,
unsigned int num,
const char *name,
bool gplok,
bool warn,
const unsigned long **crc)
static bool each_symbol_in_section(const struct symsearch *arr,
unsigned int arrsize,
struct module *owner,
bool (*fn)(const struct symsearch *syms,
struct module *owner,
unsigned int symnum, void *data),
void *data)
{
unsigned int i;
const struct kernel_symbol *ks;
unsigned int i, j;
for (i = 0; i < num; i++) {
ks = lookup_symbol(name, arr[i].start, arr[i].stop);
if (!ks || !arr[i].check(gplok, warn, name))
continue;
if (crc)
*crc = symversion(arr[i].crcs, ks - arr[i].start);
return ks;
for (j = 0; j < arrsize; j++) {
for (i = 0; i < arr[j].stop - arr[j].start; i++)
if (fn(&arr[j], owner, i, data))
return true;
}
return NULL;
return false;
}
/* Find a symbol, return value, (optional) crc and (optional) module
* which owns it */
static unsigned long find_symbol(const char *name,
struct module **owner,
const unsigned long **crc,
bool gplok,
bool warn)
/* Returns true as soon as fn returns true, otherwise false. */
static bool each_symbol(bool (*fn)(const struct symsearch *arr,
struct module *owner,
unsigned int symnum, void *data),
void *data)
{
struct module *mod;
const struct kernel_symbol *ks;
const struct symsearch arr[] = {
{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
always_ok },
NOT_GPL_ONLY, false },
{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
__start___kcrctab_gpl, gpl_only },
__start___kcrctab_gpl,
GPL_ONLY, false },
{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
__start___kcrctab_gpl_future, warn_if_not_gpl },
__start___kcrctab_gpl_future,
WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
{ __start___ksymtab_unused, __stop___ksymtab_unused,
__start___kcrctab_unused, printk_unused_warning },
__start___kcrctab_unused,
NOT_GPL_ONLY, true },
{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
__start___kcrctab_unused_gpl, gpl_only_unused_warning },
__start___kcrctab_unused_gpl,
GPL_ONLY, true },
#endif
};
/* Core kernel first. */
ks = search_symarrays(arr, ARRAY_SIZE(arr), name, gplok, warn, crc);
if (ks) {
if (owner)
*owner = NULL;
return ks->value;
}
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
return true;
/* Now try modules. */
list_for_each_entry(mod, &modules, list) {
struct symsearch arr[] = {
{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
always_ok },
NOT_GPL_ONLY, false },
{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
mod->gpl_crcs, gpl_only },
mod->gpl_crcs,
GPL_ONLY, false },
{ mod->gpl_future_syms,
mod->gpl_future_syms + mod->num_gpl_future_syms,
mod->gpl_future_crcs, warn_if_not_gpl },
mod->gpl_future_crcs,
WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
{ mod->unused_syms,
mod->unused_syms + mod->num_unused_syms,
mod->unused_crcs, printk_unused_warning },
mod->unused_crcs,
NOT_GPL_ONLY, true },
{ mod->unused_gpl_syms,
mod->unused_gpl_syms + mod->num_unused_gpl_syms,
mod->unused_gpl_crcs, gpl_only_unused_warning },
mod->unused_gpl_crcs,
GPL_ONLY, true },
#endif
};
ks = search_symarrays(arr, ARRAY_SIZE(arr),
name, gplok, warn, crc);
if (ks) {
if (owner)
*owner = mod;
return ks->value;
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
return true;
}
return false;
}
struct find_symbol_arg {
/* Input */
const char *name;
bool gplok;
bool warn;
/* Output */
struct module *owner;
const unsigned long *crc;
unsigned long value;
};
static bool find_symbol_in_section(const struct symsearch *syms,
struct module *owner,
unsigned int symnum, void *data)
{
struct find_symbol_arg *fsa = data;
if (strcmp(syms->start[symnum].name, fsa->name) != 0)
return false;
if (!fsa->gplok) {
if (syms->licence == GPL_ONLY)
return false;
if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
printk(KERN_WARNING "Symbol %s is being used "
"by a non-GPL module, which will not "
"be allowed in the future\n", fsa->name);
printk(KERN_WARNING "Please see the file "
"Documentation/feature-removal-schedule.txt "
"in the kernel source tree for more details.\n");
}
}
#ifdef CONFIG_UNUSED_SYMBOLS
if (syms->unused && fsa->warn) {
printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
"however this module is using it.\n", fsa->name);
printk(KERN_WARNING
"This symbol will go away in the future.\n");
printk(KERN_WARNING
"Please evalute if this is the right api to use and if "
"it really is, submit a report the linux kernel "
"mailinglist together with submitting your code for "
"inclusion.\n");
}
#endif
fsa->owner = owner;
fsa->crc = symversion(syms->crcs, symnum);
fsa->value = syms->start[symnum].value;
return true;
}
/* Find a symbol, return value, (optional) crc and (optional) module
* which owns it */
static unsigned long find_symbol(const char *name,
struct module **owner,
const unsigned long **crc,
bool gplok,
bool warn)
{
struct find_symbol_arg fsa;
fsa.name = name;
fsa.gplok = gplok;
fsa.warn = warn;
if (each_symbol(find_symbol_in_section, &fsa)) {
if (owner)
*owner = fsa.owner;
if (crc)
*crc = fsa.crc;
return fsa.value;
}
DEBUGP("Failed to find symbol %s\n", name);
return -ENOENT;
}
/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
const struct kernel_symbol *start,
const struct kernel_symbol *stop)
{
const struct kernel_symbol *ks = start;
for (; ks < stop; ks++)
if (strcmp(ks->name, name) == 0)
return ks;
return NULL;
}
/* Search for module by name: must hold module_mutex. */
static struct module *find_module(const char *name)
{
......@@ -639,8 +674,8 @@ static int __try_stop_module(void *_sref)
{
struct stopref *sref = _sref;
/* If it's not unused, quit unless we are told to block. */
if ((sref->flags & O_NONBLOCK) && module_refcount(sref->mod) != 0) {
/* If it's not unused, quit unless we're forcing. */
if (module_refcount(sref->mod) != 0) {
if (!(*sref->forced = try_force_unload(sref->flags)))
return -EWOULDBLOCK;
}
......@@ -652,9 +687,16 @@ static int __try_stop_module(void *_sref)
static int try_stop_module(struct module *mod, int flags, int *forced)
{
if (flags & O_NONBLOCK) {
struct stopref sref = { mod, flags, forced };
return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
} else {
/* We don't need to stop the machine for this. */
mod->state = MODULE_STATE_GOING;
synchronize_sched();
return 0;
}
}
unsigned int module_refcount(struct module *mod)
......@@ -1445,8 +1487,10 @@ static int verify_export_symbols(struct module *mod)
{ mod->syms, mod->num_syms },
{ mod->gpl_syms, mod->num_gpl_syms },
{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
{ mod->unused_syms, mod->num_unused_syms },
{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
};
for (i = 0; i < ARRAY_SIZE(arr); i++) {
......@@ -1526,7 +1570,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
}
/* Update size with this section: return offset. */
static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
static long get_offset(unsigned int *size, Elf_Shdr *sechdr)
{
long ret;
......@@ -1738,6 +1782,20 @@ static inline void add_kallsyms(struct module *mod,
}
#endif /* CONFIG_KALLSYMS */
static void *module_alloc_update_bounds(unsigned long size)
{
void *ret = module_alloc(size);
if (ret) {
/* Update module bounds. */
if ((unsigned long)ret < module_addr_min)
module_addr_min = (unsigned long)ret;
if ((unsigned long)ret + size > module_addr_max)
module_addr_max = (unsigned long)ret + size;
}
return ret;
}
/* Allocate and load the module: note that size of section 0 is always
zero, and we rely on this for optional sections. */
static struct module *load_module(void __user *umod,
......@@ -1764,10 +1822,12 @@ static struct module *load_module(void __user *umod,
unsigned int gplfutureindex;
unsigned int gplfuturecrcindex;
unsigned int unwindex = 0;
#ifdef CONFIG_UNUSED_SYMBOLS
unsigned int unusedindex;
unsigned int unusedcrcindex;
unsigned int unusedgplindex;
unsigned int unusedgplcrcindex;
#endif
unsigned int markersindex;
unsigned int markersstringsindex;
struct module *mod;
......@@ -1850,13 +1910,15 @@ static struct module *load_module(void __user *umod,
exportindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab");
gplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl");
gplfutureindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_gpl_future");
unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
crcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab");
gplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl");
gplfuturecrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_gpl_future");
#ifdef CONFIG_UNUSED_SYMBOLS
unusedindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused");
unusedgplindex = find_sec(hdr, sechdrs, secstrings, "__ksymtab_unused_gpl");
unusedcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused");
unusedgplcrcindex = find_sec(hdr, sechdrs, secstrings, "__kcrctab_unused_gpl");
#endif
setupindex = find_sec(hdr, sechdrs, secstrings, "__param");
exindex = find_sec(hdr, sechdrs, secstrings, "__ex_table");
obsparmindex = find_sec(hdr, sechdrs, secstrings, "__obsparm");
......@@ -1935,7 +1997,7 @@ static struct module *load_module(void __user *umod,
layout_sections(mod, hdr, sechdrs, secstrings);
/* Do the allocs. */
ptr = module_alloc(mod->core_size);
ptr = module_alloc_update_bounds(mod->core_size);
if (!ptr) {
err = -ENOMEM;
goto free_percpu;
......@@ -1943,7 +2005,7 @@ static struct module *load_module(void __user *umod,
memset(ptr, 0, mod->core_size);
mod->module_core = ptr;
ptr = module_alloc(mod->init_size);
ptr = module_alloc_update_bounds(mod->init_size);
if (!ptr && mod->init_size) {
err = -ENOMEM;
goto free_core;
......@@ -2018,14 +2080,15 @@ static struct module *load_module(void __user *umod,
mod->gpl_crcs = (void *)sechdrs[gplcrcindex].sh_addr;
mod->num_gpl_future_syms = sechdrs[gplfutureindex].sh_size /
sizeof(*mod->gpl_future_syms);
mod->num_unused_syms = sechdrs[unusedindex].sh_size /
sizeof(*mod->unused_syms);
mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
sizeof(*mod->unused_gpl_syms);
mod->gpl_future_syms = (void *)sechdrs[gplfutureindex].sh_addr;
if (gplfuturecrcindex)
mod->gpl_future_crcs = (void *)sechdrs[gplfuturecrcindex].sh_addr;
#ifdef CONFIG_UNUSED_SYMBOLS
mod->num_unused_syms = sechdrs[unusedindex].sh_size /
sizeof(*mod->unused_syms);
mod->num_unused_gpl_syms = sechdrs[unusedgplindex].sh_size /
sizeof(*mod->unused_gpl_syms);
mod->unused_syms = (void *)sechdrs[unusedindex].sh_addr;
if (unusedcrcindex)
mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr;
......@@ -2033,13 +2096,17 @@ static struct module *load_module(void __user *umod,
if (unusedgplcrcindex)
mod->unused_gpl_crcs
= (void *)sechdrs[unusedgplcrcindex].sh_addr;
#endif
#ifdef CONFIG_MODVERSIONS
if ((mod->num_syms && !crcindex) ||
(mod->num_gpl_syms && !gplcrcindex) ||
(mod->num_gpl_future_syms && !gplfuturecrcindex) ||
(mod->num_unused_syms && !unusedcrcindex) ||
(mod->num_unused_gpl_syms && !unusedgplcrcindex)) {
if ((mod->num_syms && !crcindex)
|| (mod->num_gpl_syms && !gplcrcindex)
|| (mod->num_gpl_future_syms && !gplfuturecrcindex)
#ifdef CONFIG_UNUSED_SYMBOLS
|| (mod->num_unused_syms && !unusedcrcindex)
|| (mod->num_unused_gpl_syms && !unusedgplcrcindex)
#endif
) {
printk(KERN_WARNING "%s: No versions for exported symbols.\n", mod->name);
err = try_to_force_load(mod, "nocrc");
if (err)
......@@ -2512,7 +2579,7 @@ static int m_show(struct seq_file *m, void *p)
struct module *mod = list_entry(p, struct module, list);
char buf[8];
seq_printf(m, "%s %lu",
seq_printf(m, "%s %u",
mod->name, mod->init_size + mod->core_size);
print_unload_info(m, mod);
......@@ -2595,6 +2662,9 @@ struct module *__module_text_address(unsigned long addr)
{
struct module *mod;
if (addr < module_addr_min || addr > module_addr_max)
return NULL;
list_for_each_entry(mod, &modules, list)
if (within(addr, mod->module_init, mod->init_text_size)
|| within(addr, mod->module_core, mod->core_text_size))
......
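The new each_symbol()/each_symbol_in_section() pair in the module.c changes above replaces the old search_symarrays() special case with a generic visitor over every export table, core kernel and loaded modules alike; find_symbol_in_section() is its first user. A hypothetical second user, sketched against the callback signature introduced here (the counting helpers are illustration only and not part of this commit):

/* Count every exported symbol visible to the loader.  The callback
 * returns false so each_symbol() never stops early. */
static bool count_one(const struct symsearch *syms, struct module *owner,
		      unsigned int symnum, void *data)
{
	(*(unsigned long *)data)++;
	return false;
}

static unsigned long count_exported_symbols(void)
{
	unsigned long n = 0;

	each_symbol(count_one, &n);
	return n;
}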
......@@ -110,7 +110,7 @@ static int min_percpu_pagelist_fract = 8;
static int ngroups_max = NGROUPS_MAX;
#ifdef CONFIG_KMOD
#ifdef CONFIG_MODULES
extern char modprobe_path[];
#endif
#ifdef CONFIG_CHR_DEV_SG
......@@ -475,7 +475,7 @@ static struct ctl_table kern_table[] = {
.proc_handler = &ftrace_enable_sysctl,
},
#endif
#ifdef CONFIG_KMOD
#ifdef CONFIG_MODULES
{
.ctl_name = KERN_MODPROBE,
.procname = "modprobe",
......
......@@ -267,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
return ERR_PTR(-EINVAL);
ops = lookup_ts_algo(algo);
#ifdef CONFIG_KMOD
#ifdef CONFIG_MODULES
/*
* Why not always autoload you may ask. Some users are
* in a situation where requesting a module may deadlock,
......