Commit aa65be3f authored by Rusty Russell's avatar Rusty Russell Committed by Andy Grover

[PATCH] In-kernel Module Loader

This is an implementation of the in-kernel module loader extending
the try_inc_mod_count() primitive and making its use compulsory.
This has the benefit of simplicity, and similarity to the existing
scheme.  To reduce the cost of the constant increments and
decrements, reference counters are lockless and per-cpu.

Eliminated (coming in following patches):
 o Modversions
 o Module parameters
 o kallsyms
 o EXPORT_SYMBOL_GPL and MODULE_LICENSE checks
 o DEVICE_TABLE support.

New features:
 o Typesafe symbol_get/symbol_put
 o Single "insert this module" syscall interface allows trivial userspace.
 o Raceless loading and unloading

You will need the trivial replacement module utilities from:
	http://ozlabs.org/~rusty/module-init-tools-0.6.tar.gz
parent 850b830c
...@@ -157,7 +157,6 @@ OBJCOPY = $(CROSS_COMPILE)objcopy ...@@ -157,7 +157,6 @@ OBJCOPY = $(CROSS_COMPILE)objcopy
OBJDUMP = $(CROSS_COMPILE)objdump OBJDUMP = $(CROSS_COMPILE)objdump
AWK = awk AWK = awk
GENKSYMS = /sbin/genksyms GENKSYMS = /sbin/genksyms
DEPMOD = /sbin/depmod
KALLSYMS = /sbin/kallsyms KALLSYMS = /sbin/kallsyms
PERL = perl PERL = perl
MODFLAGS = -DMODULE MODFLAGS = -DMODULE
...@@ -516,7 +515,7 @@ modules: $(SUBDIRS) ...@@ -516,7 +515,7 @@ modules: $(SUBDIRS)
# Install modules # Install modules
.PHONY: modules_install .PHONY: modules_install
modules_install: _modinst_ $(patsubst %, _modinst_%, $(SUBDIRS)) _modinst_post modules_install: _modinst_ $(patsubst %, _modinst_%, $(SUBDIRS))
.PHONY: _modinst_ .PHONY: _modinst_
_modinst_: _modinst_:
...@@ -525,20 +524,6 @@ _modinst_: ...@@ -525,20 +524,6 @@ _modinst_:
@mkdir -p $(MODLIB)/kernel @mkdir -p $(MODLIB)/kernel
@ln -s $(TOPDIR) $(MODLIB)/build @ln -s $(TOPDIR) $(MODLIB)/build
# If System.map exists, run depmod. This deliberately does not have a
# dependency on System.map since that would run the dependency tree on
# vmlinux. This depmod is only for convenience to give the initial
# boot a modules.dep even before / is mounted read-write. However the
# boot script depmod is the master version.
ifeq "$(strip $(INSTALL_MOD_PATH))" ""
depmod_opts :=
else
depmod_opts := -b $(INSTALL_MOD_PATH) -r
endif
.PHONY: _modinst_post
_modinst_post:
if [ -r System.map ]; then $(DEPMOD) -ae -F System.map $(depmod_opts) $(KERNELRELEASE); fi
.PHONY: $(patsubst %, _modinst_%, $(SUBDIRS)) .PHONY: $(patsubst %, _modinst_%, $(SUBDIRS))
$(patsubst %, _modinst_%, $(SUBDIRS)) : $(patsubst %, _modinst_%, $(SUBDIRS)) :
$(Q)$(MAKE) -f scripts/Makefile.modinst obj=$(patsubst _modinst_%,%,$@) $(Q)$(MAKE) -f scripts/Makefile.modinst obj=$(patsubst _modinst_%,%,$@)
......
...@@ -1604,13 +1604,14 @@ config DEBUG_HIGHMEM ...@@ -1604,13 +1604,14 @@ config DEBUG_HIGHMEM
This options enables addition error checking for high memory systems. This options enables addition error checking for high memory systems.
Disable for production systems. Disable for production systems.
config KALLSYMS # Reimplemented RSN.
bool "Load all symbols for debugging/kksymoops" #config KALLSYMS
depends on DEBUG_KERNEL # bool "Load all symbols for debugging/kksymoops"
help # depends on DEBUG_KERNEL
Say Y here to let the kernel print out symbolic crash information and # help
symbolic stack backtraces. This increases the size of the kernel # Say Y here to let the kernel print out symbolic crash information and
somewhat, as all symbols have to be loaded into the kernel image. # symbolic stack backtraces. This increases the size of the kernel
# somewhat, as all symbols have to be loaded into the kernel image.
config X86_EXTRA_IRQS config X86_EXTRA_IRQS
bool bool
......
...@@ -813,13 +813,13 @@ config DEBUG_KERNEL ...@@ -813,13 +813,13 @@ config DEBUG_KERNEL
Say Y here if you are developing drivers or trying to debug and Say Y here if you are developing drivers or trying to debug and
identify kernel problems. identify kernel problems.
config KALLSYMS # config KALLSYMS
bool "Load all symbols for debugging/kksymoops" # bool "Load all symbols for debugging/kksymoops"
depends on DEBUG_KERNEL # depends on DEBUG_KERNEL
help # help
Say Y here to let the kernel print out symbolic crash information and # Say Y here to let the kernel print out symbolic crash information and
symbolic stack backtraces. This increases the size of the kernel # symbolic stack backtraces. This increases the size of the kernel
somewhat, as all symbols have to be loaded into the kernel image. # somewhat, as all symbols have to be loaded into the kernel image.
config IA64_PRINT_HAZARDS config IA64_PRINT_HAZARDS
bool "Print possible IA-64 dependency violations to console" bool "Print possible IA-64 dependency violations to console"
......
...@@ -1807,9 +1807,9 @@ config DEBUG_HIGHMEM ...@@ -1807,9 +1807,9 @@ config DEBUG_HIGHMEM
bool "Highmem debugging" bool "Highmem debugging"
depends on DEBUG_KERNEL && HIGHMEM depends on DEBUG_KERNEL && HIGHMEM
config KALLSYMS # config KALLSYMS
bool "Load all symbols for debugging/kksymoops" # bool "Load all symbols for debugging/kksymoops"
depends on DEBUG_KERNEL # depends on DEBUG_KERNEL
config KGDB config KGDB
bool "Include kgdb kernel debugger" bool "Include kgdb kernel debugger"
......
...@@ -739,13 +739,13 @@ config INIT_DEBUG ...@@ -739,13 +739,13 @@ config INIT_DEBUG
help help
Fill __init and __initdata at the end of boot. This is only for debugging. Fill __init and __initdata at the end of boot. This is only for debugging.
config KALLSYMS # config KALLSYMS
bool "Load all symbols for debugging/kksymoops" # bool "Load all symbols for debugging/kksymoops"
depends on DEBUG_KERNEL # depends on DEBUG_KERNEL
help # help
Say Y here to let the kernel print out symbolic crash information and # Say Y here to let the kernel print out symbolic crash information and
symbolic stack backtraces. This increases the size of the kernel # symbolic stack backtraces. This increases the size of the kernel
somewhat, as all symbols have to be loaded into the kernel image. # somewhat, as all symbols have to be loaded into the kernel image.
endmenu endmenu
......
...@@ -151,6 +151,7 @@ static int print_unex=1; ...@@ -151,6 +151,7 @@ static int print_unex=1;
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/version.h>
#define FDPATCHES #define FDPATCHES
#include <linux/fdreg.h> #include <linux/fdreg.h>
......
...@@ -1266,13 +1266,6 @@ MODULE_DESCRIPTION( ...@@ -1266,13 +1266,6 @@ MODULE_DESCRIPTION(
"Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams"); "Compression routines for zftape. Uses the lzrw3 algorithm by Ross Williams");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
#if LINUX_VERSION_CODE >= KERNEL_VER(2,1,18)
static int can_unload(void)
{
return keep_module_locked ? -EBUSY : 0;
}
#endif
/* Called by modules package when installing the driver /* Called by modules package when installing the driver
*/ */
int init_module(void) int init_module(void)
...@@ -1282,9 +1275,11 @@ int init_module(void) ...@@ -1282,9 +1275,11 @@ int init_module(void)
#if LINUX_VERSION_CODE < KERNEL_VER(2,1,18) #if LINUX_VERSION_CODE < KERNEL_VER(2,1,18)
register_symtab(0); /* remove global ftape symbols */ register_symtab(0); /* remove global ftape symbols */
#else #else
#if 0 /* FIXME --RR */
if (!mod_member_present(&__this_module, can_unload)) if (!mod_member_present(&__this_module, can_unload))
return -EBUSY; return -EBUSY;
__this_module.can_unload = can_unload; __this_module.can_unload = can_unload;
#endif
#endif #endif
result = zft_compressor_init(); result = zft_compressor_init();
keep_module_locked = 0; keep_module_locked = 0;
......
...@@ -31,14 +31,22 @@ static rwlock_t file_systems_lock = RW_LOCK_UNLOCKED; ...@@ -31,14 +31,22 @@ static rwlock_t file_systems_lock = RW_LOCK_UNLOCKED;
/* WARNING: This can be used only if we _already_ own a reference */ /* WARNING: This can be used only if we _already_ own a reference */
void get_filesystem(struct file_system_type *fs) void get_filesystem(struct file_system_type *fs)
{ {
if (fs->owner) if (!try_module_get(fs->owner)) {
__MOD_INC_USE_COUNT(fs->owner); #ifdef CONFIG_MODULE_UNLOAD
unsigned int cpu = get_cpu();
local_inc(&fs->owner->ref[cpu].count);
put_cpu();
#else
/* Getting filesystem while it's starting up? We're
already supposed to have a reference. */
BUG();
#endif
}
} }
void put_filesystem(struct file_system_type *fs) void put_filesystem(struct file_system_type *fs)
{ {
if (fs->owner) module_put(fs->owner);
__MOD_DEC_USE_COUNT(fs->owner);
} }
static struct file_system_type **find_filesystem(const char *name) static struct file_system_type **find_filesystem(const char *name)
......
...@@ -296,17 +296,6 @@ static struct file_operations proc_modules_operations = { ...@@ -296,17 +296,6 @@ static struct file_operations proc_modules_operations = {
.llseek = seq_lseek, .llseek = seq_lseek,
.release = seq_release, .release = seq_release,
}; };
extern struct seq_operations ksyms_op;
static int ksyms_open(struct inode *inode, struct file *file)
{
return seq_open(file, &ksyms_op);
}
static struct file_operations proc_ksyms_operations = {
.open = ksyms_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#endif #endif
extern struct seq_operations slabinfo_op; extern struct seq_operations slabinfo_op;
...@@ -604,7 +593,6 @@ void __init proc_misc_init(void) ...@@ -604,7 +593,6 @@ void __init proc_misc_init(void)
create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations); create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
#ifdef CONFIG_MODULES #ifdef CONFIG_MODULES
create_seq_entry("modules", 0, &proc_modules_operations); create_seq_entry("modules", 0, &proc_modules_operations);
create_seq_entry("ksyms", 0, &proc_ksyms_operations);
#endif #endif
proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
if (proc_root_kcore) { if (proc_root_kcore) {
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/device.h> #include <linux/device.h>
#include <linux/node.h> #include <linux/node.h>
#include <asm/semaphore.h>
struct cpu { struct cpu {
int node_id; /* The node which contains the CPU */ int node_id; /* The node which contains the CPU */
...@@ -29,4 +30,6 @@ struct cpu { ...@@ -29,4 +30,6 @@ struct cpu {
extern int register_cpu(struct cpu *, int, struct node *); extern int register_cpu(struct cpu *, int, struct node *);
/* Stop CPUs going up and down. */
extern struct semaphore cpucontrol;
#endif /* _LINUX_CPU_H_ */ #endif /* _LINUX_CPU_H_ */
...@@ -198,6 +198,9 @@ typedef struct { ...@@ -198,6 +198,9 @@ typedef struct {
#define ELF32_R_SYM(x) ((x) >> 8) #define ELF32_R_SYM(x) ((x) >> 8)
#define ELF32_R_TYPE(x) ((x) & 0xff) #define ELF32_R_TYPE(x) ((x) & 0xff)
#define ELF64_R_SYM(i) ((i) >> 32)
#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
#define R_386_NONE 0 #define R_386_NONE 0
#define R_386_32 1 #define R_386_32 1
#define R_386_PC32 2 #define R_386_PC32 2
...@@ -295,6 +298,7 @@ typedef struct { ...@@ -295,6 +298,7 @@ typedef struct {
#define R_SPARC_PCPLT10 29 #define R_SPARC_PCPLT10 29
#define R_SPARC_10 30 #define R_SPARC_10 30
#define R_SPARC_11 31 #define R_SPARC_11 31
#define R_SPARC_64 32
#define R_SPARC_WDISP16 40 #define R_SPARC_WDISP16 40
#define R_SPARC_WDISP19 41 #define R_SPARC_WDISP19 41
#define R_SPARC_7 43 #define R_SPARC_7 43
...@@ -369,6 +373,47 @@ typedef struct { ...@@ -369,6 +373,47 @@ typedef struct {
#define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */ #define R_ALPHA_JMP_SLOT 26 /* Create PLT entry */
#define R_ALPHA_RELATIVE 27 /* Adjust by program base */ #define R_ALPHA_RELATIVE 27 /* Adjust by program base */
/* PowerPC relocations defined by the ABIs */
#define R_PPC_NONE 0
#define R_PPC_ADDR32 1 /* 32bit absolute address */
#define R_PPC_ADDR24 2 /* 26bit address, 2 bits ignored. */
#define R_PPC_ADDR16 3 /* 16bit absolute address */
#define R_PPC_ADDR16_LO 4 /* lower 16bit of absolute address */
#define R_PPC_ADDR16_HI 5 /* high 16bit of absolute address */
#define R_PPC_ADDR16_HA 6 /* adjusted high 16bit */
#define R_PPC_ADDR14 7 /* 16bit address, 2 bits ignored */
#define R_PPC_ADDR14_BRTAKEN 8
#define R_PPC_ADDR14_BRNTAKEN 9
#define R_PPC_REL24 10 /* PC relative 26 bit */
#define R_PPC_REL14 11 /* PC relative 16 bit */
#define R_PPC_REL14_BRTAKEN 12
#define R_PPC_REL14_BRNTAKEN 13
#define R_PPC_GOT16 14
#define R_PPC_GOT16_LO 15
#define R_PPC_GOT16_HI 16
#define R_PPC_GOT16_HA 17
#define R_PPC_PLTREL24 18
#define R_PPC_COPY 19
#define R_PPC_GLOB_DAT 20
#define R_PPC_JMP_SLOT 21
#define R_PPC_RELATIVE 22
#define R_PPC_LOCAL24PC 23
#define R_PPC_UADDR32 24
#define R_PPC_UADDR16 25
#define R_PPC_REL32 26
#define R_PPC_PLT32 27
#define R_PPC_PLTREL32 28
#define R_PPC_PLT16_LO 29
#define R_PPC_PLT16_HI 30
#define R_PPC_PLT16_HA 31
#define R_PPC_SDAREL16 32
#define R_PPC_SECTOFF 33
#define R_PPC_SECTOFF_LO 34
#define R_PPC_SECTOFF_HI 35
#define R_PPC_SECTOFF_HA 36
/* Keep this the last entry. */
#define R_PPC_NUM 37
/* Legal values for e_flags field of Elf64_Ehdr. */ /* Legal values for e_flags field of Elf64_Ehdr. */
#define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */ #define EF_ALPHA_32BIT 1 /* All addresses are below 2GB */
......
...@@ -38,17 +38,30 @@ ...@@ -38,17 +38,30 @@
* Also note, that this data cannot be "const". * Also note, that this data cannot be "const".
*/ */
#ifndef MODULE /* These are for everybody (although not all archs will actually
discard it in modules) */
#define __init __attribute__ ((__section__ (".init.text")))
#define __initdata __attribute__ ((__section__ (".init.data")))
#define __exit __attribute__ ((__section__(".exit.text")))
#define __exitdata __attribute__ ((__section__(".exit.data")))
#define __exit_call __attribute__ ((unused,__section__ (".exitcall.exit")))
#ifndef __ASSEMBLY__ /* For assembly routines */
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
#ifndef __ASSEMBLY__
/* /*
* Used for initialization calls.. * Used for initialization calls..
*/ */
typedef int (*initcall_t)(void); typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void); typedef void (*exitcall_t)(void);
#endif
extern initcall_t __initcall_start, __initcall_end; #ifndef MODULE
#ifndef __ASSEMBLY__
/* initcalls are now grouped by functionality into separate /* initcalls are now grouped by functionality into separate
* subsections. Ordering inside the subsections is determined * subsections. Ordering inside the subsections is determined
...@@ -85,37 +98,19 @@ extern struct kernel_param __setup_start, __setup_end; ...@@ -85,37 +98,19 @@ extern struct kernel_param __setup_start, __setup_end;
#define __setup(str, fn) \ #define __setup(str, fn) \
static char __setup_str_##fn[] __initdata = str; \ static char __setup_str_##fn[] __initdata = str; \
static struct kernel_param __setup_##fn __attribute__((unused)) __initsetup = { __setup_str_##fn, fn } static struct kernel_param __setup_##fn \
__attribute__((unused,__section__ (".init.setup"))) \
= { __setup_str_##fn, fn }
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
/*
* Mark functions and data as being only used at initialization
* or exit time.
*/
#define __init __attribute__ ((__section__ (".init.text")))
#define __exit __attribute__ ((unused, __section__(".exit.text")))
#define __initdata __attribute__ ((__section__ (".init.data")))
#define __exitdata __attribute__ ((unused, __section__ (".exit.data")))
#define __initsetup __attribute__ ((unused,__section__ (".init.setup")))
#define __init_call(level) __attribute__ ((unused,__section__ (".initcall" level ".init")))
#define __exit_call __attribute__ ((unused,__section__ (".exitcall.exit")))
/* For assembly routines */
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
#define __INITDATA .section ".init.data","aw"
/** /**
* module_init() - driver initialization entry point * module_init() - driver initialization entry point
* @x: function to be run at kernel boot time or module insertion * @x: function to be run at kernel boot time or module insertion
* *
* module_init() will add the driver initialization routine in * module_init() will either be called during do_initcalls (if
* the "__initcall.int" code segment if the driver is checked as * builtin) or at module insertion time (if a module). There can only
* "y" or static, or else it will wrap the driver initialization * be one per module. */
* routine with init_module() which is used by insmod and
* modprobe when the driver is used as a module.
*/
#define module_init(x) __initcall(x); #define module_init(x) __initcall(x);
/** /**
...@@ -126,39 +121,21 @@ extern struct kernel_param __setup_start, __setup_end; ...@@ -126,39 +121,21 @@ extern struct kernel_param __setup_start, __setup_end;
* with cleanup_module() when used with rmmod when * with cleanup_module() when used with rmmod when
* the driver is a module. If the driver is statically * the driver is a module. If the driver is statically
* compiled into the kernel, module_exit() has no effect. * compiled into the kernel, module_exit() has no effect.
* There can only be one per module.
*/ */
#define module_exit(x) __exitcall(x); #define module_exit(x) __exitcall(x);
#else /**
* no_module_init - code needs no initialization.
#define __init *
#define __exit * The equivalent of declaring an empty init function which returns 0.
#define __initdata * Every module must have exactly one module_init() or no_module_init
#define __exitdata * invocation. */
#define __initcall(fn) #define no_module_init
/* For assembly routines */
#define __INIT
#define __FINIT
#define __INITDATA
/* These macros create a dummy inline: gcc 2.9x does not count alias
as usage, hence the `unused function' warning when __init functions
are declared static. We use the dummy __*_module_inline functions
both to kill the warning and check the type of the init/cleanup
function. */
typedef int (*__init_module_func_t)(void);
typedef void (*__cleanup_module_func_t)(void);
#define module_init(x) \
int init_module(void) __attribute__((alias(#x))); \
static inline __init_module_func_t __init_module_inline(void) \
{ return x; }
#define module_exit(x) \
void cleanup_module(void) __attribute__((alias(#x))); \
static inline __cleanup_module_func_t __cleanup_module_inline(void) \
{ return x; }
#define __setup(str,func) /* nothing */ #else /* MODULE */
/* Don't use these in modules, but some people do... */
#define core_initcall(fn) module_init(fn) #define core_initcall(fn) module_init(fn)
#define postcore_initcall(fn) module_init(fn) #define postcore_initcall(fn) module_init(fn)
#define arch_initcall(fn) module_init(fn) #define arch_initcall(fn) module_init(fn)
...@@ -167,6 +144,34 @@ typedef void (*__cleanup_module_func_t)(void); ...@@ -167,6 +144,34 @@ typedef void (*__cleanup_module_func_t)(void);
#define device_initcall(fn) module_init(fn) #define device_initcall(fn) module_init(fn)
#define late_initcall(fn) module_init(fn) #define late_initcall(fn) module_init(fn)
/* Each module knows its own name. */
#define __DEFINE_MODULE_NAME \
char __module_name[] __attribute__((section(".modulename"))) = \
__stringify(KBUILD_MODNAME)
/* These macros create a dummy inline: gcc 2.9x does not count alias
as usage, hence the `unused function' warning when __init functions
are declared static. We use the dummy __*_module_inline functions
both to kill the warning and check the type of the init/cleanup
function. */
/* Each module must use one module_init(), or one no_module_init */
#define module_init(initfn) \
__DEFINE_MODULE_NAME; \
static inline initcall_t __inittest(void) \
{ return initfn; } \
int __initfn(void) __attribute__((alias(#initfn)));
#define no_module_init __DEFINE_MODULE_NAME
/* This is only required if you want to be unloadable. */
#define module_exit(exitfn) \
static inline exitcall_t __exittest(void) \
{ return exitfn; } \
void __exitfn(void) __attribute__((alias(#exitfn)));
#define __setup(str,func) /* nothing */
#endif #endif
/* Data marked not to be saved by software_suspend() */ /* Data marked not to be saved by software_suspend() */
......
...@@ -28,6 +28,7 @@ extern int request_module(const char * name); ...@@ -28,6 +28,7 @@ extern int request_module(const char * name);
static inline int request_module(const char * name) { return -ENOSYS; } static inline int request_module(const char * name) { return -ENOSYS; }
#endif #endif
#define try_then_request_module(x, mod) ((x) ?: request_module(mod), (x))
extern int exec_usermodehelper(char *program_path, char *argv[], char *envp[]); extern int exec_usermodehelper(char *program_path, char *argv[], char *envp[]);
extern int call_usermodehelper(char *path, char *argv[], char *envp[]); extern int call_usermodehelper(char *path, char *argv[], char *envp[]);
......
#ifndef _LINUX_MODULE_H
#define _LINUX_MODULE_H
/* /*
* Dynamic loading of modules into the kernel. * Dynamic loading of modules into the kernel.
* *
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996 * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Rewritten again by Rusty Russell, 2002
*/ */
#ifndef _LINUX_MODULE_H
#define _LINUX_MODULE_H
#include <linux/config.h> #include <linux/config.h>
#include <linux/sched.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/errno.h> #include <linux/elf.h>
#include <linux/stat.h>
#include <asm/atomic.h> #include <linux/compiler.h>
#include <linux/cache.h>
/* Don't need to bring in all of uaccess.h just for this decl. */ #include <linux/kmod.h>
struct exception_table_entry; #include <asm/module.h>
#include <asm/uaccess.h> /* For struct exception_table_entry */
/* Used by get_kernel_syms, which is obsolete. */
struct kernel_sym /* Not Yet Implemented */
{ #define MODULE_LICENSE(name)
unsigned long value; #define MODULE_AUTHOR(name)
char name[60]; /* should have been 64-sizeof(long); oh well */ #define MODULE_DESCRIPTION(desc)
}; #define MODULE_SUPPORTED_DEVICE(name)
#define MODULE_GENERIC_TABLE(gtype,name)
#define MODULE_DEVICE_TABLE(type,name)
#define MODULE_PARM_DESC(var,desc)
#define print_symbol(format, addr)
#define print_modules()
struct module_symbol #define MODULE_NAME_LEN (64 - sizeof(unsigned long))
struct kernel_symbol
{ {
unsigned long value; unsigned long value;
const char *name; char name[MODULE_NAME_LEN];
}; };
struct module_ref #ifdef MODULE
{ /* This is magically filled in by the linker, but THIS_MODULE must be
struct module *dep; /* "parent" pointer */ a constant so it works in initializers. */
struct module *ref; /* "child" pointer */ extern struct module __this_module;
struct module_ref *next_ref; #define THIS_MODULE (&__this_module)
}; #else
#define THIS_MODULE ((struct module *)0)
/* TBD */ #endif
struct module_persist;
struct module #ifdef CONFIG_MODULES
/* Get/put a kernel symbol (calls must be symmetric) */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
#define symbol_get(x) ((typeof(&x))(__symbol_get(#x)))
#define symbol_put(x) __symbol_put(#x)
/* For every exported symbol, place a struct in the __ksymtab section */
#define EXPORT_SYMBOL(sym) \
const struct kernel_symbol __ksymtab_##sym \
__attribute__((section("__ksymtab"))) \
= { (unsigned long)&sym, #sym }
#define EXPORT_SYMBOL_NOVERS(sym) EXPORT_SYMBOL(sym)
#define EXPORT_SYMBOL_GPL(sym) EXPORT_SYMBOL(sym)
struct kernel_symbol_group
{ {
unsigned long size_of_struct; /* == sizeof(module) */ /* Links us into the global symbol list */
struct module *next; struct list_head list;
const char *name;
unsigned long size;
union
{
atomic_t usecount;
long pad;
} uc; /* Needs to keep its size - so says rth */
unsigned long flags; /* AUTOCLEAN et al */
unsigned nsyms; /* Module which owns it (if any) */
unsigned ndeps; struct module *owner;
struct module_symbol *syms; unsigned int num_syms;
struct module_ref *deps; const struct kernel_symbol *syms;
struct module_ref *refs;
int (*init)(void);
void (*cleanup)(void);
const struct exception_table_entry *ex_table_start;
const struct exception_table_entry *ex_table_end;
#ifdef __alpha__
unsigned long gp;
#endif
/* Members past this point are extensions to the basic
module support and are optional. Use mod_member_present()
to examine them. */
const struct module_persist *persist_start;
const struct module_persist *persist_end;
int (*can_unload)(void);
int runsize; /* In modutils, not currently used */
const char *kallsyms_start; /* All symbols for kernel debugging */
const char *kallsyms_end;
const char *archdata_start; /* arch specific data for module */
const char *archdata_end;
const char *kernel_data; /* Reserved for kernel internal use */
}; };
struct module_info struct exception_table
{ {
unsigned long addr;
unsigned long size;
unsigned long flags;
long usecount;
};
/* Bits of module.flags. */
#define MOD_UNINITIALIZED 0
#define MOD_RUNNING 1
#define MOD_DELETED 2
#define MOD_AUTOCLEAN 4
#define MOD_VISITED 8
#define MOD_USED_ONCE 16
#define MOD_JUST_FREED 32
#define MOD_INITIALIZING 64
/* Values for query_module's which. */
#define QM_MODULES 1
#define QM_DEPS 2
#define QM_REFS 3
#define QM_SYMBOLS 4
#define QM_INFO 5
/* Can the module be queried? */
#define MOD_CAN_QUERY(mod) (((mod)->flags & (MOD_RUNNING | MOD_INITIALIZING)) && !((mod)->flags & MOD_DELETED))
/* When struct module is extended, we must test whether the new member
is present in the header received from insmod before we can use it.
This function returns true if the member is present. */
#define mod_member_present(mod,member) \
((unsigned long)(&((struct module *)0L)->member + 1) \
<= (mod)->size_of_struct)
/*
* Ditto for archdata. Assumes mod->archdata_start and mod->archdata_end
* are validated elsewhere.
*/
#define mod_archdata_member_present(mod, type, member) \
(((unsigned long)(&((type *)0L)->member) + \
sizeof(((type *)0L)->member)) <= \
((mod)->archdata_end - (mod)->archdata_start))
/* Check if an address p with number of entries n is within the body of module m */
#define mod_bound(p, n, m) ((unsigned long)(p) >= ((unsigned long)(m) + ((m)->size_of_struct)) && \
(unsigned long)((p)+(n)) <= (unsigned long)(m) + (m)->size)
/* Backwards compatibility definition. */
#define GET_USE_COUNT(module) (atomic_read(&(module)->uc.usecount))
/* Poke the use count of a module. */
#define __MOD_INC_USE_COUNT(mod) \
(atomic_inc(&(mod)->uc.usecount), (mod)->flags |= MOD_VISITED|MOD_USED_ONCE)
#define __MOD_DEC_USE_COUNT(mod) \
(atomic_dec(&(mod)->uc.usecount), (mod)->flags |= MOD_VISITED)
#define __MOD_IN_USE(mod) \
(mod_member_present((mod), can_unload) && (mod)->can_unload \
? (mod)->can_unload() : atomic_read(&(mod)->uc.usecount))
/* Indirect stringification. */
#define __MODULE_STRING_1(x) #x
#define __MODULE_STRING(x) __MODULE_STRING_1(x)
/* Generic inter module communication.
*
* NOTE: This interface is intended for small amounts of data that are
* passed between two objects and either or both of the objects
* might be compiled as modules. Do not over use this interface.
*
* If more than two objects need to communicate then you probably
* need a specific interface instead of abusing this generic
* interface. If both objects are *always* built into the kernel
* then a global extern variable is good enough, you do not need
* this interface.
*
* Keith Owens <kaos@ocs.com.au> 28 Oct 2000.
*/
#ifdef __KERNEL__
#define HAVE_INTER_MODULE
extern void inter_module_register(const char *, struct module *, const void *);
extern void inter_module_unregister(const char *);
extern const void *inter_module_get(const char *);
extern const void *inter_module_get_request(const char *, const char *);
extern void inter_module_put(const char *);
struct inter_module_entry {
struct list_head list; struct list_head list;
const char *im_name;
struct module *owner; unsigned int num_entries;
const void *userdata; const struct exception_table_entry *entry;
}; };
extern int try_inc_mod_count(struct module *mod); struct module_ref
#endif /* __KERNEL__ */ {
atomic_t count;
} ____cacheline_aligned;
#if defined(MODULE) && !defined(__GENKSYMS__) struct module
{
/* Am I live (yet)? */
int live;
/* Embedded module documentation macros. */ /* Member of list of modules */
struct list_head list;
/* For documentation purposes only. */ /* Unique handle for this module */
char name[MODULE_NAME_LEN];
#define MODULE_AUTHOR(name) \ /* Exported symbols */
const char __module_author[] __attribute__((section(".modinfo"))) = \ struct kernel_symbol_group symbols;
"author=" name
#define MODULE_DESCRIPTION(desc) \ /* Exception tables */
const char __module_description[] __attribute__((section(".modinfo"))) = \ struct exception_table extable;
"description=" desc
/* Could potentially be used by kmod... */ /* Startup function. */
int (*init)(void);
#define MODULE_SUPPORTED_DEVICE(dev) \ /* If this is non-NULL, vfree after init() returns */
const char __module_device[] __attribute__((section(".modinfo"))) = \ void *module_init;
"device=" dev
/* Used to verify parameters given to the module. The TYPE arg should /* Here is the actual code + data, vfree'd on unload. */
be a string in the following format: void *module_core;
[min[-max]]{b,h,i,l,s}
The MIN and MAX specifiers delimit the length of the array. If MAX
is omitted, it defaults to MIN; if both are omitted, the default is 1.
The final character is a type specifier:
b byte
h short
i int
l long
s string
*/
#define MODULE_PARM(var,type) \ /* Here are the sizes of the init and core sections */
const char __module_parm_##var[] \ unsigned long init_size, core_size;
__attribute__((section(".modinfo"))) = \
"parm_" __MODULE_STRING(var) "=" type
#define MODULE_PARM_DESC(var,desc) \ /* Arch-specific module values */
const char __module_parm_desc_##var[] \ struct mod_arch_specific arch;
__attribute__((section(".modinfo"))) = \
"parm_desc_" __MODULE_STRING(var) "=" desc
/* /* Am I unsafe to unload? */
* MODULE_DEVICE_TABLE exports information about devices int unsafe;
* currently supported by this module. A device type, such as PCI,
* is a C-like identifier passed as the first arg to this macro.
* The second macro arg is the variable containing the device
* information being made public.
*
* The following is a list of known device types (arg 1),
* and the C types which are to be passed as arg 2.
* pci - struct pci_device_id - List of PCI ids supported by this module
* isapnp - struct isapnp_device_id - List of ISA PnP ids supported by this module
* usb - struct usb_device_id - List of USB ids supported by this module
*/
#define MODULE_GENERIC_TABLE(gtype,name) \
static const unsigned long __module_##gtype##_size \
__attribute__ ((unused)) = sizeof(struct gtype##_id); \
static const struct gtype##_id * __module_##gtype##_table \
__attribute__ ((unused)) = name
/* #ifdef CONFIG_MODULE_UNLOAD
* The following license idents are currently accepted as indicating free /* Reference counts */
* software modules struct module_ref ref[NR_CPUS];
*
* "GPL" [GNU Public License v2 or later]
* "GPL v2" [GNU Public License v2]
* "GPL and additional rights" [GNU Public License v2 rights and more]
* "Dual BSD/GPL" [GNU Public License v2 or BSD license choice]
* "Dual MPL/GPL" [GNU Public License v2 or Mozilla license choice]
*
* The following other idents are available
*
* "Proprietary" [Non free products]
*
* There are dual licensed components, but when running with Linux it is the
* GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL
* is a GPL combined work.
*
* This exists for several reasons
* 1. So modinfo can show license info for users wanting to vet their setup
* is free
* 2. So the community can ignore bug reports including proprietary modules
* 3. So vendors can do likewise based on their own policies
*/
#define MODULE_LICENSE(license) \ /* What modules depend on me? */
static const char __module_license[] \ struct list_head modules_which_use_me;
__attribute__((section(".modinfo"), unused)) = "license=" license
/* Define the module variable, and usage macros. */ /* Who is waiting for us to be unloaded */
extern struct module __this_module; struct task_struct *waiter;
#define THIS_MODULE (&__this_module) /* Destruction function. */
#define MOD_INC_USE_COUNT __MOD_INC_USE_COUNT(THIS_MODULE) void (*exit)(void);
#define MOD_DEC_USE_COUNT __MOD_DEC_USE_COUNT(THIS_MODULE)
#define MOD_IN_USE __MOD_IN_USE(THIS_MODULE)
#include <linux/version.h>
static const char __module_kernel_version[]
__attribute__((section(".modinfo"), unused)) =
"kernel_version=" UTS_RELEASE;
#ifdef CONFIG_MODVERSIONS
static const char __module_using_checksums[]
__attribute__((section(".modinfo"), unused)) =
"using_checksums=1";
#endif #endif
#else /* MODULE */ /* The command line arguments (may be mangled). People like
keeping pointers to this stuff */
#define MODULE_AUTHOR(name) char args[0];
#define MODULE_LICENSE(license) };
#define MODULE_DESCRIPTION(desc)
#define MODULE_SUPPORTED_DEVICE(name)
#define MODULE_PARM(var,type)
#define MODULE_PARM_DESC(var,desc)
/* Create a dummy reference to the table to suppress gcc unused warnings. Put
* the reference in the .data.exit section which is discarded when code is built
* in, so the reference does not bloat the running kernel. Note: cannot be
* const, other exit data may be writable.
*/
#define MODULE_GENERIC_TABLE(gtype,name) \
static const struct gtype##_id * __module_##gtype##_table \
__attribute__ ((unused, __section__(".exit.data"))) = name
#ifndef __GENKSYMS__
#define THIS_MODULE NULL
#define MOD_INC_USE_COUNT do { } while (0)
#define MOD_DEC_USE_COUNT do { } while (0)
#define MOD_IN_USE 1
#endif /* !__GENKSYMS__ */ /* Helper function for arch-specific module loaders */
unsigned long find_symbol_internal(Elf_Shdr *sechdrs,
unsigned int symindex,
const char *strtab,
const char *name,
struct module *mod,
struct kernel_symbol_group **group);
/* These must be implemented by the specific architecture */
/* vmalloc AND zero for the non-releasable code; return ERR_PTR() on error. */
void *module_core_alloc(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *secstrings,
struct module *mod);
/* vmalloc and zero (if any) for sections to be freed after init.
Return ERR_PTR() on error. */
void *module_init_alloc(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *secstrings,
struct module *mod);
/* Apply the given relocation to the (simplified) ELF. Return -error
or 0. */
int apply_relocate(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
/* Apply the given add relocation to the (simplified) ELF. Return
-error or 0 */
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *mod);
/* Any final processing of module before access. Return -error or 0. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod);
/* Free memory returned from module_core_alloc/module_init_alloc */
void module_free(struct module *mod, void *module_region);
#ifdef CONFIG_MODULE_UNLOAD
void __symbol_put(const char *symbol);
void symbol_put_addr(void *addr);
/* We only need protection against local interrupts. */
#ifndef __HAVE_ARCH_LOCAL_INC
#define local_inc(x) atomic_inc(x)
#define local_dec(x) atomic_dec(x)
#endif
#endif /* MODULE */ static inline int try_module_get(struct module *module)
{
int ret = 1;
if (module) {
unsigned int cpu = get_cpu();
if (likely(module->live))
local_inc(&module->ref[cpu].count);
else
ret = 0;
put_cpu();
}
return ret;
}
#define MODULE_DEVICE_TABLE(type,name) \ static inline void module_put(struct module *module)
MODULE_GENERIC_TABLE(type##_device,name) {
if (module) {
unsigned int cpu = get_cpu();
local_dec(&module->ref[cpu].count);
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module->live))
wake_up_process(module->waiter);
put_cpu();
}
}
/* Export a symbol either from the kernel or a module. #else /*!CONFIG_MODULE_UNLOAD*/
static inline int try_module_get(struct module *module)
{
return !module || module->live;
}
static inline void module_put(struct module *module)
{
}
#define symbol_put(x) do { } while(0)
#define symbol_put_addr(p) do { } while(0)
In the kernel, the symbol is added to the kernel's global symbol table. #endif /* CONFIG_MODULE_UNLOAD */
In a module, it controls which variables are exported. If no #define __unsafe(mod) \
variables are explicitly exported, the action is controled by the do { \
insmod -[xX] flags. Otherwise, only the variables listed are exported. if (mod && !(mod)->unsafe) { \
This obviates the need for the old register_symtab() function. */ printk(KERN_WARNING \
"Module %s cannot be unloaded due to unsafe usage in" \
" %s:%u\n", (mod)->name, __FILE__, __LINE__); \
(mod)->unsafe = 1; \
} \
} while(0)
/* So how does the CONFIG_MODVERSIONS magic work? #else /* !CONFIG_MODULES... */
* #define EXPORT_SYMBOL(sym)
* A module can only be loaded if it's undefined symbols can be resolved #define EXPORT_SYMBOL_GPL(sym)
* using symbols the kernel exports for that purpose. The idea behind #define EXPORT_SYMBOL_NOVERS(sym)
* CONFIG_MODVERSIONS is to mangle those symbols depending on their
* definition (see man genksyms) - a change in the definition will thus
* caused the mangled name to change, and the module will refuse to
* load due to unresolved symbols.
*
* Let's start with taking a look how things work when we don't use
* CONFIG_MODVERSIONS. In this case, the only thing which is worth
* mentioning is the EXPORT_SYMBOL() macro. Using EXPORT_SYMBOL(foo)
* will expand into __EXPORT_SYMBOL(foo, "foo"), which then uses
* some ELF section magic to generate a list of pairs
* (address, symbol_name), which is used to resolve undefined
* symbols into addresses when loading a module.
*
* That's easy. Let's get back to CONFIG_MODVERSIONS=y.
*
* The first step is to generate the checksums. This is done at
* "make dep" time, code which exports symbols (using EXPORT_SYMTAB)
* is preprocessed with the additional macro __GENKSYMS__ set and fed
* into genksyms.
* At this stage, for each file that exports symbols an corresponding
* file in include/linux/module is generated, which for each exported
* symbol contains
*
* #define __ver_schedule_task 2d6c3d04
* #define schedule_task _set_ver(schedule_task)
*
* In addition, include/linux/modversions.h is generated, which
* looks like
*
* #include <linux/modsetver.h>
* #include <linux/modules/kernel__context.ver>
* <<<lists all of the files just described>>>
*
* Let's see what happens for different cases during compilation.
*
* o compile a file into the kernel which does not export symbols:
*
* Since the file is known to not export symbols (it's not listed
* in the export-objs variable in the corresponding Makefile), the
* kernel build system does compile it with no extra flags set.
* The macro EXPORT_SYMTAB is unset, and you can see below that
* files which still try to use EXPORT_SYMBOL() will be trapped.
* Other than that, just regular compilation.
*
* o compile a file into the kernel which does export symbols:
*
* In this case, the file will compiled with the macro
* EXPORT_SYMTAB defined.
* As MODULE is not set, we hit this case from below:
*
* #define _set_ver(sym) sym
* #include <linux/modversions.h>
*
* #define EXPORT_SYMBOL(var) \
* __EXPORT_SYMBOL(var, __MODULE_STRING(__VERSIONED_SYMBOL(var)))
*
* The first two lines will in essence include
*
* #define __ver_schedule_task 2d6c3d04
* #define schedule_task schedule_task
*
* for each symbol. The second line really doesn't do much, but the
* first one gives us the checksums we generated before.
*
* So EXPORT_SYMBOL(schedule_task) will expand into
* __EXPORT_SYMBOL(schedule_task, "schedule_task_R2d6c3d04"),
* hence exporting the symbol for schedule_task under the name of
* schedule_task_R2d6c3d04.
*
* o compile a file into a module
*
* In this case, the kernel build system will add
* "-include include/linux/modversions.h" to the command line. So
* modversions.h is prepended to the actual source, turning into
*
* #define __ver_schedule_task 2d6c3d04
* #define schedule_task schedule_task_R2d6c3d04
*
* Though the source code says "schedule_task", the compiler will
* see the mangled symbol everywhere. So the module will end up with
* an undefined symbol "schedule_task_R2d6c3d04" - which is exactly
* the symbols which occurs in the kernel's list of symbols, with
* a value of &schedule_task - it all comes together nicely.
*
* One question remains: What happens if a module itself exports
* a symbol - the answer is simple: It's actually handled as the
* CONFIG_MODVERSIONS=n case described first, only that the compiler
* sees the mangled symbol everywhere. So &foo_R12345678 is exported
* with the name "foo_R12345678". Think about it. It all makes sense.
*/
#if defined(__GENKSYMS__) /* Get/put a kernel symbol (calls should be symmetric) */
#define symbol_get(x) (&(x))
#define symbol_put(x) do { } while(0)
/* We want the EXPORT_SYMBOL tag left intact for recognition. */ #define try_module_get(module) 1
#define module_put(module) do { } while(0)
#elif !defined(CONFIG_MODULES) #define __unsafe(mod)
#endif /* CONFIG_MODULES */
#define __EXPORT_SYMBOL(sym,str) /* For archs to search exception tables */
#define EXPORT_SYMBOL(var) extern struct list_head extables;
#define EXPORT_SYMBOL_NOVERS(var) extern spinlock_t modlist_lock;
#define EXPORT_SYMBOL_GPL(var)
#elif !defined(EXPORT_SYMTAB) #define symbol_request(x) try_then_request_module(symbol_get(x), "symbol:" #x)
#define __EXPORT_SYMBOL(sym,str) error this_object_must_be_defined_as_export_objs_in_the_Makefile
#define EXPORT_SYMBOL(var) error this_object_must_be_defined_as_export_objs_in_the_Makefile
#define EXPORT_SYMBOL_NOVERS(var) error this_object_must_be_defined_as_export_objs_in_the_Makefile
#define EXPORT_SYMBOL_GPL(var) error this_object_must_be_defined_as_export_objs_in_the_Makefile
/* BELOW HERE ALL THESE ARE OBSOLETE AND WILL VANISH */
#define __MOD_INC_USE_COUNT(mod) \
do { __unsafe(mod); (void)try_module_get(mod); } while(0)
#define __MOD_DEC_USE_COUNT(mod) module_put(mod)
#define SET_MODULE_OWNER(dev) ((dev)->owner = THIS_MODULE)
/* People do this inside their init routines, when the module isn't
"live" yet. They should no longer be doing that, but
meanwhile... */
#if defined(CONFIG_MODULE_UNLOAD) && defined(MODULE)
#define MOD_INC_USE_COUNT \
do { __unsafe(THIS_MODULE); local_inc(&THIS_MODULE->ref[get_cpu()].count); put_cpu(); } while (0)
#else #else
#define MOD_INC_USE_COUNT \
#define __EXPORT_SYMBOL(sym, str) \ do { __unsafe(THIS_MODULE); (void)try_module_get(THIS_MODULE); } while (0)
const char __kstrtab_##sym[] \
__attribute__((section(".kstrtab"))) = str; \
const struct module_symbol __ksymtab_##sym \
__attribute__((section("__ksymtab"))) = \
{ (unsigned long)&sym, __kstrtab_##sym }
#define __EXPORT_SYMBOL_GPL(sym, str) \
const char __kstrtab_##sym[] \
__attribute__((section(".kstrtab"))) = "GPLONLY_" str; \
const struct module_symbol __ksymtab_##sym \
__attribute__((section("__ksymtab"))) = \
{ (unsigned long)&sym, __kstrtab_##sym }
#if defined(CONFIG_MODVERSIONS) && !defined(MODULE)
#define _set_ver(sym) sym
#include <linux/modversions.h>
#define EXPORT_SYMBOL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(__VERSIONED_SYMBOL(var)))
#define EXPORT_SYMBOL_GPL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(__VERSIONED_SYMBOL(var)))
#else /* !defined (CONFIG_MODVERSIONS) || defined(MODULE) */
#define EXPORT_SYMBOL(var) __EXPORT_SYMBOL(var, __MODULE_STRING(var))
#define EXPORT_SYMBOL_GPL(var) __EXPORT_SYMBOL_GPL(var, __MODULE_STRING(var))
#endif /* defined(CONFIG_MODVERSIONS) && !defined(MODULE) */
#define EXPORT_SYMBOL_NOVERS(var) __EXPORT_SYMBOL(var, __MODULE_STRING(var))
#endif /* __GENKSYMS__ */
/*
* Force a module to export no symbols.
* EXPORT_NO_SYMBOLS is default now, leave the define around for sources
* which still have it
*/
#define EXPORT_NO_SYMBOLS
#ifdef CONFIG_MODULES
/*
* Always allocate a section "__ksymtab". If we encounter EXPORT_SYMBOL,
* the exported symbol will be added to it.
* If it remains empty, that tells modutils that we do not want to
* export any symbols (as opposed to it not being present, which means
* "export all symbols" to modutils)
*/
__asm__(".section __ksymtab,\"a\"\n.previous");
#endif #endif
#define MOD_DEC_USE_COUNT module_put(THIS_MODULE)
#ifdef CONFIG_MODULES #define try_inc_mod_count(mod) try_module_get(mod)
#define SET_MODULE_OWNER(some_struct) do { (some_struct)->owner = THIS_MODULE; } while (0) #define MODULE_PARM(parm,string)
#else #define EXPORT_NO_SYMBOLS
#define SET_MODULE_OWNER(some_struct) do { } while (0) extern int module_dummy_usage;
#define GET_USE_COUNT(module) (module_dummy_usage)
#define MOD_IN_USE 0
#define __mod_between(a_start, a_len, b_start, b_len) \
(((a_start) >= (b_start) && (a_start) <= (b_start)+(b_len)) \
|| ((a_start)+(a_len) >= (b_start) \
&& (a_start)+(a_len) <= (b_start)+(b_len)))
#define mod_bound(p, n, m) \
(((m)->module_init \
&& __mod_between((p),(n),(m)->module_init,(m)->init_size)) \
|| __mod_between((p),(n),(m)->module_core,(m)->core_size))
/* Old-style "I'll just call it init_module and it'll be run at
insert". Use module_init(myroutine) instead. */
#ifdef MODULE
/* Used as "int init_module(void) { ... }". Get funky to insert modname. */
#define init_module(voidarg) \
__initfn(void); \
char __module_name[] __attribute__((section(".modulename"))) = \
__stringify(KBUILD_MODNAME); \
int __initfn(void)
#define cleanup_module(voidarg) __exitfn(void)
#endif #endif
extern void print_modules(void); /* Use symbol_get and symbol_put instead. You'll thank me. */
#define HAVE_INTER_MODULE
#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS) extern void inter_module_register(const char *, struct module *, const void *);
extern void inter_module_unregister(const char *);
extern struct module *module_list; extern const void *inter_module_get(const char *);
extern const void *inter_module_get_request(const char *, const char *);
/* extern void inter_module_put(const char *);
* print_symbols takes a format string containing one %s.
* If support for resolving symbols is compiled in, the %s will
* be replaced by the closest symbol to the address and the entire
* string is printk()ed. Otherwise, nothing is printed.
*/
extern void print_symbol(const char *fmt, unsigned long address);
#else
static inline int
print_symbol(const char *fmt, unsigned long address)
{
return -ESRCH;
}
#endif
#endif /* _LINUX_MODULE_H */ #endif /* _LINUX_MODULE_H */
...@@ -116,21 +116,14 @@ config MODULES ...@@ -116,21 +116,14 @@ config MODULES
may want to make use of modules with this kernel in the future, then may want to make use of modules with this kernel in the future, then
say Y here. If unsure, say Y. say Y here. If unsure, say Y.
config MODVERSIONS config MODULE_UNLOAD
bool "Set version information on all module symbols" bool "Module unloading"
depends on MODULES depends on MODULES
---help--- help
Usually, modules have to be recompiled whenever you switch to a new Without this option you will not be able to unload any
kernel. Saying Y here makes it possible, and safe, to use the modules (note that some modules may not be unloadable
same modules even after compiling a new kernel; this requires the anyway), which makes your kernel slightly smaller and
program modprobe. All the software needed for module support is in simpler. If unsure, say Y.
the modutils package (check the file <file:Documentation/Changes>
for location and latest version). NOTE: if you say Y here but don't
have the program genksyms (which is also contained in the above
mentioned modutils package), then the building of your kernel will
fail. If you are going to use modules that are generated from
non-kernel sources, you would benefit from this option. Otherwise
it's not that important. So, N ought to be a safe bet.
config KMOD config KMOD
bool "Kernel module loader" bool "Kernel module loader"
......
...@@ -406,9 +406,6 @@ asmlinkage void __init start_kernel(void) ...@@ -406,9 +406,6 @@ asmlinkage void __init start_kernel(void)
* this. But we do want output early, in case something goes wrong. * this. But we do want output early, in case something goes wrong.
*/ */
console_init(); console_init();
#ifdef CONFIG_MODULES
init_modules();
#endif
profile_init(); profile_init();
kmem_cache_init(); kmem_cache_init();
local_irq_enable(); local_irq_enable();
...@@ -457,6 +454,8 @@ asmlinkage void __init start_kernel(void) ...@@ -457,6 +454,8 @@ asmlinkage void __init start_kernel(void)
struct task_struct *child_reaper = &init_task; struct task_struct *child_reaper = &init_task;
extern initcall_t __initcall_start, __initcall_end;
static void __init do_initcalls(void) static void __init do_initcalls(void)
{ {
initcall_t *call; initcall_t *call;
......
...@@ -4,18 +4,18 @@ ...@@ -4,18 +4,18 @@
export-objs = signal.o sys.o kmod.o workqueue.o ksyms.o pm.o exec_domain.o \ export-objs = signal.o sys.o kmod.o workqueue.o ksyms.o pm.o exec_domain.o \
printk.o platform.o suspend.o dma.o module.o cpufreq.o \ printk.o platform.o suspend.o dma.o module.o cpufreq.o \
profile.o rcupdate.o profile.o rcupdate.o intermodule.o
obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
module.o exit.o itimer.o time.o softirq.o resource.o \ exit.o itimer.o time.o softirq.o resource.o \
sysctl.o capability.o ptrace.o timer.o user.o \ sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o \ signal.o sys.o kmod.o workqueue.o futex.o platform.o pid.o \
rcupdate.o rcupdate.o intermodule.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o obj-$(CONFIG_SMP) += cpu.o
obj-$(CONFIG_UID16) += uid16.o obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += ksyms.o obj-$(CONFIG_MODULES) += ksyms.o module.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_CPU_FREQ) += cpufreq.o obj-$(CONFIG_CPU_FREQ) += cpufreq.o
......
...@@ -211,7 +211,12 @@ get_exec_domain_list(char *page) ...@@ -211,7 +211,12 @@ get_exec_domain_list(char *page)
for (ep = exec_domains; ep && len < PAGE_SIZE - 80; ep = ep->next) for (ep = exec_domains; ep && len < PAGE_SIZE - 80; ep = ep->next)
len += sprintf(page + len, "%d-%d\t%-16s\t[%s]\n", len += sprintf(page + len, "%d-%d\t%-16s\t[%s]\n",
ep->pers_low, ep->pers_high, ep->name, ep->pers_low, ep->pers_high, ep->name,
ep->module ? ep->module->name : "kernel"); #ifdef CONFIG_MODULES
ep->module ? ep->module->name : "kernel"
#else
"kernel"
#endif
);
read_unlock(&exec_domains_lock); read_unlock(&exec_domains_lock);
return (len); return (len);
} }
......
/* Deprecated, do not use. Moved from module.c to here. --RR */
/* Written by Keith Owens <kaos@ocs.com.au> Oct 2000 */
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
/* inter_module functions are always available, even when the kernel is
* compiled without modules. Consumers of inter_module_xxx routines
* will always work, even when both are built into the kernel, this
* approach removes lots of #ifdefs in mainline code.
*/
static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
static int kmalloc_failed;
struct inter_module_entry {
struct list_head list;
const char *im_name;
struct module *owner;
const void *userdata;
};
/**
* inter_module_register - register a new set of inter module data.
* @im_name: an arbitrary string to identify the data, must be unique
* @owner: module that is registering the data, always use THIS_MODULE
* @userdata: pointer to arbitrary userdata to be registered
*
* Description: Check that the im_name has not already been registered,
* complain if it has. For new data, add it to the inter_module_entry
* list.
*/
void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
{
struct list_head *tmp;
struct inter_module_entry *ime, *ime_new;
if (!(ime_new = kmalloc(sizeof(*ime), GFP_KERNEL))) {
/* Overloaded kernel, not fatal */
printk(KERN_ERR
"Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
im_name);
kmalloc_failed = 1;
return;
}
memset(ime_new, 0, sizeof(*ime_new));
ime_new->im_name = im_name;
ime_new->owner = owner;
ime_new->userdata = userdata;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
spin_unlock(&ime_lock);
kfree(ime_new);
/* Program logic error, fatal */
printk(KERN_ERR "inter_module_register: duplicate im_name '%s'", im_name);
BUG();
}
}
list_add(&(ime_new->list), &ime_list);
spin_unlock(&ime_lock);
}
/**
* inter_module_unregister - unregister a set of inter module data.
* @im_name: an arbitrary string to identify the data, must be unique
*
* Description: Check that the im_name has been registered, complain if
* it has not. For existing data, remove it from the
* inter_module_entry list.
*/
void inter_module_unregister(const char *im_name)
{
struct list_head *tmp;
struct inter_module_entry *ime;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
list_del(&(ime->list));
spin_unlock(&ime_lock);
kfree(ime);
return;
}
}
spin_unlock(&ime_lock);
if (kmalloc_failed) {
printk(KERN_ERR
"inter_module_unregister: no entry for '%s', "
"probably caused by previous kmalloc failure\n",
im_name);
return;
}
else {
/* Program logic error, fatal */
printk(KERN_ERR "inter_module_unregister: no entry for '%s'", im_name);
BUG();
}
}
/**
* inter_module_get - return arbitrary userdata from another module.
* @im_name: an arbitrary string to identify the data, must be unique
*
* Description: If the im_name has not been registered, return NULL.
* Try to increment the use count on the owning module, if that fails
* then return NULL. Otherwise return the userdata.
*/
const void *inter_module_get(const char *im_name)
{
struct list_head *tmp;
struct inter_module_entry *ime;
const void *result = NULL;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
if (try_inc_mod_count(ime->owner))
result = ime->userdata;
break;
}
}
spin_unlock(&ime_lock);
return(result);
}
/**
* inter_module_get_request - im get with automatic request_module.
* @im_name: an arbitrary string to identify the data, must be unique
* @modname: module that is expected to register im_name
*
* Description: If inter_module_get fails, do request_module then retry.
*/
const void *inter_module_get_request(const char *im_name, const char *modname)
{
const void *result = inter_module_get(im_name);
if (!result) {
request_module(modname);
result = inter_module_get(im_name);
}
return(result);
}
/**
* inter_module_put - release use of data from another module.
* @im_name: an arbitrary string to identify the data, must be unique
*
* Description: If the im_name has not been registered, complain,
* otherwise decrement the use count on the owning module.
*/
void inter_module_put(const char *im_name)
{
struct list_head *tmp;
struct inter_module_entry *ime;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
if (ime->owner)
__MOD_DEC_USE_COUNT(ime->owner);
spin_unlock(&ime_lock);
return;
}
}
spin_unlock(&ime_lock);
printk(KERN_ERR "inter_module_put: no entry for '%s'", im_name);
BUG();
}
EXPORT_SYMBOL(inter_module_register);
EXPORT_SYMBOL(inter_module_unregister);
EXPORT_SYMBOL(inter_module_get);
EXPORT_SYMBOL(inter_module_get_request);
EXPORT_SYMBOL(inter_module_put);
...@@ -155,7 +155,7 @@ char modprobe_path[256] = "/sbin/modprobe"; ...@@ -155,7 +155,7 @@ char modprobe_path[256] = "/sbin/modprobe";
static int exec_modprobe(void * module_name) static int exec_modprobe(void * module_name)
{ {
static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; static char * envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
char *argv[] = { modprobe_path, "-s", "-k", "--", (char*)module_name, NULL }; char *argv[] = { modprobe_path, "--", (char*)module_name, NULL };
int ret; int ret;
if (!system_running) if (!system_running)
......
...@@ -71,14 +71,6 @@ __attribute__((section("__ksymtab"))) = { ...@@ -71,14 +71,6 @@ __attribute__((section("__ksymtab"))) = {
}; };
#endif #endif
EXPORT_SYMBOL(inter_module_register);
EXPORT_SYMBOL(inter_module_unregister);
EXPORT_SYMBOL(inter_module_get);
EXPORT_SYMBOL(inter_module_get_request);
EXPORT_SYMBOL(inter_module_put);
EXPORT_SYMBOL(try_inc_mod_count);
/* process memory management */ /* process memory management */
EXPORT_SYMBOL(do_mmap_pgoff); EXPORT_SYMBOL(do_mmap_pgoff);
EXPORT_SYMBOL(do_munmap); EXPORT_SYMBOL(do_munmap);
......
/* Rewritten by Rusty Russell, on the backs of many others...
Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/config.h> #include <linux/config.h>
#include <linux/mm.h>
#include <linux/module.h> #include <linux/module.h>
#include <asm/module.h>
#include <asm/uaccess.h>
#include <linux/kallsyms.h>
#include <linux/vmalloc.h>
#include <linux/smp_lock.h>
#include <asm/pgalloc.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/kmod.h> #include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/fs.h> #include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
/* #if 0
* Originally by Anonymous (as far as I know...) #define DEBUGP printk
* Linux version by Bas Laarhoven <bas@vimec.nl> #else
* 0.99.14 version by Jon Tombs <jon@gtex02.us.es>, #define DEBUGP(fmt , ...)
* Heavily modified by Bjorn Ekwall <bj0rn@blox.se> May 1994 (C) #endif
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Add MOD_INITIALIZING Keith Owens <kaos@ocs.com.au> Nov 1999
* Add kallsyms support, Keith Owens <kaos@ocs.com.au> Apr 2000
* Add asm/module support, IA64 has special requirements. Keith Owens <kaos@ocs.com.au> Sep 2000
* Fix assorted bugs in module verification. Keith Owens <kaos@ocs.com.au> Sep 2000
* Fix sys_init_module race, Andrew Morton <andrewm@uow.edu.au> Oct 2000
* http://www.uwsg.iu.edu/hypermail/linux/kernel/0008.3/0379.html
* Replace xxx_module_symbol with inter_module_xxx. Keith Owens <kaos@ocs.com.au> Oct 2000
* Add a module list lock for kernel fault race fixing. Alan Cox <alan@redhat.com>
*
* This source is covered by the GNU GPL, the same as all kernel sources.
*/
#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
extern struct module_symbol __start___ksymtab[];
extern struct module_symbol __stop___ksymtab[];
extern const struct exception_table_entry __start___ex_table[]; extern const struct exception_table_entry __start___ex_table[];
extern const struct exception_table_entry __stop___ex_table[]; extern const struct exception_table_entry __stop___ex_table[];
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
/* Protects extables and symbol tables */
spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
extern const char __start___kallsyms[] __attribute__((weak)); /* The exception and symbol tables: start with kernel only. */
extern const char __stop___kallsyms[] __attribute__((weak)); LIST_HEAD(extables);
static LIST_HEAD(symbols);
/* modutils uses these exported symbols to figure out if static struct exception_table kernel_extable;
kallsyms support is present */ static struct kernel_symbol_group kernel_symbols;
EXPORT_SYMBOL(__start___kallsyms); /* List of modules, protected by module_mutex */
EXPORT_SYMBOL(__stop___kallsyms); static DECLARE_MUTEX(module_mutex);
LIST_HEAD(modules); /* FIXME: Accessed w/o lock on oops by some archs */
struct module kernel_module = /* Convenient structure for holding init and core sizes */
struct sizes
{ {
.size_of_struct = sizeof(struct module), unsigned long init_size;
.name = "", unsigned long core_size;
.uc = {ATOMIC_INIT(1)},
.flags = MOD_RUNNING,
.syms = __start___ksymtab,
.ex_table_start = __start___ex_table,
.ex_table_end = __stop___ex_table,
.kallsyms_start = __start___kallsyms,
.kallsyms_end = __stop___kallsyms,
}; };
struct module *module_list = &kernel_module; /* Find a symbol, return value and the symbol group */
static unsigned long __find_symbol(const char *name,
#endif /* defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS) */ struct kernel_symbol_group **group)
{
/* inter_module functions are always available, even when the kernel is struct kernel_symbol_group *ks;
* compiled without modules. Consumers of inter_module_xxx routines
* will always work, even when both are built into the kernel, this
* approach removes lots of #ifdefs in mainline code.
*/
static struct list_head ime_list = LIST_HEAD_INIT(ime_list);
static spinlock_t ime_lock = SPIN_LOCK_UNLOCKED;
static int kmalloc_failed;
/*
* This lock prevents modifications that might race the kernel fault
* fixups. It does not prevent reader walks that the modules code
* does. The kernel lock does that.
*
* Since vmalloc fault fixups occur in any context this lock is taken
* irqsave at all times.
*/
spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED; list_for_each_entry(ks, &symbols, list) {
unsigned int i;
/** for (i = 0; i < ks->num_syms; i++) {
* inter_module_register - register a new set of inter module data. if (strcmp(ks->syms[i].name, name) == 0) {
* @im_name: an arbitrary string to identify the data, must be unique *group = ks;
* @owner: module that is registering the data, always use THIS_MODULE return ks->syms[i].value;
* @userdata: pointer to arbitrary userdata to be registered
*
* Description: Check that the im_name has not already been registered,
* complain if it has. For new data, add it to the inter_module_entry
* list.
*/
void inter_module_register(const char *im_name, struct module *owner, const void *userdata)
{
struct list_head *tmp;
struct inter_module_entry *ime, *ime_new;
if (!(ime_new = kmalloc(sizeof(*ime), GFP_KERNEL))) {
/* Overloaded kernel, not fatal */
printk(KERN_ERR
"Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
im_name);
kmalloc_failed = 1;
return;
} }
memset(ime_new, 0, sizeof(*ime_new));
ime_new->im_name = im_name;
ime_new->owner = owner;
ime_new->userdata = userdata;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
spin_unlock(&ime_lock);
kfree(ime_new);
/* Program logic error, fatal */
printk(KERN_ERR "inter_module_register: duplicate im_name '%s'", im_name);
BUG();
} }
} }
list_add(&(ime_new->list), &ime_list); DEBUGP("Failed to find symbol %s\n", name);
spin_unlock(&ime_lock); return 0;
} }
/** /* Find a symbol in this elf symbol table */
* inter_module_unregister - unregister a set of inter module data. static unsigned long find_local_symbol(Elf_Shdr *sechdrs,
* @im_name: an arbitrary string to identify the data, must be unique unsigned int symindex,
* const char *strtab,
* Description: Check that the im_name has been registered, complain if const char *name)
* it has not. For existing data, remove it from the
* inter_module_entry list.
*/
void inter_module_unregister(const char *im_name)
{ {
struct list_head *tmp; unsigned int i;
struct inter_module_entry *ime; Elf_Sym *sym = (void *)sechdrs[symindex].sh_offset;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
list_del(&(ime->list));
spin_unlock(&ime_lock);
kfree(ime);
return;
}
}
spin_unlock(&ime_lock);
if (kmalloc_failed) {
printk(KERN_ERR
"inter_module_unregister: no entry for '%s', "
"probably caused by previous kmalloc failure\n",
im_name);
return;
}
else {
/* Program logic error, fatal */
printk(KERN_ERR "inter_module_unregister: no entry for '%s'", im_name);
BUG();
}
}
/** /* Search (defined) internal symbols first. */
* inter_module_get - return arbitrary userdata from another module. for (i = 1; i < sechdrs[symindex].sh_size/sizeof(*sym); i++) {
* @im_name: an arbitrary string to identify the data, must be unique if (sym[i].st_shndx != SHN_UNDEF
* && strcmp(name, strtab + sym[i].st_name) == 0)
* Description: If the im_name has not been registered, return NULL. return sym[i].st_value;
* Try to increment the use count on the owning module, if that fails
* then return NULL. Otherwise return the userdata.
*/
const void *inter_module_get(const char *im_name)
{
struct list_head *tmp;
struct inter_module_entry *ime;
const void *result = NULL;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
if (try_inc_mod_count(ime->owner))
result = ime->userdata;
break;
}
} }
spin_unlock(&ime_lock); return 0;
return(result);
} }
/** /* Search for module by name: must hold module_mutex. */
* inter_module_get_request - im get with automatic request_module. static struct module *find_module(const char *name)
* @im_name: an arbitrary string to identify the data, must be unique
* @modname: module that is expected to register im_name
*
* Description: If inter_module_get fails, do request_module then retry.
*/
const void *inter_module_get_request(const char *im_name, const char *modname)
{ {
const void *result = inter_module_get(im_name); struct module *mod;
if (!result) {
request_module(modname);
result = inter_module_get(im_name);
}
return(result);
}
/** list_for_each_entry(mod, &modules, list) {
* inter_module_put - release use of data from another module. if (strcmp(mod->name, name) == 0)
* @im_name: an arbitrary string to identify the data, must be unique return mod;
*
* Description: If the im_name has not been registered, complain,
* otherwise decrement the use count on the owning module.
*/
void inter_module_put(const char *im_name)
{
struct list_head *tmp;
struct inter_module_entry *ime;
spin_lock(&ime_lock);
list_for_each(tmp, &ime_list) {
ime = list_entry(tmp, struct inter_module_entry, list);
if (strcmp(ime->im_name, im_name) == 0) {
if (ime->owner)
__MOD_DEC_USE_COUNT(ime->owner);
spin_unlock(&ime_lock);
return;
}
} }
spin_unlock(&ime_lock); return NULL;
printk(KERN_ERR "inter_module_put: no entry for '%s'", im_name);
BUG();
} }
#ifdef CONFIG_MODULE_UNLOAD
#if defined(CONFIG_MODULES) /* The rest of the source */ /* Init the unload section of the module. */
static void module_unload_init(struct module *mod)
static long get_mod_name(const char *user_name, char **buf);
static void put_mod_name(char *buf);
struct module *find_module(const char *name);
void free_module(struct module *, int tag_freed);
/*
* Called at boot time
*/
void __init init_modules(void)
{ {
kernel_module.nsyms = __stop___ksymtab - __start___ksymtab; unsigned int i;
arch_init_modules(&kernel_module); INIT_LIST_HEAD(&mod->modules_which_use_me);
for (i = 0; i < NR_CPUS; i++)
atomic_set(&mod->ref[i].count, 0);
/* Backwards compatibility macros put refcount during init. */
mod->waiter = current;
} }
/* /* modules using other modules */
* Copy the name of a module from user space. struct module_use
*/ {
struct list_head list;
struct module *module_which_uses;
};
static inline long /* Does a already use b? */
get_mod_name(const char *user_name, char **buf) static int already_uses(struct module *a, struct module *b)
{ {
unsigned long page; struct module_use *use;
long retval;
page = __get_free_page(GFP_KERNEL);
if (!page)
return -ENOMEM;
retval = strncpy_from_user((char *)page, user_name, PAGE_SIZE);
if (retval > 0) {
if (retval < PAGE_SIZE) {
*buf = (char *)page;
return retval;
}
retval = -ENAMETOOLONG;
} else if (!retval)
retval = -EINVAL;
free_page(page); list_for_each_entry(use, &b->modules_which_use_me, list) {
return retval; if (use->module_which_uses == a) {
DEBUGP("%s uses %s!\n", a->name, b->name);
return 1;
}
}
DEBUGP("%s does not use %s!\n", a->name, b->name);
return 0;
} }
static inline void /* Module a uses b */
put_mod_name(char *buf) static int use_module(struct module *a, struct module *b)
{ {
free_page((unsigned long)buf); struct module_use *use;
} if (b == NULL || already_uses(a, b)) return 1;
/* DEBUGP("Allocating new usage for %s.\n", a->name);
* Allocate space for a module. use = kmalloc(sizeof(*use), GFP_ATOMIC);
*/ if (!use) {
printk("%s: out of memory loading\n", a->name);
return 0;
}
asmlinkage unsigned long use->module_which_uses = a;
sys_create_module(const char *name_user, size_t size) list_add(&use->list, &b->modules_which_use_me);
{ try_module_get(b); /* Can't fail */
char *name; return 1;
long namelen, error; }
struct module *mod;
unsigned long flags;
if (!capable(CAP_SYS_MODULE)) /* Clear the unload stuff of the module. */
return -EPERM; static void module_unload_free(struct module *mod)
lock_kernel(); {
if ((namelen = get_mod_name(name_user, &name)) < 0) { struct module *i;
error = namelen;
goto err0; list_for_each_entry(i, &modules, list) {
} struct module_use *use;
if (size < sizeof(struct module)+namelen) {
error = -EINVAL; list_for_each_entry(use, &i->modules_which_use_me, list) {
goto err1; if (use->module_which_uses == mod) {
DEBUGP("%s unusing %s\n", mod->name, i->name);
module_put(i);
list_del(&use->list);
kfree(use);
/* There can be at most one match. */
break;
} }
if (find_module(name) != NULL) {
error = -EEXIST;
goto err1;
} }
if ((mod = (struct module *)module_map(size)) == NULL) {
error = -ENOMEM;
goto err1;
} }
memset(mod, 0, sizeof(*mod));
mod->size_of_struct = sizeof(*mod);
mod->name = (char *)(mod + 1);
mod->size = size;
memcpy((char*)(mod+1), name, namelen+1);
put_mod_name(name);
spin_lock_irqsave(&modlist_lock, flags);
mod->next = module_list;
module_list = mod; /* link it in */
spin_unlock_irqrestore(&modlist_lock, flags);
error = (long) mod;
goto err0;
err1:
put_mod_name(name);
err0:
unlock_kernel();
return error;
} }
/* #ifdef CONFIG_SMP
* Initialize a module. /* Thread to stop each CPU in user context. */
*/ enum stopref_state {
STOPREF_WAIT,
STOPREF_PREPARE,
STOPREF_DISABLE_IRQ,
STOPREF_EXIT,
};
asmlinkage long static enum stopref_state stopref_state;
sys_init_module(const char *name_user, struct module *mod_user) static unsigned int stopref_num_threads;
static atomic_t stopref_thread_ack;
static int stopref(void *cpu)
{ {
struct module mod_tmp, *mod; int irqs_disabled = 0;
char *name, *n_name, *name_tmp = NULL; int prepared = 0;
long namelen, n_namelen, i, error;
unsigned long mod_user_size;
struct module_ref *dep;
if (!capable(CAP_SYS_MODULE)) sprintf(current->comm, "kmodule%lu\n", (unsigned long)cpu);
return -EPERM;
lock_kernel();
if ((namelen = get_mod_name(name_user, &name)) < 0) {
error = namelen;
goto err0;
}
if ((mod = find_module(name)) == NULL) {
error = -ENOENT;
goto err1;
}
/* Check module header size. We allow a bit of slop over the /* Highest priority we can manage, and move to right CPU. */
size we are familiar with to cope with a version of insmod #if 0 /* FIXME */
for a newer kernel. But don't over do it. */ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
if ((error = get_user(mod_user_size, &mod_user->size_of_struct)) != 0) setscheduler(current->pid, SCHED_FIFO, &param);
goto err1; #endif
if (mod_user_size < (unsigned long)&((struct module *)0L)->persist_start set_cpus_allowed(current, 1 << (unsigned long)cpu);
|| mod_user_size > sizeof(struct module) + 16*sizeof(void*)) {
printk(KERN_ERR "init_module: Invalid module header size.\n" /* Ack: we are alive */
KERN_ERR "A new version of the modutils is likely " atomic_inc(&stopref_thread_ack);
"needed.\n");
error = -EINVAL; /* Simple state machine */
goto err1; while (stopref_state != STOPREF_EXIT) {
if (stopref_state == STOPREF_DISABLE_IRQ && !irqs_disabled) {
local_irq_disable();
irqs_disabled = 1;
/* Ack: irqs disabled. */
atomic_inc(&stopref_thread_ack);
} else if (stopref_state == STOPREF_PREPARE && !prepared) {
/* Everyone is in place, hold CPU. */
preempt_disable();
prepared = 1;
atomic_inc(&stopref_thread_ack);
}
if (irqs_disabled || prepared)
cpu_relax();
else
yield();
} }
/* Hold the current contents while we play with the user's idea /* Ack: we are exiting. */
of righteousness. */ atomic_inc(&stopref_thread_ack);
mod_tmp = *mod;
name_tmp = kmalloc(strlen(mod->name) + 1, GFP_KERNEL); /* Where's kstrdup()? */
if (name_tmp == NULL) {
error = -ENOMEM;
goto err1;
}
strcpy(name_tmp, mod->name);
error = copy_from_user(mod, mod_user, mod_user_size); if (irqs_disabled)
if (error) { local_irq_enable();
error = -EFAULT; if (prepared)
goto err2; preempt_enable();
}
/* Sanity check the size of the module. */ return 0;
error = -EINVAL; }
if (mod->size > mod_tmp.size) { /* Change the thread state */
printk(KERN_ERR "init_module: Size of initialized module " static void stopref_set_state(enum stopref_state state, int sleep)
"exceeds size of created module.\n"); {
goto err2; atomic_set(&stopref_thread_ack, 0);
wmb();
stopref_state = state;
while (atomic_read(&stopref_thread_ack) != stopref_num_threads) {
if (sleep)
yield();
else
cpu_relax();
} }
}
/* Make sure all interesting pointers are sane. */ /* Stop the machine. Disables irqs. */
static int stop_refcounts(void)
{
unsigned int i, cpu;
unsigned long old_allowed;
int ret = 0;
if (!mod_bound(mod->name, namelen, mod)) { /* One thread per cpu. We'll do our own. */
printk(KERN_ERR "init_module: mod->name out of bounds.\n"); cpu = smp_processor_id();
goto err2;
}
if (mod->nsyms && !mod_bound(mod->syms, mod->nsyms, mod)) {
printk(KERN_ERR "init_module: mod->syms out of bounds.\n");
goto err2;
}
if (mod->ndeps && !mod_bound(mod->deps, mod->ndeps, mod)) {
printk(KERN_ERR "init_module: mod->deps out of bounds.\n");
goto err2;
}
if (mod->init && !mod_bound((unsigned long)mod->init, 0, mod)) {
printk(KERN_ERR "init_module: mod->init out of bounds.\n");
goto err2;
}
if (mod->cleanup && !mod_bound((unsigned long)mod->cleanup, 0, mod)) {
printk(KERN_ERR "init_module: mod->cleanup out of bounds.\n");
goto err2;
}
if (mod->ex_table_start > mod->ex_table_end
|| (mod->ex_table_start &&
!((unsigned long)mod->ex_table_start >= ((unsigned long)mod + mod->size_of_struct)
&& ((unsigned long)mod->ex_table_end
< (unsigned long)mod + mod->size)))
|| (((unsigned long)mod->ex_table_start
- (unsigned long)mod->ex_table_end)
% sizeof(struct exception_table_entry))) {
printk(KERN_ERR "init_module: mod->ex_table_* invalid.\n");
goto err2;
}
if (mod->flags & ~MOD_AUTOCLEAN) {
printk(KERN_ERR "init_module: mod->flags invalid.\n");
goto err2;
}
if (mod_member_present(mod, can_unload)
&& mod->can_unload && !mod_bound((unsigned long)mod->can_unload, 0, mod)) {
printk(KERN_ERR "init_module: mod->can_unload out of bounds.\n");
goto err2;
}
if (mod_member_present(mod, kallsyms_end)) {
if (mod->kallsyms_end &&
(!mod_bound(mod->kallsyms_start, 0, mod) ||
!mod_bound(mod->kallsyms_end, 0, mod))) {
printk(KERN_ERR "init_module: mod->kallsyms out of bounds.\n");
goto err2;
}
if (mod->kallsyms_start > mod->kallsyms_end) {
printk(KERN_ERR "init_module: mod->kallsyms invalid.\n");
goto err2;
}
}
if (mod_member_present(mod, archdata_end)) {
if (mod->archdata_end &&
(!mod_bound(mod->archdata_start, 0, mod) ||
!mod_bound(mod->archdata_end, 0, mod))) {
printk(KERN_ERR "init_module: mod->archdata out of bounds.\n");
goto err2;
}
if (mod->archdata_start > mod->archdata_end) {
printk(KERN_ERR "init_module: mod->archdata invalid.\n");
goto err2;
}
}
if (mod_member_present(mod, kernel_data) && mod->kernel_data) {
printk(KERN_ERR "init_module: mod->kernel_data must be zero.\n");
goto err2;
}
/* Check that the user isn't doing something silly with the name. */ /* FIXME: racy with set_cpus_allowed. */
old_allowed = current->cpus_allowed;
set_cpus_allowed(current, 1 << (unsigned long)cpu);
if ((n_namelen = get_mod_name(mod->name - (unsigned long)mod atomic_set(&stopref_thread_ack, 0);
+ (unsigned long)mod_user, stopref_num_threads = 0;
&n_name)) < 0) { stopref_state = STOPREF_WAIT;
printk(KERN_ERR "init_module: get_mod_name failure.\n");
error = n_namelen;
goto err2;
}
if (namelen != n_namelen || strcmp(n_name, mod_tmp.name) != 0) {
printk(KERN_ERR "init_module: changed module name to "
"`%s' from `%s'\n",
n_name, mod_tmp.name);
goto err3;
}
/* Ok, that's about all the sanity we can stomach; copy the rest. */ /* No CPUs can come up or down during this. */
down(&cpucontrol);
if (copy_from_user((char *)mod+mod_user_size, for (i = 0; i < NR_CPUS; i++) {
(char *)mod_user+mod_user_size, if (i == cpu || !cpu_online(i))
mod->size-mod_user_size)) { continue;
error = -EFAULT; ret = kernel_thread(stopref, (void *)i, CLONE_KERNEL);
goto err3; if (ret < 0)
break;
stopref_num_threads++;
} }
if (module_arch_init(mod)) /* Wait for them all to come to life. */
goto err3; while (atomic_read(&stopref_thread_ack) != stopref_num_threads)
yield();
/* On some machines it is necessary to do something here
to make the I and D caches consistent. */
flush_icache_range((unsigned long)mod, (unsigned long)mod + mod->size);
mod->next = mod_tmp.next; /* If some failed, kill them all. */
mod->refs = NULL; if (ret < 0) {
stopref_set_state(STOPREF_EXIT, 1);
/* Sanity check the module's dependents */ up(&cpucontrol);
for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) { return ret;
struct module *o, *d = dep->dep;
/* Make sure the indicated dependencies are really modules. */
if (d == mod) {
printk(KERN_ERR "init_module: self-referential "
"dependency in mod->deps.\n");
goto err3;
} }
/* Scan the current modules for this dependency */ /* Don't schedule us away at this point, please. */
for (o = module_list; o != &kernel_module && o != d; o = o->next) preempt_disable();
;
if (o != d) { /* Now they are all scheduled, make them hold the CPUs, ready. */
printk(KERN_ERR "init_module: found dependency that is " stopref_set_state(STOPREF_PREPARE, 0);
"(no longer?) a module.\n");
goto err3;
}
}
/* Update module references. */ /* Make them disable irqs. */
for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) { stopref_set_state(STOPREF_DISABLE_IRQ, 0);
struct module *d = dep->dep;
dep->ref = mod; local_irq_disable();
dep->next_ref = d->refs; return 0;
d->refs = dep; }
/* Being referenced by a dependent module counts as a
use as far as kmod is concerned. */
d->flags |= MOD_USED_ONCE;
}
/* Free our temporary memory. */ /* Restart the machine. Re-enables irqs. */
put_mod_name(n_name); static void restart_refcounts(void)
put_mod_name(name); {
stopref_set_state(STOPREF_EXIT, 0);
/* Initialize the module. */ local_irq_enable();
atomic_set(&mod->uc.usecount,1); preempt_enable();
mod->flags |= MOD_INITIALIZING; up(&cpucontrol);
if (mod->init && (error = mod->init()) != 0) {
atomic_set(&mod->uc.usecount,0);
mod->flags &= ~MOD_INITIALIZING;
if (error > 0) /* Buggy module */
error = -EBUSY;
goto err0;
}
atomic_dec(&mod->uc.usecount);
/* And set it running. */
mod->flags = (mod->flags | MOD_RUNNING) & ~MOD_INITIALIZING;
error = 0;
goto err0;
err3:
put_mod_name(n_name);
err2:
*mod = mod_tmp;
strcpy((char *)mod->name, name_tmp); /* We know there is room for this */
err1:
put_mod_name(name);
err0:
unlock_kernel();
kfree(name_tmp);
return error;
} }
#else /* ...!SMP */
static inline int stop_refcounts(void)
{
local_irq_disable();
return 0;
}
static inline void restart_refcounts(void)
{
local_irq_enable();
}
#endif
static spinlock_t unload_lock = SPIN_LOCK_UNLOCKED; static unsigned int module_refcount(struct module *mod)
int try_inc_mod_count(struct module *mod)
{ {
int res = 1; unsigned int i, total = 0;
if (mod) {
spin_lock(&unload_lock); for (i = 0; i < NR_CPUS; i++)
if (mod->flags & MOD_DELETED) total += atomic_read(&mod->ref[i].count);
res = 0; return total;
else
__MOD_INC_USE_COUNT(mod);
spin_unlock(&unload_lock);
}
return res;
} }
/* This exists whether we can unload or not */
static void free_module(struct module *mod);
asmlinkage long asmlinkage long
sys_delete_module(const char *name_user) sys_delete_module(const char *name_user, unsigned int flags)
{ {
struct module *mod, *next; struct module *mod;
char *name; char name[MODULE_NAME_LEN];
long error; int ret;
int something_changed;
if (!capable(CAP_SYS_MODULE)) if (!capable(CAP_SYS_MODULE))
return -EPERM; return -EPERM;
lock_kernel(); if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
if (name_user) { return -EFAULT;
if ((error = get_mod_name(name_user, &name)) < 0) name[MODULE_NAME_LEN-1] = '\0';
goto out;
error = -ENOENT; if (down_interruptible(&module_mutex) != 0)
if ((mod = find_module(name)) == NULL) { return -EINTR;
put_mod_name(name);
mod = find_module(name);
if (!mod) {
ret = -ENOENT;
goto out; goto out;
} }
put_mod_name(name);
error = -EBUSY;
if (mod->refs != NULL)
goto out;
spin_lock(&unload_lock); /* Already dying? */
if (!__MOD_IN_USE(mod)) { if (!mod->live) {
mod->flags |= MOD_DELETED; DEBUGP("%s already dying\n", mod->name);
spin_unlock(&unload_lock); ret = -EBUSY;
free_module(mod, 0);
error = 0;
} else {
spin_unlock(&unload_lock);
}
goto out; goto out;
} }
/* Do automatic reaping */ if (!mod->exit || mod->unsafe) {
restart: /* This module can't be removed */
something_changed = 0; ret = -EBUSY;
goto out;
for (mod = module_list; mod != &kernel_module; mod = next) {
next = mod->next;
spin_lock(&unload_lock);
if (mod->refs == NULL
&& (mod->flags & MOD_AUTOCLEAN)
&& (mod->flags & MOD_RUNNING)
&& !(mod->flags & MOD_DELETED)
&& (mod->flags & MOD_USED_ONCE)
&& !__MOD_IN_USE(mod)) {
if ((mod->flags & MOD_VISITED)
&& !(mod->flags & MOD_JUST_FREED)) {
spin_unlock(&unload_lock);
mod->flags &= ~MOD_VISITED;
} else {
mod->flags |= MOD_DELETED;
spin_unlock(&unload_lock);
free_module(mod, 1);
something_changed = 1;
}
} else {
spin_unlock(&unload_lock);
} }
if (!list_empty(&mod->modules_which_use_me)) {
/* Other modules depend on us: get rid of them first. */
ret = -EWOULDBLOCK;
goto out;
} }
if (something_changed) /* Stop the machine so refcounts can't move: irqs disabled. */
goto restart; DEBUGP("Stopping refcounts...\n");
ret = stop_refcounts();
for (mod = module_list; mod != &kernel_module; mod = mod->next) if (ret != 0)
mod->flags &= ~MOD_JUST_FREED; goto out;
error = 0;
out:
unlock_kernel();
return error;
}
/* Query various bits about modules. */
static int /* If it's not unused, quit unless we are told to block. */
qm_modules(char *buf, size_t bufsize, size_t *ret) if ((flags & O_NONBLOCK) && module_refcount(mod) != 0)
{ ret = -EWOULDBLOCK;
struct module *mod; else {
size_t nmod, space, len; mod->waiter = current;
mod->live = 0;
}
restart_refcounts();
nmod = space = 0; if (ret != 0)
goto out;
for (mod=module_list; mod != &kernel_module; mod=mod->next, ++nmod) { /* Since we might sleep for some time, drop the semaphore first */
len = strlen(mod->name)+1; up(&module_mutex);
if (len > bufsize) for (;;) {
goto calc_space_needed; DEBUGP("Looking at refcount...\n");
if (copy_to_user(buf, mod->name, len)) set_current_state(TASK_UNINTERRUPTIBLE);
return -EFAULT; if (module_refcount(mod) == 0)
buf += len; break;
bufsize -= len; schedule();
space += len;
} }
current->state = TASK_RUNNING;
if (put_user(nmod, ret)) DEBUGP("Regrabbing mutex...\n");
return -EFAULT; down(&module_mutex);
else
return 0;
calc_space_needed: /* Final destruction now noone is using it. */
space += len; mod->exit();
while ((mod = mod->next) != &kernel_module) free_module(mod);
space += strlen(mod->name)+1; ret = 0;
if (put_user(space, ret)) out:
return -EFAULT; up(&module_mutex);
else return ret;
return -ENOSPC;
} }
static int static void print_unload_info(struct seq_file *m, struct module *mod)
qm_deps(struct module *mod, char *buf, size_t bufsize, size_t *ret)
{ {
size_t i, space, len; struct module_use *use;
if (mod == &kernel_module) seq_printf(m, " %u", module_refcount(mod));
return -EINVAL;
if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
return 0;
space = 0; list_for_each_entry(use, &mod->modules_which_use_me, list)
for (i = 0; i < mod->ndeps; ++i) { seq_printf(m, " %s", use->module_which_uses->name);
const char *dep_name = mod->deps[i].dep->name;
len = strlen(dep_name)+1; if (mod->unsafe)
if (len > bufsize) seq_printf(m, " [unsafe]");
goto calc_space_needed;
if (copy_to_user(buf, dep_name, len))
return -EFAULT;
buf += len;
bufsize -= len;
space += len;
}
if (put_user(i, ret))
return -EFAULT;
else
return 0;
calc_space_needed: if (!mod->exit)
space += len; seq_printf(m, " [permanent]");
while (++i < mod->ndeps)
space += strlen(mod->deps[i].dep->name)+1;
if (put_user(space, ret)) seq_printf(m, "\n");
return -EFAULT;
else
return -ENOSPC;
} }
static int void __symbol_put(const char *symbol)
qm_refs(struct module *mod, char *buf, size_t bufsize, size_t *ret)
{ {
size_t nrefs, space, len; struct kernel_symbol_group *ksg;
struct module_ref *ref; unsigned long flags;
if (mod == &kernel_module)
return -EINVAL;
if (!MOD_CAN_QUERY(mod))
if (put_user(0, ret))
return -EFAULT;
else
return 0;
space = 0; spin_lock_irqsave(&modlist_lock, flags);
for (nrefs = 0, ref = mod->refs; ref ; ++nrefs, ref = ref->next_ref) { if (!__find_symbol(symbol, &ksg))
const char *ref_name = ref->ref->name; BUG();
module_put(ksg->owner);
spin_unlock_irqrestore(&modlist_lock, flags);
}
EXPORT_SYMBOL(__symbol_put);
len = strlen(ref_name)+1; #else /* !CONFIG_MODULE_UNLOAD */
if (len > bufsize) static void print_unload_info(struct seq_file *m, struct module *mod)
goto calc_space_needed; {
if (copy_to_user(buf, ref_name, len)) seq_printf(m, "\n");
return -EFAULT; }
buf += len;
bufsize -= len;
space += len;
}
if (put_user(nrefs, ret)) static inline void module_unload_free(struct module *mod)
return -EFAULT; {
else }
return 0;
calc_space_needed: static inline int use_module(struct module *a, struct module *b)
space += len; {
while ((ref = ref->next_ref) != NULL) return try_module_get(b);
space += strlen(ref->ref->name)+1; }
if (put_user(space, ret)) static inline void module_unload_init(struct module *mod)
return -EFAULT; {
else
return -ENOSPC;
} }
static int asmlinkage long
qm_symbols(struct module *mod, char *buf, size_t bufsize, size_t *ret) sys_delete_module(const char *name_user, unsigned int flags)
{ {
size_t i, space, len; return -ENOSYS;
struct module_symbol *s; }
char *strings;
unsigned long *vals;
if (!MOD_CAN_QUERY(mod)) #endif /* CONFIG_MODULE_UNLOAD */
if (put_user(0, ret))
return -EFAULT;
else
return 0;
space = mod->nsyms * 2*sizeof(void *); /* Find an symbol for this module (ie. resolve internals first).
It we find one, record usage. Must be holding module_mutex. */
unsigned long find_symbol_internal(Elf_Shdr *sechdrs,
unsigned int symindex,
const char *strtab,
const char *name,
struct module *mod,
struct kernel_symbol_group **ksg)
{
unsigned long ret;
ret = find_local_symbol(sechdrs, symindex, strtab, name);
if (ret) {
*ksg = NULL;
return ret;
}
/* Look in other modules... */
spin_lock_irq(&modlist_lock);
ret = __find_symbol(name, ksg);
if (ret) {
/* This can fail due to OOM, or module unloading */
if (!use_module(mod, (*ksg)->owner))
ret = 0;
}
spin_unlock_irq(&modlist_lock);
return ret;
}
i = len = 0; /* Free a module, remove from lists, etc (must hold module mutex). */
s = mod->syms; static void free_module(struct module *mod)
{
/* Delete from various lists */
list_del(&mod->list);
spin_lock_irq(&modlist_lock);
list_del(&mod->symbols.list);
list_del(&mod->extable.list);
spin_unlock_irq(&modlist_lock);
/* These may be NULL, but that's OK */
module_free(mod, mod->module_init);
module_free(mod, mod->module_core);
/* Module unload stuff */
module_unload_free(mod);
/* Finally, free the module structure */
kfree(mod);
}
if (space > bufsize) void *__symbol_get(const char *symbol)
goto calc_space_needed; {
struct kernel_symbol_group *ksg;
unsigned long value, flags;
if (!access_ok(VERIFY_WRITE, buf, space)) spin_lock_irqsave(&modlist_lock, flags);
return -EFAULT; value = __find_symbol(symbol, &ksg);
if (value && !try_module_get(ksg->owner))
value = 0;
spin_unlock_irqrestore(&modlist_lock, flags);
bufsize -= space; return (void *)value;
vals = (unsigned long *)buf; }
strings = buf+space; EXPORT_SYMBOL_GPL(__symbol_get);
for (; i < mod->nsyms ; ++i, ++s, vals += 2) { void symbol_put_addr(void *addr)
len = strlen(s->name)+1; {
if (len > bufsize) struct kernel_symbol_group *ks;
goto calc_space_needed; unsigned long flags;
if (copy_to_user(strings, s->name, len) spin_lock_irqsave(&modlist_lock, flags);
|| __put_user(s->value, vals+0) list_for_each_entry(ks, &symbols, list) {
|| __put_user(space, vals+1)) unsigned int i;
return -EFAULT;
strings += len; for (i = 0; i < ks->num_syms; i++) {
bufsize -= len; if (ks->syms[i].value == (unsigned long)addr) {
space += len; module_put(ks->owner);
spin_unlock_irqrestore(&modlist_lock, flags);
return;
} }
if (put_user(i, ret)) }
return -EFAULT; }
else spin_unlock_irqrestore(&modlist_lock, flags);
return 0; BUG();
calc_space_needed:
for (; i < mod->nsyms; ++i, ++s)
space += strlen(s->name)+1;
if (put_user(space, ret))
return -EFAULT;
else
return -ENOSPC;
} }
EXPORT_SYMBOL_GPL(symbol_put_addr);
static int
qm_info(struct module *mod, char *buf, size_t bufsize, size_t *ret) /* Transfer one ELF section to the correct (init or core) area. */
static void *copy_section(const char *name,
void *base,
Elf_Shdr *sechdr,
struct module *mod,
struct sizes *used)
{ {
int error = 0; void *dest;
unsigned long *use;
if (mod == &kernel_module) /* Only copy to init section if there is one */
return -EINVAL; if (strstr(name, ".init") && mod->module_init) {
dest = mod->module_init;
if (sizeof(struct module_info) <= bufsize) { use = &used->init_size;
struct module_info info; } else {
info.addr = (unsigned long)mod; dest = mod->module_core;
info.size = mod->size; use = &used->core_size;
info.flags = mod->flags; }
/* usecount is one too high here - report appropriately to
compensate for locking */
info.usecount = (mod_member_present(mod, can_unload)
&& mod->can_unload ? -1 : atomic_read(&mod->uc.usecount)-1);
if (copy_to_user(buf, &info, sizeof(struct module_info))) /* Align up */
return -EFAULT; *use = ALIGN(*use, sechdr->sh_addralign);
} else dest += *use;
error = -ENOSPC; *use += sechdr->sh_size;
if (put_user(sizeof(struct module_info), ret)) /* May not actually be in the file (eg. bss). */
return -EFAULT; if (sechdr->sh_type != SHT_NOBITS)
memcpy(dest, base + sechdr->sh_offset, sechdr->sh_size);
return error; return dest;
} }
asmlinkage long /* Look for the special symbols */
sys_query_module(const char *name_user, int which, char *buf, size_t bufsize, static int grab_private_symbols(Elf_Shdr *sechdrs,
size_t *ret) unsigned int symbolsec,
const char *strtab,
struct module *mod)
{ {
struct module *mod; Elf_Sym *sym = (void *)sechdrs[symbolsec].sh_offset;
int err; unsigned int i;
lock_kernel(); for (i = 1; i < sechdrs[symbolsec].sh_size/sizeof(*sym); i++) {
if (name_user == NULL) if (strcmp("__initfn", strtab + sym[i].st_name) == 0)
mod = &kernel_module; mod->init = (void *)sym[i].st_value;
else { #ifdef CONFIG_MODULE_UNLOAD
long namelen; if (strcmp("__exitfn", strtab + sym[i].st_name) == 0)
char *name; mod->exit = (void *)sym[i].st_value;
#endif
if ((namelen = get_mod_name(name_user, &name)) < 0) {
err = namelen;
goto out;
}
err = -ENOENT;
if ((mod = find_module(name)) == NULL) {
put_mod_name(name);
goto out;
}
put_mod_name(name);
} }
/* __MOD_ touches the flags. We must avoid that */ return 0;
}
atomic_inc(&mod->uc.usecount); /* Deal with the given section */
static int handle_section(const char *name,
Elf_Shdr *sechdrs,
unsigned int strindex,
unsigned int symindex,
unsigned int i,
struct module *mod)
{
int ret;
const char *strtab = (char *)sechdrs[strindex].sh_offset;
switch (which) switch (sechdrs[i].sh_type) {
{ case SHT_REL:
case 0: ret = apply_relocate(sechdrs, strtab, symindex, i, mod);
err = 0;
break;
case QM_MODULES:
err = qm_modules(buf, bufsize, ret);
break;
case QM_DEPS:
err = qm_deps(mod, buf, bufsize, ret);
break;
case QM_REFS:
err = qm_refs(mod, buf, bufsize, ret);
break; break;
case QM_SYMBOLS: case SHT_RELA:
err = qm_symbols(mod, buf, bufsize, ret); ret = apply_relocate_add(sechdrs, strtab, symindex, i, mod);
break; break;
case QM_INFO: case SHT_SYMTAB:
err = qm_info(mod, buf, bufsize, ret); ret = grab_private_symbols(sechdrs, i, strtab, mod);
break; break;
default: default:
err = -EINVAL; DEBUGP("Ignoring section %u: %s\n", i,
break; sechdrs[i].sh_type==SHT_NULL ? "NULL":
} sechdrs[i].sh_type==SHT_PROGBITS ? "PROGBITS":
atomic_dec(&mod->uc.usecount); sechdrs[i].sh_type==SHT_SYMTAB ? "SYMTAB":
sechdrs[i].sh_type==SHT_STRTAB ? "STRTAB":
out: sechdrs[i].sh_type==SHT_RELA ? "RELA":
unlock_kernel(); sechdrs[i].sh_type==SHT_HASH ? "HASH":
return err; sechdrs[i].sh_type==SHT_DYNAMIC ? "DYNAMIC":
sechdrs[i].sh_type==SHT_NOTE ? "NOTE":
sechdrs[i].sh_type==SHT_NOBITS ? "NOBITS":
sechdrs[i].sh_type==SHT_REL ? "REL":
sechdrs[i].sh_type==SHT_SHLIB ? "SHLIB":
sechdrs[i].sh_type==SHT_DYNSYM ? "DYNSYM":
sechdrs[i].sh_type==SHT_NUM ? "NUM":
"UNKNOWN");
ret = 0;
}
return ret;
} }
/* /* Figure out total size desired for the common vars */
* Copy the kernel symbol table to user space. If the argument is static unsigned long read_commons(void *start, Elf_Shdr *sechdr)
* NULL, just return the size of the table.
*
* This call is obsolete. New programs should use query_module+QM_SYMBOLS
* which does not arbitrarily limit the length of symbols.
*/
asmlinkage long
sys_get_kernel_syms(struct kernel_sym *table)
{ {
struct module *mod; unsigned long size, i, max_align;
int i; Elf_Sym *sym;
struct kernel_sym ksym;
lock_kernel(); size = max_align = 0;
for (mod = module_list, i = 0; mod; mod = mod->next) {
/* include the count for the module name! */
i += mod->nsyms + 1;
}
if (table == NULL)
goto out;
/* So that we don't give the user our stack content */ for (sym = start + sechdr->sh_offset, i = 0;
memset (&ksym, 0, sizeof (ksym)); i < sechdr->sh_size / sizeof(Elf_Sym);
i++) {
for (mod = module_list, i = 0; mod; mod = mod->next) { if (sym[i].st_shndx == SHN_COMMON) {
struct module_symbol *msym; /* Value encodes alignment. */
unsigned int j; if (sym[i].st_value > max_align)
max_align = sym[i].st_value;
if (!MOD_CAN_QUERY(mod)) /* Pad to required alignment */
continue; size = ALIGN(size, sym[i].st_value) + sym[i].st_size;
/* magic: write module info as a pseudo symbol */
ksym.value = (unsigned long)mod;
ksym.name[0] = '#';
strncpy(ksym.name+1, mod->name, sizeof(ksym.name)-1);
ksym.name[sizeof(ksym.name)-1] = '\0';
if (copy_to_user(table, &ksym, sizeof(ksym)) != 0)
goto out;
++i, ++table;
if (mod->nsyms == 0)
continue;
for (j = 0, msym = mod->syms; j < mod->nsyms; ++j, ++msym) {
ksym.value = msym->value;
strncpy(ksym.name, msym->name, sizeof(ksym.name));
ksym.name[sizeof(ksym.name)-1] = '\0';
if (copy_to_user(table, &ksym, sizeof(ksym)) != 0)
goto out;
++i, ++table;
} }
} }
out:
unlock_kernel();
return i;
}
/* /* Now, add in max alignment requirement (with align
* Look for a module by name, ignoring modules marked for deletion. attribute, this could be large), so we know we have space
*/ whatever the start alignment is */
return size + max_align;
}
struct module * /* Change all symbols so that sh_value encodes the pointer directly. */
find_module(const char *name) static void simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
unsigned int strindex,
void *common,
struct module *mod)
{ {
struct module *mod; unsigned int i;
Elf_Sym *sym;
/* First simplify defined symbols, so if they become the
"answer" to undefined symbols, copying their st_value us
correct. */
for (sym = (void *)sechdrs[symindex].sh_offset, i = 0;
i < sechdrs[symindex].sh_size / sizeof(Elf_Sym);
i++) {
switch (sym[i].st_shndx) {
case SHN_COMMON:
/* Value encodes alignment. */
common = (void *)ALIGN((unsigned long)common,
sym[i].st_value);
/* Change it to encode pointer */
sym[i].st_value = (unsigned long)common;
common += sym[i].st_size;
break;
for (mod = module_list; mod ; mod = mod->next) { case SHN_ABS:
if (mod->flags & MOD_DELETED) /* Don't need to do anything */
continue; DEBUGP("Absolute symbol: 0x%08lx\n",
if (!strcmp(mod->name, name)) (long)sym[i].st_value);
break; break;
}
return mod; case SHN_UNDEF:
} break;
/* default:
* Free the given module. sym[i].st_value
*/ = (unsigned long)
(sechdrs[sym[i].st_shndx].sh_offset
+ sym[i].st_value);
}
}
/* Now try to resolve undefined symbols */
for (sym = (void *)sechdrs[symindex].sh_offset, i = 0;
i < sechdrs[symindex].sh_size / sizeof(Elf_Sym);
i++) {
if (sym[i].st_shndx == SHN_UNDEF) {
/* Look for symbol */
struct kernel_symbol_group *ksg = NULL;
const char *strtab
= (char *)sechdrs[strindex].sh_offset;
sym[i].st_value
= find_symbol_internal(sechdrs,
symindex,
strtab,
strtab + sym[i].st_name,
mod,
&ksg);
/* We fake up "__this_module" */
if (strcmp(strtab+sym[i].st_name, "__this_module")==0)
sym[i].st_value = (unsigned long)mod;
}
}
}
void /* Get the total allocation size of the init and non-init sections */
free_module(struct module *mod, int tag_freed) static struct sizes get_sizes(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
const char *secstrings)
{ {
struct module_ref *dep; struct sizes ret = { 0, 0 };
unsigned i; unsigned i;
unsigned long flags;
/* Let the module clean up. */ /* Everything marked ALLOC (this includes the exported
symbols) */
for (i = 1; i < hdr->e_shnum; i++) {
unsigned long *add;
if (mod->flags & MOD_RUNNING) /* If it's called *.init*, and we're init, we're interested */
{ if (strstr(secstrings + sechdrs[i].sh_name, ".init") != 0)
if(mod->cleanup) add = &ret.init_size;
mod->cleanup(); else
mod->flags &= ~MOD_RUNNING; add = &ret.core_size;
}
/* Remove the module from the dependency lists. */
for (i = 0, dep = mod->deps; i < mod->ndeps; ++i, ++dep) { if (sechdrs[i].sh_flags & SHF_ALLOC) {
struct module_ref **pp; /* Pad up to required alignment */
for (pp = &dep->dep->refs; *pp != dep; pp = &(*pp)->next_ref) *add = ALIGN(*add, sechdrs[i].sh_addralign ?: 1);
continue; *add += sechdrs[i].sh_size;
*pp = dep->next_ref;
if (tag_freed && dep->dep->refs == NULL)
dep->dep->flags |= MOD_JUST_FREED;
} }
/* And from the main module list. */
spin_lock_irqsave(&modlist_lock, flags);
if (mod == module_list) {
module_list = mod->next;
} else {
struct module *p;
for (p = module_list; p->next != mod; p = p->next)
continue;
p->next = mod->next;
} }
spin_unlock_irqrestore(&modlist_lock, flags);
/* And free the memory. */ return ret;
module_unmap(mod);
} }
/* /* Allocate and load the module */
* Called by the /proc file system to return a current list of modules. static struct module *load_module(void *umod,
*/ unsigned long len,
static void *m_start(struct seq_file *m, loff_t *pos) const char *uargs)
{ {
struct module *v; Elf_Ehdr *hdr;
loff_t n = *pos; Elf_Shdr *sechdrs;
lock_kernel(); char *secstrings;
for (v = module_list; v && n--; v = v->next) unsigned int i, symindex, exportindex, strindex, setupindex, exindex,
; modnameindex;
return v; long arglen;
} unsigned long common_length;
static void *m_next(struct seq_file *m, void *p, loff_t *pos) struct sizes sizes, used;
{
struct module *v = p;
(*pos)++;
return v->next;
}
static void m_stop(struct seq_file *m, void *p)
{
unlock_kernel();
}
static int m_show(struct seq_file *m, void *p)
{
struct module *mod = p;
struct module_ref *ref = mod->refs;
if (mod == &kernel_module)
return 0;
seq_printf(m, "%-20s%8lu", mod->name, mod->size);
if (mod->flags & MOD_RUNNING)
seq_printf(m, "%4ld",
(mod_member_present(mod, can_unload)
&& mod->can_unload
? -1L : (long)atomic_read(&mod->uc.usecount)));
if (mod->flags & MOD_DELETED)
seq_puts(m, " (deleted)");
else if (mod->flags & MOD_RUNNING) {
if (mod->flags & MOD_AUTOCLEAN)
seq_puts(m, " (autoclean)");
if (!(mod->flags & MOD_USED_ONCE))
seq_puts(m, " (unused)");
} else if (mod->flags & MOD_INITIALIZING)
seq_puts(m, " (initializing)");
else
seq_puts(m, " (uninitialized)");
if (ref) {
char c;
seq_putc(m, ' ');
for (c = '[' ; ref; c = ' ', ref = ref->next_ref)
seq_printf(m, "%c%s", c, ref->ref->name);
seq_putc(m, ']');
}
seq_putc(m, '\n');
return 0;
}
struct seq_operations modules_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = m_show
};
/*
* Called by the /proc file system to return a current list of ksyms.
*/
struct mod_sym {
struct module *mod; struct module *mod;
int index; int err = 0;
}; void *ptr = NULL; /* Stops spurious gcc uninitialized warning */
/* iterator */ DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
umod, len, uargs);
if (len < sizeof(*hdr))
return ERR_PTR(-ENOEXEC);
static void *s_start(struct seq_file *m, loff_t *pos) /* Suck in entire file: we'll want most of it. */
{ /* vmalloc barfs on "unusual" numbers. Check here */
struct mod_sym *p = kmalloc(sizeof(*p), GFP_KERNEL); if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
struct module *v;
loff_t n = *pos;
if (!p)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
lock_kernel(); if (copy_from_user(hdr, umod, len) != 0) {
for (v = module_list; v; n -= v->nsyms, v = v->next) { err = -EFAULT;
if (n < v->nsyms) { goto free_hdr;
p->mod = v; }
p->index = n;
return p; /* Sanity checks against insmoding binaries or wrong arch,
} weird elf version */
if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
|| hdr->e_type != ET_REL
|| !elf_check_arch(hdr)
|| hdr->e_shentsize != sizeof(*sechdrs)) {
err = -ENOEXEC;
goto free_hdr;
}
/* Convenience variables */
sechdrs = (void *)hdr + hdr->e_shoff;
secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
/* May not export symbols, or have setup params, so these may
not exist */
exportindex = setupindex = 0;
/* And these should exist, but gcc whinges if we don't init them */
symindex = strindex = exindex = modnameindex = 0;
/* Find where important sections are */
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_type == SHT_SYMTAB) {
/* Internal symbols */
DEBUGP("Symbol table in section %u\n", i);
symindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, ".modulename")
== 0) {
/* This module's name */
DEBUGP("Module name in section %u\n", i);
modnameindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, "__ksymtab")
== 0) {
/* Exported symbols. */
DEBUGP("EXPORT table in section %u\n", i);
exportindex = i;
} else if (strcmp(secstrings + sechdrs[i].sh_name, ".strtab")
== 0) {
/* Strings */
DEBUGP("String table found in section %u\n", i);
strindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, ".setup.init")
== 0) {
/* Setup parameter info */
DEBUGP("Setup table found in section %u\n", i);
setupindex = i;
} else if (strcmp(secstrings+sechdrs[i].sh_name, "__ex_table")
== 0) {
/* Exception table */
DEBUGP("Exception table found in section %u\n", i);
exindex = i;
}
#ifndef CONFIG_MODULE_UNLOAD
/* Don't load .exit sections */
if (strstr(secstrings+sechdrs[i].sh_name, ".exit"))
sechdrs[i].sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
} }
unlock_kernel();
kfree(p);
return NULL;
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos) if (!modnameindex) {
{ DEBUGP("Module has no name!\n");
struct mod_sym *v = p; err = -ENOEXEC;
(*pos)++; goto free_hdr;
if (++v->index >= v->mod->nsyms) { }
do {
v->mod = v->mod->next; /* Now allocate space for the module proper, and copy name and args. */
if (!v->mod) { err = strlen_user(uargs);
unlock_kernel(); if (err < 0)
kfree(p); goto free_hdr;
return NULL; arglen = err;
mod = kmalloc(sizeof(*mod) + arglen+1, GFP_KERNEL);
if (!mod) {
err = -ENOMEM;
goto free_hdr;
}
memset(mod, 0, sizeof(*mod) + arglen+1);
if (copy_from_user(mod->args, uargs, arglen) != 0) {
err = -EFAULT;
goto free_mod;
}
strncpy(mod->name, (char *)hdr + sechdrs[modnameindex].sh_offset,
sizeof(mod->name)-1);
if (find_module(mod->name)) {
err = -EEXIST;
goto free_mod;
}
/* Initialize the lists, since they will be list_del'd if init fails */
INIT_LIST_HEAD(&mod->extable.list);
INIT_LIST_HEAD(&mod->list);
INIT_LIST_HEAD(&mod->symbols.list);
mod->symbols.owner = mod;
mod->live = 0;
module_unload_init(mod);
/* How much space will we need? (Common area in core) */
sizes = get_sizes(hdr, sechdrs, secstrings);
common_length = read_commons(hdr, &sechdrs[symindex]);
sizes.core_size += common_length;
/* Set these up: arch's can add to them */
mod->core_size = sizes.core_size;
mod->init_size = sizes.init_size;
/* Allocate (this is arch specific) */
ptr = module_core_alloc(hdr, sechdrs, secstrings, mod);
if (IS_ERR(ptr))
goto free_mod;
mod->module_core = ptr;
ptr = module_init_alloc(hdr, sechdrs, secstrings, mod);
if (IS_ERR(ptr))
goto free_core;
mod->module_init = ptr;
/* Transfer each section which requires ALLOC, and set sh_offset
fields to absolute addresses. */
used.core_size = common_length;
used.init_size = 0;
for (i = 1; i < hdr->e_shnum; i++) {
if (sechdrs[i].sh_flags & SHF_ALLOC) {
ptr = copy_section(secstrings + sechdrs[i].sh_name,
hdr, &sechdrs[i], mod, &used);
if (IS_ERR(ptr))
goto cleanup;
sechdrs[i].sh_offset = (unsigned long)ptr;
} else {
sechdrs[i].sh_offset += (unsigned long)hdr;
} }
} while (!v->mod->nsyms);
v->index = 0;
} }
return p; /* Don't use more than we allocated! */
} if (used.init_size > mod->init_size || used.core_size > mod->core_size)
BUG();
static void s_stop(struct seq_file *m, void *p)
{
if (p && !IS_ERR(p)) {
unlock_kernel();
kfree(p);
}
}
static int s_show(struct seq_file *m, void *p) /* Fix up syms, so that st_value is a pointer to location. */
{ simplify_symbols(sechdrs, symindex, strindex, mod->module_core, mod);
struct mod_sym *v = p;
struct module_symbol *sym; /* Set up EXPORTed symbols */
if (exportindex) {
mod->symbols.num_syms = (sechdrs[exportindex].sh_size
/ sizeof(*mod->symbols.syms));
mod->symbols.syms = (void *)sechdrs[exportindex].sh_offset;
}
/* Set up exception table */
if (exindex) {
/* FIXME: Sort exception table. */
mod->extable.num_entries = (sechdrs[exindex].sh_size
/ sizeof(struct
exception_table_entry));
mod->extable.entry = (void *)sechdrs[exindex].sh_offset;
}
/* Now handle each section. */
for (i = 1; i < hdr->e_shnum; i++) {
err = handle_section(secstrings + sechdrs[i].sh_name,
sechdrs, strindex, symindex, i, mod);
if (err < 0)
goto cleanup;
}
err = module_finalize(hdr, sechdrs, mod);
if (err < 0)
goto cleanup;
#if 0 /* Needs param support */
/* Size of section 0 is 0, so this works well */
err = parse_args(mod->args,
(struct kernel_param *)
sechdrs[setupindex].sh_offset,
sechdrs[setupindex].sh_size
/ sizeof(struct kernel_param),
NULL);
if (err < 0)
goto cleanup;
#endif
if (!MOD_CAN_QUERY(v->mod)) /* Get rid of temporary copy */
return 0; vfree(hdr);
sym = &v->mod->syms[v->index];
if (*v->mod->name)
seq_printf(m, "%0*lx %s\t[%s]\n", (int)(2*sizeof(void*)),
sym->value, sym->name, v->mod->name);
else
seq_printf(m, "%0*lx %s\n", (int)(2*sizeof(void*)),
sym->value, sym->name);
return 0;
}
struct seq_operations ksyms_op = { /* Done! */
.start = s_start, return mod;
.next = s_next,
.stop = s_stop,
.show = s_show
};
#define MODLIST_SIZE 4096 cleanup:
module_unload_free(mod);
module_free(mod, mod->module_init);
free_core:
module_free(mod, mod->module_core);
free_mod:
kfree(mod);
free_hdr:
vfree(hdr);
if (err < 0) return ERR_PTR(err);
else return ptr;
}
/* /* This is where the real work happens */
* this function isn't smp safe but that's not really a problem; it's asmlinkage long
* called from oops context only and any locking could actually prevent sys_init_module(void *umod,
* the oops from going out; the line that is generated is informational unsigned long len,
* only and should NEVER prevent the real oops from going out. const char *uargs)
*/
void print_modules(void)
{ {
static char modlist[MODLIST_SIZE]; struct module *mod;
struct module *this_mod; int ret;
int pos = 0;
this_mod = module_list;
while (this_mod) {
if (this_mod->name)
pos += snprintf(modlist+pos, MODLIST_SIZE-pos-1,
"%s ", this_mod->name);
this_mod = this_mod->next;
}
printk("%s\n",modlist);
}
#else /* CONFIG_MODULES */ /* Must have permission */
if (!capable(CAP_SYS_MODULE))
return -EPERM;
/* Dummy syscalls for people who don't want modules */ /* Only one module load at a time, please */
if (down_interruptible(&module_mutex) != 0)
return -EINTR;
/* Do all the hard work */
mod = load_module(umod, len, uargs);
if (IS_ERR(mod)) {
up(&module_mutex);
return PTR_ERR(mod);
}
/* Flush the instruction cache, since we've played with text */
if (mod->module_init)
flush_icache_range((unsigned long)mod->module_init,
(unsigned long)mod->module_init
+ mod->init_size);
flush_icache_range((unsigned long)mod->module_core,
(unsigned long)mod->module_core + mod->core_size);
/* Now sew it into exception list (just in case...). */
spin_lock_irq(&modlist_lock);
list_add(&mod->extable.list, &extables);
spin_unlock_irq(&modlist_lock);
/* Start the module */
ret = mod->init ? mod->init() : 0;
if (ret < 0) {
/* Init routine failed: abort. Try to protect us from
buggy refcounters. */
synchronize_kernel();
if (mod->unsafe) {
printk(KERN_ERR "%s: module is now stuck!\n",
mod->name);
/* Mark it "live" so that they can force
deletion later, and we don't keep getting
woken on every decrement. */
mod->live = 1;
} else
free_module(mod);
up(&module_mutex);
return ret;
}
asmlinkage unsigned long /* Now it's a first class citizen! */
sys_create_module(const char *name_user, size_t size) spin_lock_irq(&modlist_lock);
{ list_add(&mod->symbols.list, &kernel_symbols.list);
return -ENOSYS; spin_unlock_irq(&modlist_lock);
} list_add(&mod->list, &modules);
asmlinkage long module_free(mod, mod->module_init);
sys_init_module(const char *name_user, struct module *mod_user) mod->module_init = NULL;
{
return -ENOSYS;
}
asmlinkage long /* All ok! */
sys_delete_module(const char *name_user) mod->live = 1;
{ up(&module_mutex);
return -ENOSYS; return 0;
} }
asmlinkage long /* Called by the /proc file system to return a current list of
sys_query_module(const char *name_user, int which, char *buf, size_t bufsize, modules. Al Viro came up with this interface as an "improvement".
size_t *ret) God save us from any more such interface improvements. */
static void *m_start(struct seq_file *m, loff_t *pos)
{ {
/* Let the program know about the new interface. Not that struct list_head *i;
it'll do them much good. */ loff_t n = 0;
if (which == 0)
return 0;
return -ENOSYS; down(&module_mutex);
list_for_each(i, &modules) {
if (n++ == *pos)
break;
}
if (i == &modules)
return NULL;
return i;
} }
asmlinkage long static void *m_next(struct seq_file *m, void *p, loff_t *pos)
sys_get_kernel_syms(struct kernel_sym *table)
{ {
return -ENOSYS; struct list_head *i = p;
(*pos)++;
if (i->next == &modules)
return NULL;
return i->next;
} }
int try_inc_mod_count(struct module *mod) static void m_stop(struct seq_file *m, void *p)
{ {
return 1; up(&module_mutex);
} }
void print_modules(void) static int m_show(struct seq_file *m, void *p)
{ {
struct module *mod = list_entry(p, struct module, list);
seq_printf(m, "%s %lu",
mod->name, mod->init_size + mod->core_size);
print_unload_info(m, mod);
return 0;
} }
struct seq_operations modules_op = {
.start = m_start,
.next = m_next,
.stop = m_stop,
.show = m_show
};
#endif /* CONFIG_MODULES */ static int __init init(void)
#if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
#define MAX_SYMBOL_SIZE 512
static void
address_to_exported_symbol(unsigned long address, const char **mod_name,
const char **sym_name, unsigned long *sym_start,
unsigned long *sym_end)
{ {
struct module *this_mod; /* Add kernel symbols to symbol table */
int i; kernel_symbols.num_syms = (__stop___ksymtab - __start___ksymtab);
kernel_symbols.syms = __start___ksymtab;
for (this_mod = module_list; this_mod; this_mod = this_mod->next) { list_add(&kernel_symbols.list, &symbols);
/* walk the symbol list of this module. Only symbols
who's address is smaller than the searched for address /* Add kernel exception table to exception tables */
are relevant; and only if it's better than the best so far */ kernel_extable.num_entries = (__stop___ex_table -__start___ex_table);
for (i = 0; i < this_mod->nsyms; i++) kernel_extable.entry = __start___ex_table;
if ((this_mod->syms[i].value <= address) && list_add(&kernel_extable.list, &extables);
(*sym_start < this_mod->syms[i].value)) { return 0;
*sym_start = this_mod->syms[i].value;
*sym_name = this_mod->syms[i].name;
*mod_name = this_mod->name;
if (i + 1 < this_mod->nsyms)
*sym_end = this_mod->syms[i+1].value;
else
*sym_end = (unsigned long) this_mod + this_mod->size;
}
}
} }
void /* Obsolete lvalue for broken code which asks about usage */
print_symbol(const char *fmt, unsigned long address) int module_dummy_usage = 1;
{
/* static to not take up stackspace; if we race here too bad */
static char buffer[MAX_SYMBOL_SIZE];
const char *mod_name = NULL, *sec_name = NULL, *sym_name = NULL;
unsigned long mod_start, mod_end, sec_start, sec_end,
sym_start, sym_end;
char *tag = "";
memset(buffer, 0, MAX_SYMBOL_SIZE); /* Call this at boot */
__initcall(init);
sym_start = 0;
if (!kallsyms_address_to_symbol(address, &mod_name, &mod_start, &mod_end, &sec_name, &sec_start, &sec_end, &sym_name, &sym_start, &sym_end)) {
tag = "E ";
address_to_exported_symbol(address, &mod_name, &sym_name, &sym_start, &sym_end);
}
if (sym_start) {
if (*mod_name)
snprintf(buffer, MAX_SYMBOL_SIZE - 1, "%s%s+%#x/%#x [%s]",
tag, sym_name,
(unsigned int)(address - sym_start),
(unsigned int)(sym_end - sym_start),
mod_name);
else
snprintf(buffer, MAX_SYMBOL_SIZE - 1, "%s%s+%#x/%#x",
tag, sym_name,
(unsigned int)(address - sym_start),
(unsigned int)(sym_end - sym_start));
printk(fmt, buffer);
}
#if 0
else {
printk(fmt, "[unresolved]");
}
#endif
}
#endif
...@@ -206,6 +206,8 @@ cond_syscall(sys_acct) ...@@ -206,6 +206,8 @@ cond_syscall(sys_acct)
cond_syscall(sys_lookup_dcookie) cond_syscall(sys_lookup_dcookie)
cond_syscall(sys_swapon) cond_syscall(sys_swapon)
cond_syscall(sys_swapoff) cond_syscall(sys_swapoff)
cond_syscall(sys_init_module)
cond_syscall(sys_delete_module)
static int set_one_prio(struct task_struct *p, int niceval, int error) static int set_one_prio(struct task_struct *p, int niceval, int error)
{ {
......
...@@ -19,3 +19,5 @@ EXPORT_SYMBOL(zlib_deflateReset); ...@@ -19,3 +19,5 @@ EXPORT_SYMBOL(zlib_deflateReset);
EXPORT_SYMBOL(zlib_deflateCopy); EXPORT_SYMBOL(zlib_deflateCopy);
EXPORT_SYMBOL(zlib_deflateParams); EXPORT_SYMBOL(zlib_deflateParams);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
no_module_init;
...@@ -20,3 +20,5 @@ EXPORT_SYMBOL(zlib_inflateReset); ...@@ -20,3 +20,5 @@ EXPORT_SYMBOL(zlib_inflateReset);
EXPORT_SYMBOL(zlib_inflateSyncPoint); EXPORT_SYMBOL(zlib_inflateSyncPoint);
EXPORT_SYMBOL(zlib_inflateIncomp); EXPORT_SYMBOL(zlib_inflateIncomp);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
no_module_init;
...@@ -374,7 +374,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me) ...@@ -374,7 +374,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me)
&& ct_helper->me) { && ct_helper->me) {
__MOD_INC_USE_COUNT(ct_helper->me); __MOD_INC_USE_COUNT(ct_helper->me);
} else { } else {
#ifdef CONFIG_MODULES
/* We are a NAT helper for protocol X. If we need /* We are a NAT helper for protocol X. If we need
* respective conntrack helper for protoccol X, compute * respective conntrack helper for protoccol X, compute
* conntrack helper name and try to load module */ * conntrack helper name and try to load module */
...@@ -403,6 +403,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me) ...@@ -403,6 +403,7 @@ int ip_nat_helper_register(struct ip_nat_helper *me)
"because kernel was compiled without kernel " "because kernel was compiled without kernel "
"module loader support\n", name); "module loader support\n", name);
return -EBUSY; return -EBUSY;
#endif
#endif #endif
} }
} }
...@@ -466,9 +467,12 @@ void ip_nat_helper_unregister(struct ip_nat_helper *me) ...@@ -466,9 +467,12 @@ void ip_nat_helper_unregister(struct ip_nat_helper *me)
if ((ct_helper = ip_ct_find_helper(&me->tuple)) if ((ct_helper = ip_ct_find_helper(&me->tuple))
&& ct_helper->me) { && ct_helper->me) {
__MOD_DEC_USE_COUNT(ct_helper->me); __MOD_DEC_USE_COUNT(ct_helper->me);
} else }
#ifdef CONFIG_MODULES
else
printk("%s: unable to decrement usage count" printk("%s: unable to decrement usage count"
" of conntrack helper %s\n", " of conntrack helper %s\n",
__FUNCTION__, me->me->name); __FUNCTION__, me->me->name);
#endif
} }
} }
...@@ -538,6 +538,7 @@ struct net_proto_family inet6_family_ops = { ...@@ -538,6 +538,7 @@ struct net_proto_family inet6_family_ops = {
}; };
#ifdef MODULE #ifdef MODULE
#if 0 /* FIXME --RR */
int ipv6_unload(void) int ipv6_unload(void)
{ {
if (!unloadable) return 1; if (!unloadable) return 1;
...@@ -545,6 +546,8 @@ int ipv6_unload(void) ...@@ -545,6 +546,8 @@ int ipv6_unload(void)
return atomic_read(&(__this_module.uc.usecount)) - 3; return atomic_read(&(__this_module.uc.usecount)) - 3;
} }
#endif #endif
#endif
#endif
#if defined(MODULE) && defined(CONFIG_SYSCTL) #if defined(MODULE) && defined(CONFIG_SYSCTL)
extern void ipv6_sysctl_register(void); extern void ipv6_sysctl_register(void);
...@@ -624,10 +627,12 @@ static int __init inet6_init(void) ...@@ -624,10 +627,12 @@ static int __init inet6_init(void)
int err; int err;
#ifdef MODULE #ifdef MODULE
#if 0 /* FIXME --RR */
if (!mod_member_present(&__this_module, can_unload)) if (!mod_member_present(&__this_module, can_unload))
return -EINVAL; return -EINVAL;
__this_module.can_unload = &ipv6_unload; __this_module.can_unload = &ipv6_unload;
#endif
#endif #endif
printk(KERN_INFO "IPv6 v0.8 for NET4.0\n"); printk(KERN_INFO "IPv6 v0.8 for NET4.0\n");
......
...@@ -16,8 +16,8 @@ include scripts/Makefile.lib ...@@ -16,8 +16,8 @@ include scripts/Makefile.lib
# ========================================================================== # ==========================================================================
quiet_cmd_modules_install = INSTALL $(obj-m) quiet_cmd_modules_install = INSTALL $(obj-m)
cmd_modules_install = mkdir -p $(MODLIB)/kernel/$(obj); \ cmd_modules_install = mkdir -p $(MODLIB)/kernel && \
cp $(obj-m) $(MODLIB)/kernel/$(obj) cp $(obj-m) $(MODLIB)/kernel/
modules_install: $(subdir-ym) modules_install: $(subdir-ym)
ifneq ($(obj-m),) ifneq ($(obj-m),)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment