Commit 028867ac authored by Alasdair G Kergon, committed by Linus Torvalds

dm: use kmem_cache macro

Use new KMEM_CACHE() macro and make the newly-exposed structure names more
meaningful.  Also remove some superfluous casts and inlines (let a modern
compiler be the judge).
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79e15ae4
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
/* /*
* per bio private data * per bio private data
*/ */
struct crypt_io { struct dm_crypt_io {
struct dm_target *target; struct dm_target *target;
struct bio *base_bio; struct bio *base_bio;
struct work_struct work; struct work_struct work;
...@@ -106,7 +106,7 @@ struct crypt_config { ...@@ -106,7 +106,7 @@ struct crypt_config {
static struct kmem_cache *_crypt_io_pool; static struct kmem_cache *_crypt_io_pool;
static void clone_init(struct crypt_io *, struct bio *); static void clone_init(struct dm_crypt_io *, struct bio *);
/* /*
* Different IV generation algorithms: * Different IV generation algorithms:
...@@ -382,7 +382,7 @@ static int crypt_convert(struct crypt_config *cc, ...@@ -382,7 +382,7 @@ static int crypt_convert(struct crypt_config *cc,
static void dm_crypt_bio_destructor(struct bio *bio) static void dm_crypt_bio_destructor(struct bio *bio)
{ {
struct crypt_io *io = bio->bi_private; struct dm_crypt_io *io = bio->bi_private;
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
bio_free(bio, cc->bs); bio_free(bio, cc->bs);
...@@ -393,7 +393,7 @@ static int crypt_convert(struct crypt_config *cc, ...@@ -393,7 +393,7 @@ static int crypt_convert(struct crypt_config *cc,
* This should never violate the device limitations * This should never violate the device limitations
* May return a smaller bio when running out of pages * May return a smaller bio when running out of pages
*/ */
static struct bio *crypt_alloc_buffer(struct crypt_io *io, unsigned int size) static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
{ {
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
struct bio *clone; struct bio *clone;
...@@ -479,7 +479,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, ...@@ -479,7 +479,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc,
* One of the bios was finished. Check for completion of * One of the bios was finished. Check for completion of
* the whole request and correctly clean up the buffer. * the whole request and correctly clean up the buffer.
*/ */
static void dec_pending(struct crypt_io *io, int error) static void dec_pending(struct dm_crypt_io *io, int error)
{ {
struct crypt_config *cc = (struct crypt_config *) io->target->private; struct crypt_config *cc = (struct crypt_config *) io->target->private;
...@@ -503,7 +503,7 @@ static void dec_pending(struct crypt_io *io, int error) ...@@ -503,7 +503,7 @@ static void dec_pending(struct crypt_io *io, int error)
static struct workqueue_struct *_kcryptd_workqueue; static struct workqueue_struct *_kcryptd_workqueue;
static void kcryptd_do_work(struct work_struct *work); static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_queue_io(struct crypt_io *io) static void kcryptd_queue_io(struct dm_crypt_io *io)
{ {
INIT_WORK(&io->work, kcryptd_do_work); INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work); queue_work(_kcryptd_workqueue, &io->work);
...@@ -511,7 +511,7 @@ static void kcryptd_queue_io(struct crypt_io *io) ...@@ -511,7 +511,7 @@ static void kcryptd_queue_io(struct crypt_io *io)
static int crypt_endio(struct bio *clone, unsigned int done, int error) static int crypt_endio(struct bio *clone, unsigned int done, int error)
{ {
struct crypt_io *io = clone->bi_private; struct dm_crypt_io *io = clone->bi_private;
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
unsigned read_io = bio_data_dir(clone) == READ; unsigned read_io = bio_data_dir(clone) == READ;
...@@ -545,7 +545,7 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error) ...@@ -545,7 +545,7 @@ static int crypt_endio(struct bio *clone, unsigned int done, int error)
return error; return error;
} }
static void clone_init(struct crypt_io *io, struct bio *clone) static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{ {
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
...@@ -556,7 +556,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone) ...@@ -556,7 +556,7 @@ static void clone_init(struct crypt_io *io, struct bio *clone)
clone->bi_destructor = dm_crypt_bio_destructor; clone->bi_destructor = dm_crypt_bio_destructor;
} }
static void process_read(struct crypt_io *io) static void process_read(struct dm_crypt_io *io)
{ {
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio; struct bio *base_bio = io->base_bio;
...@@ -587,7 +587,7 @@ static void process_read(struct crypt_io *io) ...@@ -587,7 +587,7 @@ static void process_read(struct crypt_io *io)
generic_make_request(clone); generic_make_request(clone);
} }
static void process_write(struct crypt_io *io) static void process_write(struct dm_crypt_io *io)
{ {
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
struct bio *base_bio = io->base_bio; struct bio *base_bio = io->base_bio;
...@@ -644,7 +644,7 @@ static void process_write(struct crypt_io *io) ...@@ -644,7 +644,7 @@ static void process_write(struct crypt_io *io)
} }
} }
static void process_read_endio(struct crypt_io *io) static void process_read_endio(struct dm_crypt_io *io)
{ {
struct crypt_config *cc = io->target->private; struct crypt_config *cc = io->target->private;
struct convert_context ctx; struct convert_context ctx;
...@@ -657,7 +657,7 @@ static void process_read_endio(struct crypt_io *io) ...@@ -657,7 +657,7 @@ static void process_read_endio(struct crypt_io *io)
static void kcryptd_do_work(struct work_struct *work) static void kcryptd_do_work(struct work_struct *work)
{ {
struct crypt_io *io = container_of(work, struct crypt_io, work); struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
if (io->post_process) if (io->post_process)
process_read_endio(io); process_read_endio(io);
...@@ -939,7 +939,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio, ...@@ -939,7 +939,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context) union map_info *map_context)
{ {
struct crypt_config *cc = ti->private; struct crypt_config *cc = ti->private;
struct crypt_io *io; struct dm_crypt_io *io;
if (bio_barrier(bio)) if (bio_barrier(bio))
return -EOPNOTSUPP; return -EOPNOTSUPP;
...@@ -1062,9 +1062,7 @@ static int __init dm_crypt_init(void) ...@@ -1062,9 +1062,7 @@ static int __init dm_crypt_init(void)
{ {
int r; int r;
_crypt_io_pool = kmem_cache_create("dm-crypt_io", _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
sizeof(struct crypt_io),
0, 0, NULL, NULL);
if (!_crypt_io_pool) if (!_crypt_io_pool)
return -ENOMEM; return -ENOMEM;
......
...@@ -37,7 +37,7 @@ struct delay_c { ...@@ -37,7 +37,7 @@ struct delay_c {
unsigned writes; unsigned writes;
}; };
struct delay_info { struct dm_delay_info {
struct delay_c *context; struct delay_c *context;
struct list_head list; struct list_head list;
struct bio *bio; struct bio *bio;
...@@ -80,7 +80,7 @@ static void flush_bios(struct bio *bio) ...@@ -80,7 +80,7 @@ static void flush_bios(struct bio *bio)
static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all)
{ {
struct delay_info *delayed, *next; struct dm_delay_info *delayed, *next;
unsigned long next_expires = 0; unsigned long next_expires = 0;
int start_timer = 0; int start_timer = 0;
BIO_LIST(flush_bios); BIO_LIST(flush_bios);
...@@ -227,7 +227,7 @@ static void delay_dtr(struct dm_target *ti) ...@@ -227,7 +227,7 @@ static void delay_dtr(struct dm_target *ti)
static int delay_bio(struct delay_c *dc, int delay, struct bio *bio) static int delay_bio(struct delay_c *dc, int delay, struct bio *bio)
{ {
struct delay_info *delayed; struct dm_delay_info *delayed;
unsigned long expires = 0; unsigned long expires = 0;
if (!delay || !atomic_read(&dc->may_delay)) if (!delay || !atomic_read(&dc->may_delay))
...@@ -338,10 +338,7 @@ static int __init dm_delay_init(void) ...@@ -338,10 +338,7 @@ static int __init dm_delay_init(void)
goto bad_queue; goto bad_queue;
} }
delayed_cache = kmem_cache_create("dm-delay", delayed_cache = KMEM_CACHE(dm_delay_info, 0);
sizeof(struct delay_info),
__alignof__(struct delay_info),
0, NULL, NULL);
if (!delayed_cache) { if (!delayed_cache) {
DMERR("Couldn't create delayed bio cache."); DMERR("Couldn't create delayed bio cache.");
goto bad_memcache; goto bad_memcache;
......
...@@ -127,7 +127,7 @@ struct pstore { ...@@ -127,7 +127,7 @@ struct pstore {
struct dm_io_client *io_client; struct dm_io_client *io_client;
}; };
static inline unsigned int sectors_to_pages(unsigned int sectors) static unsigned sectors_to_pages(unsigned sectors)
{ {
return sectors / (PAGE_SIZE >> 9); return sectors / (PAGE_SIZE >> 9);
} }
...@@ -393,7 +393,7 @@ static int read_exceptions(struct pstore *ps) ...@@ -393,7 +393,7 @@ static int read_exceptions(struct pstore *ps)
return 0; return 0;
} }
static inline struct pstore *get_info(struct exception_store *store) static struct pstore *get_info(struct exception_store *store)
{ {
return (struct pstore *) store->context; return (struct pstore *) store->context;
} }
...@@ -480,7 +480,7 @@ static int persistent_read_metadata(struct exception_store *store) ...@@ -480,7 +480,7 @@ static int persistent_read_metadata(struct exception_store *store)
} }
static int persistent_prepare(struct exception_store *store, static int persistent_prepare(struct exception_store *store,
struct exception *e) struct dm_snap_exception *e)
{ {
struct pstore *ps = get_info(store); struct pstore *ps = get_info(store);
uint32_t stride; uint32_t stride;
...@@ -505,7 +505,7 @@ static int persistent_prepare(struct exception_store *store, ...@@ -505,7 +505,7 @@ static int persistent_prepare(struct exception_store *store,
} }
static void persistent_commit(struct exception_store *store, static void persistent_commit(struct exception_store *store,
struct exception *e, struct dm_snap_exception *e,
void (*callback) (void *, int success), void (*callback) (void *, int success),
void *callback_context) void *callback_context)
{ {
...@@ -616,7 +616,8 @@ static int transient_read_metadata(struct exception_store *store) ...@@ -616,7 +616,8 @@ static int transient_read_metadata(struct exception_store *store)
return 0; return 0;
} }
static int transient_prepare(struct exception_store *store, struct exception *e) static int transient_prepare(struct exception_store *store,
struct dm_snap_exception *e)
{ {
struct transient_c *tc = (struct transient_c *) store->context; struct transient_c *tc = (struct transient_c *) store->context;
sector_t size = get_dev_size(store->snap->cow->bdev); sector_t size = get_dev_size(store->snap->cow->bdev);
...@@ -631,9 +632,9 @@ static int transient_prepare(struct exception_store *store, struct exception *e) ...@@ -631,9 +632,9 @@ static int transient_prepare(struct exception_store *store, struct exception *e)
} }
static void transient_commit(struct exception_store *store, static void transient_commit(struct exception_store *store,
struct exception *e, struct dm_snap_exception *e,
void (*callback) (void *, int success), void (*callback) (void *, int success),
void *callback_context) void *callback_context)
{ {
/* Just succeed */ /* Just succeed */
callback(callback_context, 1); callback(callback_context, 1);
......
...@@ -83,7 +83,7 @@ struct multipath { ...@@ -83,7 +83,7 @@ struct multipath {
struct work_struct trigger_event; struct work_struct trigger_event;
/* /*
* We must use a mempool of mpath_io structs so that we * We must use a mempool of dm_mpath_io structs so that we
* can resubmit bios on error. * can resubmit bios on error.
*/ */
mempool_t *mpio_pool; mempool_t *mpio_pool;
...@@ -92,7 +92,7 @@ struct multipath { ...@@ -92,7 +92,7 @@ struct multipath {
/* /*
* Context information attached to each bio we process. * Context information attached to each bio we process.
*/ */
struct mpath_io { struct dm_mpath_io {
struct pgpath *pgpath; struct pgpath *pgpath;
struct dm_bio_details details; struct dm_bio_details details;
}; };
...@@ -122,7 +122,7 @@ static struct pgpath *alloc_pgpath(void) ...@@ -122,7 +122,7 @@ static struct pgpath *alloc_pgpath(void)
return pgpath; return pgpath;
} }
static inline void free_pgpath(struct pgpath *pgpath) static void free_pgpath(struct pgpath *pgpath)
{ {
kfree(pgpath); kfree(pgpath);
} }
...@@ -299,8 +299,8 @@ static int __must_push_back(struct multipath *m) ...@@ -299,8 +299,8 @@ static int __must_push_back(struct multipath *m)
dm_noflush_suspending(m->ti)); dm_noflush_suspending(m->ti));
} }
static int map_io(struct multipath *m, struct bio *bio, struct mpath_io *mpio, static int map_io(struct multipath *m, struct bio *bio,
unsigned was_queued) struct dm_mpath_io *mpio, unsigned was_queued)
{ {
int r = DM_MAPIO_REMAPPED; int r = DM_MAPIO_REMAPPED;
unsigned long flags; unsigned long flags;
...@@ -374,7 +374,7 @@ static void dispatch_queued_ios(struct multipath *m) ...@@ -374,7 +374,7 @@ static void dispatch_queued_ios(struct multipath *m)
int r; int r;
unsigned long flags; unsigned long flags;
struct bio *bio = NULL, *next; struct bio *bio = NULL, *next;
struct mpath_io *mpio; struct dm_mpath_io *mpio;
union map_info *info; union map_info *info;
spin_lock_irqsave(&m->lock, flags); spin_lock_irqsave(&m->lock, flags);
...@@ -795,7 +795,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio, ...@@ -795,7 +795,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio,
union map_info *map_context) union map_info *map_context)
{ {
int r; int r;
struct mpath_io *mpio; struct dm_mpath_io *mpio;
struct multipath *m = (struct multipath *) ti->private; struct multipath *m = (struct multipath *) ti->private;
if (bio_barrier(bio)) if (bio_barrier(bio))
...@@ -1014,7 +1014,7 @@ void dm_pg_init_complete(struct dm_path *path, unsigned err_flags) ...@@ -1014,7 +1014,7 @@ void dm_pg_init_complete(struct dm_path *path, unsigned err_flags)
* end_io handling * end_io handling
*/ */
static int do_end_io(struct multipath *m, struct bio *bio, static int do_end_io(struct multipath *m, struct bio *bio,
int error, struct mpath_io *mpio) int error, struct dm_mpath_io *mpio)
{ {
struct hw_handler *hwh = &m->hw_handler; struct hw_handler *hwh = &m->hw_handler;
unsigned err_flags = MP_FAIL_PATH; /* Default behavior */ unsigned err_flags = MP_FAIL_PATH; /* Default behavior */
...@@ -1075,8 +1075,8 @@ static int do_end_io(struct multipath *m, struct bio *bio, ...@@ -1075,8 +1075,8 @@ static int do_end_io(struct multipath *m, struct bio *bio,
static int multipath_end_io(struct dm_target *ti, struct bio *bio, static int multipath_end_io(struct dm_target *ti, struct bio *bio,
int error, union map_info *map_context) int error, union map_info *map_context)
{ {
struct multipath *m = (struct multipath *) ti->private; struct multipath *m = ti->private;
struct mpath_io *mpio = (struct mpath_io *) map_context->ptr; struct dm_mpath_io *mpio = map_context->ptr;
struct pgpath *pgpath = mpio->pgpath; struct pgpath *pgpath = mpio->pgpath;
struct path_selector *ps; struct path_selector *ps;
int r; int r;
...@@ -1346,8 +1346,7 @@ static int __init dm_multipath_init(void) ...@@ -1346,8 +1346,7 @@ static int __init dm_multipath_init(void)
int r; int r;
/* allocate a slab for the dm_ios */ /* allocate a slab for the dm_ios */
_mpio_cache = kmem_cache_create("dm_mpath", sizeof(struct mpath_io), _mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
0, 0, NULL, NULL);
if (!_mpio_cache) if (!_mpio_cache)
return -ENOMEM; return -ENOMEM;
......
This diff is collapsed.
...@@ -30,7 +30,7 @@ typedef sector_t chunk_t; ...@@ -30,7 +30,7 @@ typedef sector_t chunk_t;
* An exception is used where an old chunk of data has been * An exception is used where an old chunk of data has been
* replaced by a new one. * replaced by a new one.
*/ */
struct exception { struct dm_snap_exception {
struct list_head hash_list; struct list_head hash_list;
chunk_t old_chunk; chunk_t old_chunk;
...@@ -58,13 +58,13 @@ struct exception_store { ...@@ -58,13 +58,13 @@ struct exception_store {
* Find somewhere to store the next exception. * Find somewhere to store the next exception.
*/ */
int (*prepare_exception) (struct exception_store *store, int (*prepare_exception) (struct exception_store *store,
struct exception *e); struct dm_snap_exception *e);
/* /*
* Update the metadata with this exception. * Update the metadata with this exception.
*/ */
void (*commit_exception) (struct exception_store *store, void (*commit_exception) (struct exception_store *store,
struct exception *e, struct dm_snap_exception *e,
void (*callback) (void *, int success), void (*callback) (void *, int success),
void *callback_context); void *callback_context);
......
...@@ -45,7 +45,7 @@ struct dm_io { ...@@ -45,7 +45,7 @@ struct dm_io {
* One of these is allocated per target within a bio. Hopefully * One of these is allocated per target within a bio. Hopefully
* this will be simplified out one day. * this will be simplified out one day.
*/ */
struct target_io { struct dm_target_io {
struct dm_io *io; struct dm_io *io;
struct dm_target *ti; struct dm_target *ti;
union map_info info; union map_info info;
...@@ -54,7 +54,7 @@ struct target_io { ...@@ -54,7 +54,7 @@ struct target_io {
union map_info *dm_get_mapinfo(struct bio *bio) union map_info *dm_get_mapinfo(struct bio *bio)
{ {
if (bio && bio->bi_private) if (bio && bio->bi_private)
return &((struct target_io *)bio->bi_private)->info; return &((struct dm_target_io *)bio->bi_private)->info;
return NULL; return NULL;
} }
...@@ -132,14 +132,12 @@ static int __init local_init(void) ...@@ -132,14 +132,12 @@ static int __init local_init(void)
int r; int r;
/* allocate a slab for the dm_ios */ /* allocate a slab for the dm_ios */
_io_cache = kmem_cache_create("dm_io", _io_cache = KMEM_CACHE(dm_io, 0);
sizeof(struct dm_io), 0, 0, NULL, NULL);
if (!_io_cache) if (!_io_cache)
return -ENOMEM; return -ENOMEM;
/* allocate a slab for the target ios */ /* allocate a slab for the target ios */
_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io), _tio_cache = KMEM_CACHE(dm_target_io, 0);
0, 0, NULL, NULL);
if (!_tio_cache) { if (!_tio_cache) {
kmem_cache_destroy(_io_cache); kmem_cache_destroy(_io_cache);
return -ENOMEM; return -ENOMEM;
...@@ -325,22 +323,22 @@ static int dm_blk_ioctl(struct inode *inode, struct file *file, ...@@ -325,22 +323,22 @@ static int dm_blk_ioctl(struct inode *inode, struct file *file,
return r; return r;
} }
static inline struct dm_io *alloc_io(struct mapped_device *md) static struct dm_io *alloc_io(struct mapped_device *md)
{ {
return mempool_alloc(md->io_pool, GFP_NOIO); return mempool_alloc(md->io_pool, GFP_NOIO);
} }
static inline void free_io(struct mapped_device *md, struct dm_io *io) static void free_io(struct mapped_device *md, struct dm_io *io)
{ {
mempool_free(io, md->io_pool); mempool_free(io, md->io_pool);
} }
static inline struct target_io *alloc_tio(struct mapped_device *md) static struct dm_target_io *alloc_tio(struct mapped_device *md)
{ {
return mempool_alloc(md->tio_pool, GFP_NOIO); return mempool_alloc(md->tio_pool, GFP_NOIO);
} }
static inline void free_tio(struct mapped_device *md, struct target_io *tio) static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{ {
mempool_free(tio, md->tio_pool); mempool_free(tio, md->tio_pool);
} }
...@@ -498,7 +496,7 @@ static void dec_pending(struct dm_io *io, int error) ...@@ -498,7 +496,7 @@ static void dec_pending(struct dm_io *io, int error)
static int clone_endio(struct bio *bio, unsigned int done, int error) static int clone_endio(struct bio *bio, unsigned int done, int error)
{ {
int r = 0; int r = 0;
struct target_io *tio = bio->bi_private; struct dm_target_io *tio = bio->bi_private;
struct mapped_device *md = tio->io->md; struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io; dm_endio_fn endio = tio->ti->type->end_io;
...@@ -558,7 +556,7 @@ static sector_t max_io_len(struct mapped_device *md, ...@@ -558,7 +556,7 @@ static sector_t max_io_len(struct mapped_device *md,
} }
static void __map_bio(struct dm_target *ti, struct bio *clone, static void __map_bio(struct dm_target *ti, struct bio *clone,
struct target_io *tio) struct dm_target_io *tio)
{ {
int r; int r;
sector_t sector; sector_t sector;
...@@ -672,7 +670,7 @@ static void __clone_and_map(struct clone_info *ci) ...@@ -672,7 +670,7 @@ static void __clone_and_map(struct clone_info *ci)
struct bio *clone, *bio = ci->bio; struct bio *clone, *bio = ci->bio;
struct dm_target *ti = dm_table_find_target(ci->map, ci->sector); struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti); sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
struct target_io *tio; struct dm_target_io *tio;
/* /*
* Allocate a target io object. * Allocate a target io object.
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
static struct workqueue_struct *_kcopyd_wq; static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work; static struct work_struct _kcopyd_work;
static inline void wake(void) static void wake(void)
{ {
queue_work(_kcopyd_wq, &_kcopyd_work); queue_work(_kcopyd_wq, &_kcopyd_work);
} }
...@@ -226,10 +226,7 @@ static LIST_HEAD(_pages_jobs); ...@@ -226,10 +226,7 @@ static LIST_HEAD(_pages_jobs);
static int jobs_init(void) static int jobs_init(void)
{ {
_job_cache = kmem_cache_create("kcopyd-jobs", _job_cache = KMEM_CACHE(kcopyd_job, 0);
sizeof(struct kcopyd_job),
__alignof__(struct kcopyd_job),
0, NULL, NULL);
if (!_job_cache) if (!_job_cache)
return -ENOMEM; return -ENOMEM;
...@@ -258,7 +255,7 @@ static void jobs_exit(void) ...@@ -258,7 +255,7 @@ static void jobs_exit(void)
* Functions to push and pop a job onto the head of a given job * Functions to push and pop a job onto the head of a given job
* list. * list.
*/ */
static inline struct kcopyd_job *pop(struct list_head *jobs) static struct kcopyd_job *pop(struct list_head *jobs)
{ {
struct kcopyd_job *job = NULL; struct kcopyd_job *job = NULL;
unsigned long flags; unsigned long flags;
...@@ -274,7 +271,7 @@ static inline struct kcopyd_job *pop(struct list_head *jobs) ...@@ -274,7 +271,7 @@ static inline struct kcopyd_job *pop(struct list_head *jobs)
return job; return job;
} }
static inline void push(struct list_head *jobs, struct kcopyd_job *job) static void push(struct list_head *jobs, struct kcopyd_job *job)
{ {
unsigned long flags; unsigned long flags;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment