Commit 4254bba1 authored by Kent Overstreet, committed by Jens Axboe

block: Kill bi_destructor

Now that we've got generic code for freeing bios allocated from bio
pools, this isn't needed anymore.

This patch also makes bio_free() static, since without bi_destructor
there should be no need for it to be called anywhere else.

bio_free() is now only called from bio_put, so we can refactor those a
bit - move some code from bio_put() to bio_free() and kill the redundant
bio->bi_next = NULL.

v5: Switch to BIO_KMALLOC_POOL ((void *)~0), per Boaz
v6: BIO_KMALLOC_POOL now NULL, drop bio_free's EXPORT_SYMBOL
v7: No #define BIO_KMALLOC_POOL anymore
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent ccc5c9ca
...@@ -465,7 +465,6 @@ struct bio { ...@@ -465,7 +465,6 @@ struct bio {
bio_end_io_t *bi_end_io; /* bi_end_io (bio) */ bio_end_io_t *bi_end_io; /* bi_end_io (bio) */
atomic_t bi_cnt; /* pin count: free when it hits zero */ atomic_t bi_cnt; /* pin count: free when it hits zero */
void *bi_private; void *bi_private;
bio_destructor_t *bi_destructor; /* bi_destructor (bio) */
}; };
With this multipage bio design: With this multipage bio design:
...@@ -647,10 +646,6 @@ for a non-clone bio. There are the 6 pools setup for different size biovecs, ...@@ -647,10 +646,6 @@ for a non-clone bio. There are the 6 pools setup for different size biovecs,
so bio_alloc(gfp_mask, nr_iovecs) will allocate a vec_list of the so bio_alloc(gfp_mask, nr_iovecs) will allocate a vec_list of the
given size from these slabs. given size from these slabs.
The bi_destructor() routine takes into account the possibility of the bio
having originated from a different source (see later discussions on
n/w to block transfers and kvec_cb)
The bio_get() routine may be used to hold an extra reference on a bio prior The bio_get() routine may be used to hold an extra reference on a bio prior
to i/o submission, if the bio fields are likely to be accessed after the to i/o submission, if the bio fields are likely to be accessed after the
i/o is issued (since the bio may otherwise get freed in case i/o completion i/o is issued (since the bio may otherwise get freed in case i/o completion
......
...@@ -2807,7 +2807,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src, ...@@ -2807,7 +2807,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
free_and_out: free_and_out:
if (bio) if (bio)
bio_free(bio, bs); bio_put(bio);
blk_rq_unprep_clone(rq); blk_rq_unprep_clone(rq);
return -ENOMEM; return -ENOMEM;
......
...@@ -233,26 +233,37 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, ...@@ -233,26 +233,37 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
return bvl; return bvl;
} }
void bio_free(struct bio *bio, struct bio_set *bs) static void __bio_free(struct bio *bio)
{ {
void *p; bio_disassociate_task(bio);
if (bio_has_allocated_vec(bio))
bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
if (bio_integrity(bio)) if (bio_integrity(bio))
bio_integrity_free(bio); bio_integrity_free(bio);
}
/* static void bio_free(struct bio *bio)
* If we have front padding, adjust the bio pointer before freeing {
*/ struct bio_set *bs = bio->bi_pool;
p = bio; void *p;
if (bs->front_pad)
__bio_free(bio);
if (bs) {
if (bio_has_allocated_vec(bio))
bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
/*
* If we have front padding, adjust the bio pointer before freeing
*/
p = bio;
p -= bs->front_pad; p -= bs->front_pad;
mempool_free(p, bs->bio_pool); mempool_free(p, bs->bio_pool);
} else {
/* Bio was allocated by bio_kmalloc() */
kfree(bio);
}
} }
EXPORT_SYMBOL(bio_free);
void bio_init(struct bio *bio) void bio_init(struct bio *bio)
{ {
...@@ -276,10 +287,7 @@ void bio_reset(struct bio *bio) ...@@ -276,10 +287,7 @@ void bio_reset(struct bio *bio)
{ {
unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS); unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
if (bio_integrity(bio)) __bio_free(bio);
bio_integrity_free(bio);
bio_disassociate_task(bio);
memset(bio, 0, BIO_RESET_BYTES); memset(bio, 0, BIO_RESET_BYTES);
bio->bi_flags = flags|(1 << BIO_UPTODATE); bio->bi_flags = flags|(1 << BIO_UPTODATE);
...@@ -362,13 +370,6 @@ struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs) ...@@ -362,13 +370,6 @@ struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
} }
EXPORT_SYMBOL(bio_alloc); EXPORT_SYMBOL(bio_alloc);
static void bio_kmalloc_destructor(struct bio *bio)
{
if (bio_integrity(bio))
bio_integrity_free(bio);
kfree(bio);
}
/** /**
* bio_kmalloc - allocate a bio for I/O using kmalloc() * bio_kmalloc - allocate a bio for I/O using kmalloc()
* @gfp_mask: the GFP_ mask given to the slab allocator * @gfp_mask: the GFP_ mask given to the slab allocator
...@@ -395,7 +396,6 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs) ...@@ -395,7 +396,6 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET; bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs; bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bio->bi_inline_vecs; bio->bi_io_vec = bio->bi_inline_vecs;
bio->bi_destructor = bio_kmalloc_destructor;
return bio; return bio;
} }
...@@ -431,20 +431,8 @@ void bio_put(struct bio *bio) ...@@ -431,20 +431,8 @@ void bio_put(struct bio *bio)
/* /*
* last put frees it * last put frees it
*/ */
if (atomic_dec_and_test(&bio->bi_cnt)) { if (atomic_dec_and_test(&bio->bi_cnt))
bio_disassociate_task(bio); bio_free(bio);
bio->bi_next = NULL;
/*
* This if statement is temporary - bi_pool is replacing
* bi_destructor, but bi_destructor will be taken out in another
* patch.
*/
if (bio->bi_pool)
bio_free(bio, bio->bi_pool);
else
bio->bi_destructor(bio);
}
} }
EXPORT_SYMBOL(bio_put); EXPORT_SYMBOL(bio_put);
......
...@@ -216,7 +216,6 @@ extern struct bio *bio_alloc(gfp_t, unsigned int); ...@@ -216,7 +216,6 @@ extern struct bio *bio_alloc(gfp_t, unsigned int);
extern struct bio *bio_kmalloc(gfp_t, unsigned int); extern struct bio *bio_kmalloc(gfp_t, unsigned int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *); extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
extern void bio_endio(struct bio *, int); extern void bio_endio(struct bio *, int);
struct request_queue; struct request_queue;
......
...@@ -84,11 +84,8 @@ struct bio { ...@@ -84,11 +84,8 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */ struct bio_vec *bi_io_vec; /* the actual vec list */
/* If bi_pool is non NULL, bi_destructor is not called */
struct bio_set *bi_pool; struct bio_set *bi_pool;
bio_destructor_t *bi_destructor; /* destructor */
/* /*
* We can inline a number of vecs at the end of the bio, to avoid * We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member * double allocations for a small number of bio_vecs. This member
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment