Commit 6143e2ce authored by Christoph Hellwig, committed by NeilBrown

raid5-cache: use bio chaining

Simplify the bio completion handler by using bio chaining and submitting
bios as soon as they are full.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaohua Li <shli@fb.com>
Signed-off-by: NeilBrown <neilb@suse.com>
parent 2b8ef16e
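For context, the pattern the patch adopts looks roughly like the sketch below. This is an illustrative outline only, not code from the patch: the sketch_* names are hypothetical, while bio_chain(), bio_add_page(), bio_kmalloc() and the submit_bio(rw, bio) form are the block-layer API of this kernel era, exactly as used in the diff itself. The idea: only the first bio of a chain carries the real completion handler; whenever the current bio fills up, a fresh bio is chained to it and the full one is submitted immediately.

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Only the head of the chain gets the real end_io handler. */
	static void sketch_log_endio(struct bio *bio)
	{
		/*
		 * bio_chain() raises the head's remaining count for each
		 * chained child, so this runs exactly once, after every
		 * bio in the chain has finished.
		 */
		bio_put(bio);
	}

	static struct bio *sketch_start(struct block_device *bdev, sector_t sector)
	{
		struct bio *head = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);

		head->bi_rw = WRITE;
		head->bi_bdev = bdev;
		head->bi_iter.bi_sector = sector;
		head->bi_end_io = sketch_log_endio;
		return head;
	}

	/* Append a page; on overflow, chain a new bio and submit the full one. */
	static struct bio *sketch_append(struct bio *cur, struct page *page)
	{
		if (!bio_add_page(cur, page, PAGE_SIZE, 0)) {
			struct bio *next = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL,
						       BIO_MAX_PAGES);

			/* caller would set next's bi_rw/bi_bdev/bi_iter here */
			bio_chain(next, cur);	/* cur now completes after next */
			submit_bio(WRITE, cur);	/* the full bio can go out now */
			cur = next;

			if (!bio_add_page(cur, page, PAGE_SIZE, 0))
				BUG();		/* a fresh bio must take one page */
		}
		return cur;
	}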
@@ -100,8 +100,6 @@ struct r5l_io_unit {
 	struct page *meta_page;	/* store meta block */
 	int meta_offset;	/* current offset in meta_page */
 
-	struct bio_list bios;
-	atomic_t pending_io;	/* pending bios not written to log yet */
 	struct bio *current_bio;/* current_bio accepting new data */
 
 	atomic_t pending_stripe;/* how many stripes not flushed to raid */
@@ -112,6 +110,7 @@ struct r5l_io_unit {
 	struct list_head stripe_list; /* stripes added to the io_unit */
 
 	int state;
+	bool need_split_bio;
 };
 
 /* r5l_io_unit state */
@@ -215,9 +214,6 @@ static void r5l_log_endio(struct bio *bio)
 
 	bio_put(bio);
 
-	if (!atomic_dec_and_test(&io->pending_io))
-		return;
-
 	spin_lock_irqsave(&log->io_list_lock, flags);
 	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
 	if (log->need_cache_flush)
@@ -235,7 +231,6 @@ static void r5l_submit_current_io(struct r5l_log *log)
 {
 	struct r5l_io_unit *io = log->current_io;
 	struct r5l_meta_block *block;
-	struct bio *bio;
 	unsigned long flags;
 	u32 crc;
 
@@ -252,22 +247,17 @@ static void r5l_submit_current_io(struct r5l_log *log)
 	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
 	spin_unlock_irqrestore(&log->io_list_lock, flags);
 
-	while ((bio = bio_list_pop(&io->bios)))
-		submit_bio(WRITE, bio);
+	submit_bio(WRITE, io->current_bio);
 }
 
-static struct bio *r5l_bio_alloc(struct r5l_log *log, struct r5l_io_unit *io)
+static struct bio *r5l_bio_alloc(struct r5l_log *log)
 {
 	struct bio *bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
 
 	bio->bi_rw = WRITE;
 	bio->bi_bdev = log->rdev->bdev;
 	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
-	bio->bi_end_io = r5l_log_endio;
-	bio->bi_private = io;
 
-	bio_list_add(&io->bios, bio);
-	atomic_inc(&io->pending_io);
 	return bio;
 }
 
@@ -283,7 +273,7 @@ static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
 	 * of BLOCK_SECTORS.
 	 */
 	if (log->log_start == 0)
-		io->current_bio = NULL;
+		io->need_split_bio = true;
 
 	io->log_end = log->log_start;
 }
@@ -296,7 +286,6 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
 	/* We can't handle memory allocate failure so far */
 	io = kmem_cache_zalloc(log->io_kc, GFP_NOIO | __GFP_NOFAIL);
 	io->log = log;
-	bio_list_init(&io->bios);
 	INIT_LIST_HEAD(&io->log_sibling);
 	INIT_LIST_HEAD(&io->stripe_list);
 	io->state = IO_UNIT_RUNNING;
@@ -312,7 +301,9 @@ static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
 	io->meta_offset = sizeof(struct r5l_meta_block);
 	io->seq = log->seq++;
 
-	io->current_bio = r5l_bio_alloc(log, io);
+	io->current_bio = r5l_bio_alloc(log);
+	io->current_bio->bi_end_io = r5l_log_endio;
+	io->current_bio->bi_private = io;
 	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);
 
 	r5_reserve_log_entry(log, io);
@@ -361,15 +352,18 @@ static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
 {
 	struct r5l_io_unit *io = log->current_io;
 
-alloc_bio:
-	if (!io->current_bio)
-		io->current_bio = r5l_bio_alloc(log, io);
+	if (io->need_split_bio) {
+		struct bio *prev = io->current_bio;
 
-	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
-		io->current_bio = NULL;
-		goto alloc_bio;
+		io->current_bio = r5l_bio_alloc(log);
+		bio_chain(io->current_bio, prev);
+
+		submit_bio(WRITE, prev);
 	}
 
+	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
+		BUG();
+
 	r5_reserve_log_entry(log, io);
 }
 
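The reason the endio path can drop pending_io entirely: bio_chain(bio, parent) hands the child a stock completion handler that merely propagates to its parent. Paraphrased from block/bio.c of roughly this era (the exact body may differ slightly; see the kernel source for the authoritative version), the helper amounts to:

	void bio_chain(struct bio *bio, struct bio *parent)
	{
		BUG_ON(bio->bi_private || bio->bi_end_io);

		bio->bi_private = parent;
		bio->bi_end_io = bio_chain_endio;	/* propagates completion */
		bio_inc_remaining(parent);	/* parent also waits for this child */
	}

Each chained child thus just drops its parent's remaining count when it finishes, the decrements cascade up to the first bio, and r5l_log_endio fires exactly once per io_unit — which is why the atomic_dec_and_test() on pending_io and the per-bio bi_end_io assignment in r5l_bio_alloc() could be deleted.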