Commit 940a6d34 authored by Gu Zheng, committed by Jaegeuk Kim

f2fs: move all the bio initialization into __bio_alloc

Move all the bio initialization into __bio_alloc, and add some minor cleanups.

v3:
  Use 'bool' rather than 'int' as Kim suggested.

v2:
  Use 'is_read' rather than 'rw' as Yu Chao suggested.
  Remove the needless initialization of bio->bi_private.
Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
parent ba0697ec
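
For quick reference, this is the consolidated helper the patch introduces, lifted from the hunks below. SECTOR_FROM_BLOCK() and the two end_io callbacks are pre-existing f2fs definitions, and bi_bdev/bi_sector reflect the struct bio layout of this kernel generation (before the bi_iter reorganisation); the inline comments are editorial.

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
                               int npages, bool is_read)
{
        struct bio *bio;

        /* No failure on bio allocation */
        bio = bio_alloc(GFP_NOIO, npages);

        /* target device, start sector, and completion callback set in one place */
        bio->bi_bdev = sbi->sb->s_bdev;
        bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
        bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;

        return bio;
}

Callers then collapse to a single call, e.g. bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw)); in f2fs_submit_page_bio(), as the diff below shows.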
@@ -24,20 +24,6 @@
 #include "segment.h"
 #include <trace/events/f2fs.h>
 
-/*
- * Low-level block read/write IO operations.
- */
-static struct bio *__bio_alloc(struct block_device *bdev, int npages)
-{
-        struct bio *bio;
-
-        /* No failure on bio allocation */
-        bio = bio_alloc(GFP_NOIO, npages);
-        bio->bi_bdev = bdev;
-        bio->bi_private = NULL;
-        return bio;
-}
-
 static void f2fs_read_end_io(struct bio *bio, int err)
 {
         const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -93,6 +79,24 @@ static void f2fs_write_end_io(struct bio *bio, int err)
         bio_put(bio);
 }
 
+/*
+ * Low-level block read/write IO operations.
+ */
+static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
+                                int npages, bool is_read)
+{
+        struct bio *bio;
+
+        /* No failure on bio allocation */
+        bio = bio_alloc(GFP_NOIO, npages);
+
+        bio->bi_bdev = sbi->sb->s_bdev;
+        bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+        bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
+
+        return bio;
+}
+
 static void __submit_merged_bio(struct f2fs_bio_info *io)
 {
         struct f2fs_io_info *fio = &io->fio;
@@ -104,16 +108,15 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
         rw = fio->rw | fio->rw_flag;
 
         if (is_read_io(rw)) {
-                trace_f2fs_submit_read_bio(io->sbi->sb, rw, fio->type, io->bio);
+                trace_f2fs_submit_read_bio(io->sbi->sb, rw,
+                                                fio->type, io->bio);
                 submit_bio(rw, io->bio);
-                io->bio = NULL;
-                return;
-        }
-        trace_f2fs_submit_write_bio(io->sbi->sb, rw, fio->type, io->bio);
-
+        } else {
+                trace_f2fs_submit_write_bio(io->sbi->sb, rw,
+                                                fio->type, io->bio);
         /*
-         * META_FLUSH is only from the checkpoint procedure, and we should wait
-         * this metadata bio for FS consistency.
+         * META_FLUSH is only from the checkpoint procedure, and we
+         * should wait this metadata bio for FS consistency.
          */
         if (fio->type == META_FLUSH) {
                 DECLARE_COMPLETION_ONSTACK(wait);
@@ -123,6 +126,8 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
         } else {
                 submit_bio(rw, io->bio);
         }
+        }
+
         io->bio = NULL;
 }
 
@@ -152,17 +157,12 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
                                         block_t blk_addr, int rw)
 {
-        struct block_device *bdev = sbi->sb->s_bdev;
         struct bio *bio;
 
         trace_f2fs_submit_page_bio(page, blk_addr, rw);
 
         /* Allocate a new bio */
-        bio = __bio_alloc(bdev, 1);
-
-        /* Initialize the bio */
-        bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
-        bio->bi_end_io = is_read_io(rw) ? f2fs_read_end_io : f2fs_write_end_io;
+        bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));
 
         if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
                 bio_put(bio);
@@ -178,17 +178,16 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
                 block_t blk_addr, struct f2fs_io_info *fio)
 {
         enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
-        struct block_device *bdev = sbi->sb->s_bdev;
         struct f2fs_bio_info *io;
-        int bio_blocks;
+        bool is_read = is_read_io(fio->rw);
 
-        io = is_read_io(fio->rw) ? &sbi->read_io : &sbi->write_io[btype];
+        io = is_read ? &sbi->read_io : &sbi->write_io[btype];
 
         verify_block_addr(sbi, blk_addr);
 
         mutex_lock(&io->io_mutex);
 
-        if (!is_read_io(fio->rw))
+        if (!is_read)
                 inc_page_count(sbi, F2FS_WRITEBACK);
 
         if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
@@ -196,17 +195,10 @@ void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
                 __submit_merged_bio(io);
 alloc_new:
         if (io->bio == NULL) {
-                bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
-                io->bio = __bio_alloc(bdev, bio_blocks);
-                io->bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
-                io->bio->bi_end_io = is_read_io(fio->rw) ? f2fs_read_end_io :
-                                                        f2fs_write_end_io;
+                int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
+
+                io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
                 io->fio = *fio;
-                /*
-                 * The end_io will be assigned at the sumbission phase.
-                 * Until then, let bio_add_page() merge consecutive IOs as much
-                 * as possible.
-                 */
         }
 
         if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <