Commit c6bf3f0e authored by Christoph Hellwig, committed by Jens Axboe

block: use an on-stack bio in blkdev_issue_flush

There is no point in allocating memory for a synchronous flush.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Acked-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3175199a
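Why this is safe: submit_bio_wait() does not return until the flush has completed, so the bio's lifetime is bounded by the caller's stack frame. That lets the bio live on the stack, initialized with bio_init(), instead of being heap-allocated with bio_alloc() and released with bio_put(), and the gfp_mask parameter disappears along with the allocation. A minimal sketch of the on-stack pattern, under the bio API of this kernel version (example_sync_flush is an illustrative name, not part of the commit; the real code is the block/blk-flush.c hunk below):

static int example_sync_flush(struct block_device *bdev)
{
	struct bio bio;

	/* A flush carries no data, so no bio_vec table is needed. */
	bio_init(&bio, NULL, 0);
	bio_set_dev(&bio, bdev);
	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	/*
	 * submit_bio_wait() blocks until completion, so the on-stack bio
	 * cannot go out of scope while the request is in flight; nothing
	 * was allocated, so no bio_put() is needed.
	 */
	return submit_bio_wait(&bio);
}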
diff --git a/block/blk-flush.c b/block/blk-flush.c
@@ -432,23 +432,18 @@ void blk_insert_flush(struct request *rq)
 
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:	blockdev to issue flush for
- * @gfp_mask:	memory allocation flags (for bio_alloc)
  *
  * Description:
  *    Issue a flush for the block device in question.
  */
-int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+int blkdev_issue_flush(struct block_device *bdev)
 {
-	struct bio *bio;
-	int ret = 0;
+	struct bio bio;
 
-	bio = bio_alloc(gfp_mask, 0);
-	bio_set_dev(bio, bdev);
-	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
-
-	ret = submit_bio_wait(bio);
-	bio_put(bio);
-	return ret;
+	bio_init(&bio, NULL, 0);
+	bio_set_dev(&bio, bdev);
+	bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
+	return submit_bio_wait(&bio);
 }
 EXPORT_SYMBOL(blkdev_issue_flush);
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
@@ -819,7 +819,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
 	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
 			     mblk->page);
 	if (ret == 0)
-		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+		ret = blkdev_issue_flush(dev->bdev);
 
 	return ret;
 }
@@ -862,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 
 	/* Flush drive cache (this will also sync data) */
 	if (ret == 0)
-		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+		ret = blkdev_issue_flush(dev->bdev);
 
 	return ret;
 }
@@ -933,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 
 	/* If there are no dirty metadata blocks, just flush the device cache */
 	if (list_empty(&write_list)) {
-		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO);
+		ret = blkdev_issue_flush(dev->bdev);
 		goto err;
 	}
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
@@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
 	}
 
 	/* flush the disk cache after recovery if necessary */
-	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL);
+	ret = blkdev_issue_flush(rdev->bdev);
 out:
 	__free_page(page);
 	return ret;
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
@@ -333,7 +333,7 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req)
 
 u16 nvmet_bdev_flush(struct nvmet_req *req)
 {
-	if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL))
+	if (blkdev_issue_flush(req->ns->bdev))
 		return NVME_SC_INTERNAL | NVME_SC_DNR;
 
 	return 0;
 }
diff --git a/fs/block_dev.c b/fs/block_dev.c
@@ -680,7 +680,7 @@ int blkdev_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 	 * i_mutex and doing so causes performance issues with concurrent
 	 * O_SYNC writers to a block device.
 	 */
-	error = blkdev_issue_flush(bdev, GFP_KERNEL);
+	error = blkdev_issue_flush(bdev);
 	if (error == -EOPNOTSUPP)
 		error = 0;
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
@@ -361,7 +361,7 @@ int exfat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 	if (err)
 		return err;
 
-	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+	return blkdev_issue_flush(inode->i_sb->s_bdev);
 }
 
 const struct file_operations exfat_file_operations = {
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
@@ -1076,7 +1076,7 @@ static int ext4_fc_perform_commit(journal_t *journal)
 	 * flush before we start writing fast commit blocks.
 	 */
 	if (journal->j_fs_dev != journal->j_dev)
-		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+		blkdev_issue_flush(journal->j_fs_dev);
 
 	blk_start_plug(&plug);
 	if (sbi->s_fc_bytes == 0) {
@@ -1535,7 +1535,7 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl)
 out:
 	iput(inode);
 	if (!ret)
-		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+		blkdev_issue_flush(sb->s_bdev);
 
 	return 0;
 }
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
@@ -174,7 +174,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 		ret = ext4_fsync_journal(inode, datasync, &needs_barrier);
 
 	if (needs_barrier) {
-		err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+		err = blkdev_issue_flush(inode->i_sb->s_bdev);
 		if (!ret)
 			ret = err;
 	}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
@@ -1583,7 +1583,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 	if (ret < 0)
 		goto err_out;
 	if (barrier)
-		blkdev_issue_flush(sb->s_bdev, GFP_NOFS);
+		blkdev_issue_flush(sb->s_bdev);
 
 skip_zeroout:
 	ext4_lock_group(sb, group);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
@@ -5709,7 +5709,7 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
 			needs_barrier = true;
 	if (needs_barrier) {
 		int err;
-		err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+		err = blkdev_issue_flush(sb->s_bdev);
 		if (!ret)
 			ret = err;
 	}
diff --git a/fs/fat/file.c b/fs/fat/file.c
@@ -195,7 +195,7 @@ int fat_file_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
 	if (err)
 		return err;
 
-	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+	return blkdev_issue_flush(inode->i_sb->s_bdev);
 }
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
@@ -340,7 +340,7 @@ int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
 	}
 
 	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
-		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+		blkdev_issue_flush(inode->i_sb->s_bdev);
 
 	inode_unlock(inode);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
@@ -239,7 +239,7 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
 	mutex_unlock(&sbi->vh_mutex);
 
 	if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
-		blkdev_issue_flush(sb->s_bdev, GFP_KERNEL);
+		blkdev_issue_flush(sb->s_bdev);
 
 	return error;
 }
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
@@ -416,7 +416,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
 	 * jbd2_cleanup_journal_tail() doesn't get called all that often.
 	 */
 	if (journal->j_flags & JBD2_BARRIER)
-		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+		blkdev_issue_flush(journal->j_fs_dev);
 
 	return __jbd2_update_log_tail(journal, first_tid, blocknr);
 }
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
@@ -825,7 +825,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	if (commit_transaction->t_need_data_flush &&
 	    (journal->j_fs_dev != journal->j_dev) &&
 	    (journal->j_flags & JBD2_BARRIER))
-		blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS);
+		blkdev_issue_flush(journal->j_fs_dev);
 
 	/* Done it all: now write the commit record asynchronously. */
 	if (jbd2_has_feature_async_commit(journal)) {
@@ -932,7 +932,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	stats.run.rs_blocks_logged++;
 	if (jbd2_has_feature_async_commit(journal) &&
 	    journal->j_flags & JBD2_BARRIER) {
-		blkdev_issue_flush(journal->j_dev, GFP_NOFS);
+		blkdev_issue_flush(journal->j_dev);
 	}
 
 	if (err)
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
@@ -326,7 +326,7 @@ int jbd2_journal_recover(journal_t *journal)
 		err = err2;
 	/* Make sure all replayed data is on permanent storage */
 	if (journal->j_flags & JBD2_BARRIER) {
-		err2 = blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL);
+		err2 = blkdev_issue_flush(journal->j_fs_dev);
 		if (!err)
 			err = err2;
 	}
diff --git a/fs/libfs.c b/fs/libfs.c
@@ -1117,7 +1117,7 @@ int generic_file_fsync(struct file *file, loff_t start, loff_t end,
 	err = __generic_file_fsync(file, start, end, datasync);
 	if (err)
 		return err;
-	return blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+	return blkdev_issue_flush(inode->i_sb->s_bdev);
 }
 EXPORT_SYMBOL(generic_file_fsync);
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
@@ -375,7 +375,7 @@ static inline int nilfs_flush_device(struct the_nilfs *nilfs)
 	 */
 	smp_wmb();
 
-	err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL);
+	err = blkdev_issue_flush(nilfs->ns_bdev);
 	if (err != -EIO)
 		err = 0;
 	return err;
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
@@ -194,7 +194,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
 		needs_barrier = true;
 	err = jbd2_complete_transaction(journal, commit_tid);
 	if (needs_barrier) {
-		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
 		if (!err)
 			err = ret;
 	}
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
@@ -159,7 +159,7 @@ static int reiserfs_sync_file(struct file *filp, loff_t start, loff_t end,
 	barrier_done = reiserfs_commit_for_inode(inode);
 	reiserfs_write_unlock(inode->i_sb);
 	if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
-		blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+		blkdev_issue_flush(inode->i_sb->s_bdev);
 	inode_unlock(inode);
 	if (barrier_done < 0)
 		return barrier_done;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
@@ -342,7 +342,7 @@ void
 xfs_blkdev_issue_flush(
 	xfs_buftarg_t		*buftarg)
 {
-	blkdev_issue_flush(buftarg->bt_bdev, GFP_NOFS);
+	blkdev_issue_flush(buftarg->bt_bdev);
 }
 
 STATIC void
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
@@ -541,7 +541,7 @@ static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
 	if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
 		ret = file_write_and_wait_range(file, start, end);
 	if (!ret)
-		ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL);
+		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
 
 	if (ret)
 		zonefs_io_error(inode, true);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
@@ -1288,7 +1288,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 		 !list_empty(&plug->cb_list));
 }
 
-int blkdev_issue_flush(struct block_device *, gfp_t);
+int blkdev_issue_flush(struct block_device *bdev);
 long nr_blockdev_pages(void);
 #else /* CONFIG_BLOCK */
 
 struct blk_plug {
@@ -1316,7 +1316,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 	return false;
 }
 
-static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
+static inline int blkdev_issue_flush(struct block_device *bdev)
 {
 	return 0;
 }