Commit 1e03a36b authored by Christoph Hellwig, committed by Jens Axboe

block: simplify the block device syncing code

Get rid of the indirections and just provide a sync_bdevs
helper for the generic sync code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20211019062530.2174626-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 680e667b
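
As a quick orientation (illustration only, not part of the patch): on the caller side in the generic sync code, the two callback-driven passes over all block devices become calls to a single helper that takes a bool, so the write-out/wait policy now lives inside sync_bdevs() itself:

	/* Before: generic code supplies per-bdev callbacks */
	iterate_bdevs(fdatawrite_one_bdev, NULL);	/* start writeback */
	iterate_bdevs(fdatawait_one_bdev, NULL);	/* wait, keeping per-mapping errors */

	/* After: one helper, the wait pass selected by argument */
	sync_bdevs(false);	/* start writeback on all open block devices */
	sync_bdevs(true);	/* wait for completion, preserving errors for fsync(2) */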
@@ -1021,7 +1021,7 @@ int __invalidate_device(struct block_device *bdev, bool kill_dirty)
 }
 EXPORT_SYMBOL(__invalidate_device);
 
-void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+void sync_bdevs(bool wait)
 {
 	struct inode *inode, *old_inode = NULL;
@@ -1052,8 +1052,19 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
 		bdev = I_BDEV(inode);
 
 		mutex_lock(&bdev->bd_disk->open_mutex);
-		if (bdev->bd_openers)
-			func(bdev, arg);
+		if (!bdev->bd_openers) {
+			; /* skip */
+		} else if (wait) {
+			/*
+			 * We keep the error status of individual mapping so
+			 * that applications can catch the writeback error using
+			 * fsync(2). See filemap_fdatawait_keep_errors() for
+			 * details.
+			 */
+			filemap_fdatawait_keep_errors(inode->i_mapping);
+		} else {
+			filemap_fdatawrite(inode->i_mapping);
+		}
 		mutex_unlock(&bdev->bd_disk->open_mutex);
 
 		spin_lock(&blockdev_superblock->s_inode_list_lock);
@@ -23,17 +23,11 @@ struct pipe_inode_info;
 #ifdef CONFIG_BLOCK
 extern void __init bdev_cache_init(void);
 
-void iterate_bdevs(void (*)(struct block_device *, void *), void *);
 void emergency_thaw_bdev(struct super_block *sb);
 #else
 static inline void bdev_cache_init(void)
 {
 }
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *),
-		void *arg)
-{
-}
 static inline int emergency_thaw_bdev(struct super_block *sb)
 {
 	return 0;
@@ -78,21 +78,6 @@ static void sync_fs_one_sb(struct super_block *sb, void *arg)
 	sb->s_op->sync_fs(sb, *(int *)arg);
 }
 
-static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
-{
-	filemap_fdatawrite(bdev->bd_inode->i_mapping);
-}
-
-static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
-{
-	/*
-	 * We keep the error status of individual mapping so that
-	 * applications can catch the writeback error using fsync(2).
-	 * See filemap_fdatawait_keep_errors() for details.
-	 */
-	filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
-}
-
 /*
  * Sync everything. We start by waking flusher threads so that most of
  * writeback runs on all devices in parallel. Then we sync all inodes reliably
@@ -111,8 +96,8 @@ void ksys_sync(void)
 	iterate_supers(sync_inodes_one_sb, NULL);
 	iterate_supers(sync_fs_one_sb, &nowait);
 	iterate_supers(sync_fs_one_sb, &wait);
-	iterate_bdevs(fdatawrite_one_bdev, NULL);
-	iterate_bdevs(fdatawait_one_bdev, NULL);
+	sync_bdevs(false);
+	sync_bdevs(true);
 	if (unlikely(laptop_mode))
 		laptop_sync_completion();
 }
@@ -133,10 +118,10 @@ static void do_sync_work(struct work_struct *work)
 	 */
 	iterate_supers(sync_inodes_one_sb, &nowait);
 	iterate_supers(sync_fs_one_sb, &nowait);
-	iterate_bdevs(fdatawrite_one_bdev, NULL);
+	sync_bdevs(false);
 	iterate_supers(sync_inodes_one_sb, &nowait);
 	iterate_supers(sync_fs_one_sb, &nowait);
-	iterate_bdevs(fdatawrite_one_bdev, NULL);
+	sync_bdevs(false);
 	printk("Emergency Sync complete\n");
 	kfree(work);
 }
@@ -1267,6 +1267,7 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
 void invalidate_bdev(struct block_device *bdev);
 int sync_blockdev(struct block_device *bdev);
 int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
 #else
 static inline void invalidate_bdev(struct block_device *bdev)
 {
@@ -1279,6 +1280,9 @@ static inline int sync_blockdev_nowait(struct block_device *bdev)
 {
 	return 0;
 }
+static inline void sync_bdevs(bool wait)
+{
+}
 #endif
 
 int fsync_bdev(struct block_device *bdev);