Commit 19901165 authored by Linus Torvalds

Merge tag 'for-5.16/inode-sync-2021-10-29' of git://git.kernel.dk/linux-block

Pull block inode sync updates from Jens Axboe:
 "This contains improvements to how bdev inode syncing is handled,
  unifying the API"

* tag 'for-5.16/inode-sync-2021-10-29' of git://git.kernel.dk/linux-block:
  block: simplify the block device syncing code
  ntfs3: use sync_blockdev_nowait
  fat: use sync_blockdev_nowait
  btrfs: use sync_blockdev
  xen-blkback: use sync_blockdev
  block: remove __sync_blockdev
  fs: remove __sync_filesystem
parents b6773cdb 1e03a36b
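
In short, the series retires the internal __sync_blockdev()/iterate_bdevs() interfaces (and the __sync_filesystem() helper) in favour of the helpers declared in the header hunk at the end of the diff, roughly:

        int sync_blockdev(struct block_device *bdev);         /* write out and wait */
        int sync_blockdev_nowait(struct block_device *bdev);  /* start writeback, do not wait */
        void sync_bdevs(bool wait);                           /* write out / wait on all open block devices */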
@@ -185,14 +185,13 @@ int sb_min_blocksize(struct super_block *sb, int size)
 
 EXPORT_SYMBOL(sb_min_blocksize);
 
-int __sync_blockdev(struct block_device *bdev, int wait)
+int sync_blockdev_nowait(struct block_device *bdev)
 {
         if (!bdev)
                 return 0;
-        if (!wait)
-                return filemap_flush(bdev->bd_inode->i_mapping);
-        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
+        return filemap_flush(bdev->bd_inode->i_mapping);
 }
+EXPORT_SYMBOL_GPL(sync_blockdev_nowait);
 
 /*
  * Write out and wait upon all the dirty data associated with a block
@@ -200,7 +199,9 @@ int __sync_blockdev(struct block_device *bdev, int wait)
  */
 int sync_blockdev(struct block_device *bdev)
 {
-        return __sync_blockdev(bdev, 1);
+        if (!bdev)
+                return 0;
+        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
 }
 EXPORT_SYMBOL(sync_blockdev);
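
Taken together, the two functions above split the old __sync_blockdev(bdev, wait) into an explicit waiting and non-waiting helper. The driver and filesystem hunks below substitute them for open-coded page-cache calls; caller-side the conversion looks roughly like this (example_flush_and_wait() is an illustrative name, not part of the patch):

#include <linux/blkdev.h>

/* Illustrative only: the caller-side substitution made in the hunks below. */
static int example_flush_and_wait(struct block_device *bdev)
{
        /* was: return filemap_write_and_wait(bdev->bd_inode->i_mapping); */
        return sync_blockdev(bdev);
}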
@@ -1020,7 +1021,7 @@ int __invalidate_device(struct block_device *bdev, bool kill_dirty)
 }
 EXPORT_SYMBOL(__invalidate_device);
 
-void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
+void sync_bdevs(bool wait)
 {
         struct inode *inode, *old_inode = NULL;
@@ -1051,8 +1052,19 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
                 bdev = I_BDEV(inode);
 
                 mutex_lock(&bdev->bd_disk->open_mutex);
-                if (bdev->bd_openers)
-                        func(bdev, arg);
+                if (!bdev->bd_openers) {
+                        ; /* skip */
+                } else if (wait) {
+                        /*
+                         * We keep the error status of individual mapping so
+                         * that applications can catch the writeback error using
+                         * fsync(2). See filemap_fdatawait_keep_errors() for
+                         * details.
+                         */
+                        filemap_fdatawait_keep_errors(inode->i_mapping);
+                } else {
+                        filemap_fdatawrite(inode->i_mapping);
+                }
                 mutex_unlock(&bdev->bd_disk->open_mutex);
 
                 spin_lock(&blockdev_superblock->s_inode_list_lock);
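
sync_bdevs() folds the old fdatawrite_one_bdev()/fdatawait_one_bdev() callbacks (removed further down) into the walk itself: wait == false kicks off writeback on every opened block device, wait == true waits for it while preserving each mapping's error bits so a later fsync(2) still sees them. The per-mapping pattern in isolation, as a sketch rather than patch code:

#include <linux/fs.h>

/* Sketch of the two-pass pattern sync_bdevs() applies to each open bdev inode. */
static void example_write_then_wait(struct address_space *mapping)
{
        /* pass 1 (wait == false): start writeback, do not block */
        filemap_fdatawrite(mapping);

        /* pass 2 (wait == true): wait, but keep the mapping's error flags so a
         * subsequent fsync(2) on the device still reports the failure */
        filemap_fdatawait_keep_errors(mapping);
}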
@@ -98,7 +98,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
                 return;
         }
 
-        err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
+        err = sync_blockdev(blkif->vbd.bdev);
         if (err) {
                 xenbus_dev_error(blkif->be->dev, err, "block flush");
                 return;
@@ -508,7 +508,7 @@ btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
         }
 
         if (flush)
-                filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
+                sync_blockdev(*bdev);
         ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
         if (ret) {
                 blkdev_put(*bdev, flags);
@@ -1940,10 +1940,8 @@ int fat_flush_inodes(struct super_block *sb, struct inode *i1, struct inode *i2)
                 ret = writeback_inode(i1);
         if (!ret && i2)
                 ret = writeback_inode(i2);
-        if (!ret) {
-                struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
-                ret = filemap_flush(mapping);
-        }
+        if (!ret)
+                ret = sync_blockdev_nowait(sb->s_bdev);
         return ret;
 }
 EXPORT_SYMBOL_GPL(fat_flush_inodes);
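
fat_flush_inodes() and ntfs_flush_inodes() (in the ntfs3 hunk below) end up with the same shape: write back the one or two inodes involved, then start, without waiting, writeback of the device the superblock sits on. Condensed, with write_inode_now() standing in for the filesystems' local writeback_inode() helper and example_flush_inodes() a made-up name, the shape is roughly:

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/writeback.h>

/* Sketch of the shared flush-inodes shape after this series; illustrative only. */
static int example_flush_inodes(struct super_block *sb,
                                struct inode *i1, struct inode *i2)
{
        int ret = 0;

        if (i1)
                ret = write_inode_now(i1, 0);
        if (!ret && i2)
                ret = write_inode_now(i2, 0);
        if (!ret)
                ret = sync_blockdev_nowait(sb->s_bdev); /* returns 0 if s_bdev is NULL */
        return ret;
}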
@@ -23,22 +23,11 @@ struct pipe_inode_info;
 #ifdef CONFIG_BLOCK
 extern void __init bdev_cache_init(void);
 
-extern int __sync_blockdev(struct block_device *bdev, int wait);
-void iterate_bdevs(void (*)(struct block_device *, void *), void *);
 void emergency_thaw_bdev(struct super_block *sb);
 #else
 static inline void bdev_cache_init(void)
 {
 }
-
-static inline int __sync_blockdev(struct block_device *bdev, int wait)
-{
-        return 0;
-}
-static inline void iterate_bdevs(void (*f)(struct block_device *, void *),
-                void *arg)
-{
-}
 static inline int emergency_thaw_bdev(struct super_block *sb)
 {
         return 0;
@@ -1046,7 +1046,7 @@ int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
         if (!ret && i2)
                 ret = writeback_inode(i2);
         if (!ret)
-                ret = filemap_flush(sb->s_bdev->bd_inode->i_mapping);
+                ret = sync_blockdev_nowait(sb->s_bdev);
         return ret;
 }
@@ -3,6 +3,7 @@
  * High-level sync()-related operations
  */
 
+#include <linux/blkdev.h>
 #include <linux/kernel.h>
 #include <linux/file.h>
 #include <linux/fs.h>
@@ -21,25 +22,6 @@
 #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
                         SYNC_FILE_RANGE_WAIT_AFTER)
 
-/*
- * Do the filesystem syncing work. For simple filesystems
- * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
- * submit IO for these buffers via __sync_blockdev(). This also speeds up the
- * wait == 1 case since in that case write_inode() functions do
- * sync_dirty_buffer() and thus effectively write one block at a time.
- */
-static int __sync_filesystem(struct super_block *sb, int wait)
-{
-        if (wait)
-                sync_inodes_sb(sb);
-        else
-                writeback_inodes_sb(sb, WB_REASON_SYNC);
-
-        if (sb->s_op->sync_fs)
-                sb->s_op->sync_fs(sb, wait);
-
-        return __sync_blockdev(sb->s_bdev, wait);
-}
-
 /*
  * Write out and wait upon all dirty data associated with this
  * superblock. Filesystem data as well as the underlying block
@@ -61,10 +43,25 @@ int sync_filesystem(struct super_block *sb)
         if (sb_rdonly(sb))
                 return 0;
 
-        ret = __sync_filesystem(sb, 0);
+        /*
+         * Do the filesystem syncing work. For simple filesystems
+         * writeback_inodes_sb(sb) just dirties buffers with inodes so we have
+         * to submit I/O for these buffers via sync_blockdev(). This also
+         * speeds up the wait == 1 case since in that case write_inode()
+         * methods call sync_dirty_buffer() and thus effectively write one block
+         * at a time.
+         */
+        writeback_inodes_sb(sb, WB_REASON_SYNC);
+        if (sb->s_op->sync_fs)
+                sb->s_op->sync_fs(sb, 0);
+        ret = sync_blockdev_nowait(sb->s_bdev);
         if (ret < 0)
                 return ret;
-        return __sync_filesystem(sb, 1);
+
+        sync_inodes_sb(sb);
+        if (sb->s_op->sync_fs)
+                sb->s_op->sync_fs(sb, 1);
+        return sync_blockdev(sb->s_bdev);
 }
 EXPORT_SYMBOL(sync_filesystem);
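
With __sync_filesystem() gone, sync_filesystem() spells out its two passes directly: a non-waiting pass (writeback_inodes_sb(), ->sync_fs(sb, 0), sync_blockdev_nowait()) followed by a waiting pass (sync_inodes_sb(), ->sync_fs(sb, 1), sync_blockdev()). A filesystem only provides ->sync_fs; purely as an illustration of how the wait argument maps onto the new helpers (examplefs_sync_fs() is hypothetical, not from this patch):

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Hypothetical ->sync_fs sketch: mirrors the two passes made by sync_filesystem(). */
static int examplefs_sync_fs(struct super_block *sb, int wait)
{
        if (!wait)
                /* non-waiting pass: get the backing device's writeback going */
                return sync_blockdev_nowait(sb->s_bdev);
        /* waiting pass: make sure the data actually reached the device */
        return sync_blockdev(sb->s_bdev);
}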
@@ -81,21 +78,6 @@ static void sync_fs_one_sb(struct super_block *sb, void *arg)
                 sb->s_op->sync_fs(sb, *(int *)arg);
 }
 
-static void fdatawrite_one_bdev(struct block_device *bdev, void *arg)
-{
-        filemap_fdatawrite(bdev->bd_inode->i_mapping);
-}
-
-static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
-{
-        /*
-         * We keep the error status of individual mapping so that
-         * applications can catch the writeback error using fsync(2).
-         * See filemap_fdatawait_keep_errors() for details.
-         */
-        filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping);
-}
-
 /*
  * Sync everything. We start by waking flusher threads so that most of
  * writeback runs on all devices in parallel. Then we sync all inodes reliably
@@ -114,8 +96,8 @@ void ksys_sync(void)
         iterate_supers(sync_inodes_one_sb, NULL);
         iterate_supers(sync_fs_one_sb, &nowait);
         iterate_supers(sync_fs_one_sb, &wait);
-        iterate_bdevs(fdatawrite_one_bdev, NULL);
-        iterate_bdevs(fdatawait_one_bdev, NULL);
+        sync_bdevs(false);
+        sync_bdevs(true);
         if (unlikely(laptop_mode))
                 laptop_sync_completion();
 }
@@ -136,10 +118,10 @@ static void do_sync_work(struct work_struct *work)
          */
         iterate_supers(sync_inodes_one_sb, &nowait);
         iterate_supers(sync_fs_one_sb, &nowait);
-        iterate_bdevs(fdatawrite_one_bdev, NULL);
+        sync_bdevs(false);
         iterate_supers(sync_inodes_one_sb, &nowait);
         iterate_supers(sync_fs_one_sb, &nowait);
-        iterate_bdevs(fdatawrite_one_bdev, NULL);
+        sync_bdevs(false);
         printk("Emergency Sync complete\n");
         kfree(work);
 }
@@ -1304,6 +1304,8 @@ int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart,
 #ifdef CONFIG_BLOCK
 void invalidate_bdev(struct block_device *bdev);
 int sync_blockdev(struct block_device *bdev);
+int sync_blockdev_nowait(struct block_device *bdev);
+void sync_bdevs(bool wait);
 #else
 static inline void invalidate_bdev(struct block_device *bdev)
 {
@@ -1312,6 +1314,13 @@ static inline int sync_blockdev(struct block_device *bdev)
 {
         return 0;
 }
+static inline int sync_blockdev_nowait(struct block_device *bdev)
+{
+        return 0;
+}
+static inline void sync_bdevs(bool wait)
+{
+}
 #endif
 
 int fsync_bdev(struct block_device *bdev);
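
The !CONFIG_BLOCK stubs keep callers free of #ifdefs: common code built without block support, such as the sync paths above, compiles against the same declarations and the stubs simply return 0 or do nothing. For instance (example_flush_super() is illustrative only):

#include <linux/blkdev.h>
#include <linux/fs.h>

/* Illustrative caller; with CONFIG_BLOCK=n the stub makes this return 0. */
static int example_flush_super(struct super_block *sb)
{
        return sync_blockdev_nowait(sb->s_bdev);
}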