Commit 3c4d7165 authored by Christoph Hellwig, committed by Jens Axboe

writeback: queue work on stack in writeback_inodes_sb

If we want to rely on s_umount in the caller, we need to wait for the I/O
submission to complete before returning to the caller.  Refactor
bdi_sync_writeback into a bdi_queue_work_onstack helper and use it for this
case.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 7f0e7bed
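
The point of the on-stack work item is that bdi_queue_work_onstack() does not
return until bdi_wait_on_work_done() has fired, so the struct bdi_work can
live on the caller's stack rather than being allocated, and a caller holding
s_umount knows the submission happened before the lock is dropped. Below is a
minimal userspace sketch of this queue-on-stack-and-wait pattern, with
pthreads standing in for the flusher thread and the wait_on_bit machinery;
all names here are hypothetical, not the fs-writeback.c code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct bdi_work: lives on the caller's stack. */
struct work {
	bool		done;
	pthread_mutex_t	lock;
	pthread_cond_t	cond;
};

/* A one-slot "work list" plus the lock/condvar the flusher sleeps on. */
static struct work *pending;
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;

/* Stand-in for the bdi flusher thread: pick up the work, do it, signal done. */
static void *flusher(void *arg)
{
	struct work *w;

	(void)arg;
	pthread_mutex_lock(&queue_lock);
	while (!pending)
		pthread_cond_wait(&queue_cond, &queue_lock);
	w = pending;
	pending = NULL;
	pthread_mutex_unlock(&queue_lock);

	/* ... the actual writeback would happen here ... */

	pthread_mutex_lock(&w->lock);
	w->done = true;			/* analogous to releasing the work item */
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

/* Analogue of bdi_queue_work_onstack(): queue, then wait before returning. */
static void queue_work_onstack(struct work *w)
{
	pthread_mutex_lock(&queue_lock);
	pending = w;
	pthread_cond_signal(&queue_cond);
	pthread_mutex_unlock(&queue_lock);

	/* Waiting here is what makes the on-stack work item safe. */
	pthread_mutex_lock(&w->lock);
	while (!w->done)
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
}

int main(void)
{
	pthread_t t;
	struct work w = { .done = false };

	pthread_mutex_init(&w.lock, NULL);
	pthread_cond_init(&w.cond, NULL);

	pthread_create(&t, NULL, flusher, NULL);
	queue_work_onstack(&w);		/* returns only after the flusher is done */
	pthread_join(t, NULL);
	printf("work completed before the caller returned\n");
	return 0;
}
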
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -178,30 +178,22 @@ static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 }
 
 /**
- * bdi_sync_writeback - start and wait for writeback
- * @bdi: the backing device to write from
+ * bdi_queue_work_onstack - start and wait for writeback
  * @sb: write inodes from this super_block
  *
  * Description:
- *   This does WB_SYNC_ALL data integrity writeback and waits for the
- *   IO to complete.  Callers must hold the sb s_umount semaphore for
+ *   This function initiates writeback and waits for the operation to
+ *   complete. Callers must hold the sb s_umount semaphore for
  *   reading, to avoid having the super disappear before we are done.
  */
-static void bdi_sync_writeback(struct backing_dev_info *bdi,
-			       struct super_block *sb)
+static void bdi_queue_work_onstack(struct wb_writeback_args *args)
 {
-	struct wb_writeback_args args = {
-		.sb		= sb,
-		.sync_mode	= WB_SYNC_ALL,
-		.nr_pages	= LONG_MAX,
-		.range_cyclic	= 0,
-	};
 	struct bdi_work work;
 
-	bdi_work_init(&work, &args);
+	bdi_work_init(&work, args);
 	__set_bit(WS_ONSTACK, &work.state);
 
-	bdi_queue_work(bdi, &work);
+	bdi_queue_work(args->sb->s_bdi, &work);
 	bdi_wait_on_work_done(&work);
 }
 
@@ -944,7 +936,7 @@ int bdi_writeback_task(struct bdi_writeback *wb)
 
 /*
  * Schedule writeback for all backing devices. This does WB_SYNC_NONE
- * writeback, for integrity writeback see bdi_sync_writeback().
+ * writeback, for integrity writeback see bdi_queue_work_onstack().
  */
 static void bdi_writeback_all(struct super_block *sb, long nr_pages)
 {
@@ -1183,12 +1175,15 @@ void writeback_inodes_sb(struct super_block *sb)
 {
 	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
-	long nr_to_write;
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_NONE,
+	};
 
-	nr_to_write = nr_dirty + nr_unstable +
+	args.nr_pages = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
+	bdi_queue_work_onstack(&args);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1218,7 +1213,14 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  */
 void sync_inodes_sb(struct super_block *sb)
 {
-	bdi_sync_writeback(sb->s_bdi, sb);
+	struct wb_writeback_args args = {
+		.sb		= sb,
+		.sync_mode	= WB_SYNC_ALL,
+		.nr_pages	= LONG_MAX,
+		.range_cyclic	= 0,
+	};
+
+	bdi_queue_work_onstack(&args);
 	wait_sb_inodes(sb);
 }
 EXPORT_SYMBOL(sync_inodes_sb);
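
With writeback_inodes_sb() now funnelled through the same helper, a caller
that takes s_umount gets the ordering the commit message relies on. A sketch
of the calling convention (illustrative only; s_umount is the super_block's
rw_semaphore, taken with the standard down_read/up_read helpers):

	down_read(&sb->s_umount);
	writeback_inodes_sb(sb);	/* returns only after I/O submission completes */
	up_read(&sb->s_umount);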