Commit f30a7d0c authored by Tejun Heo, committed by Jens Axboe

writeback: restructure try_writeback_inodes_sb[_nr]()

try_to_writeback_inodes_sb_nr() wraps writeback_inodes_sb_nr() so that it
handles s_umount locking and skips if writeback is already in
progress.  The in-progress test is performed on the root wb
(bdi_writeback), which isn't sufficient for cgroup writeback support.
The test must be done per-wb.

To prepare for the change, this patch factors out
__writeback_inodes_sb_nr() from writeback_inodes_sb_nr(), adds a
@skip_if_busy parameter, and moves the in-progress test right before
queueing the wb_writeback_work.  try_to_writeback_inodes_sb_nr() now
just grabs s_umount and invokes __writeback_inodes_sb_nr() with
@skip_if_busy set.  This way, the later addition of multiple-wb
handling can skip only the wbs which already have writeback in
progress.

This swaps the order of the in-progress test and the s_umount test,
which can flip the return value when writeback is in progress and
s_umount is held by someone else, but this shouldn't cause any
meaningful difference.  It's a fringe condition and the return value
is an unsynchronized hint anyway.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 98754bf7
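
For context, here is a minimal caller-side sketch (not part of this patch) of how the restructured try_to_writeback_inodes_sb_nr() is typically used.  The helper name is hypothetical and WB_REASON_FS_FREE_SPACE is just one plausible reason, assuming a filesystem that wants to opportunistically flush dirty pages when it runs low on space.  After this change, a false return only means s_umount could not be taken; a true return means the work was queued on the wb, or skipped because that wb already had writeback in progress.

#include <linux/fs.h>
#include <linux/writeback.h>

/*
 * Hypothetical caller sketch: opportunistically kick writeback for @sb.
 * Nonblocking -- if s_umount is contended the attempt is simply dropped.
 */
static void example_kick_writeback(struct super_block *sb, unsigned long nr_pages)
{
	if (!try_to_writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE))
		pr_debug("%s: s_umount contended, writeback not attempted\n",
			 sb->s_id);
}
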
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1581,19 +1581,8 @@ static void wait_sb_inodes(struct super_block *sb)
 	iput(old_inode);
 }
 
-/**
- * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
- * @sb: the superblock
- * @nr: the number of pages to write
- * @reason: reason why some writeback work initiated
- *
- * Start writeback on some inodes on this super_block. No guarantees are made
- * on how many (if any) will be written, and this function does not wait
- * for IO completion of submitted IO.
- */
-void writeback_inodes_sb_nr(struct super_block *sb,
-			    unsigned long nr,
-			    enum wb_reason reason)
+static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
+				     enum wb_reason reason, bool skip_if_busy)
 {
 	DEFINE_WB_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
@@ -1609,9 +1598,30 @@ void writeback_inodes_sb_nr(struct super_block *sb,
 	if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
 		return;
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+	if (skip_if_busy && writeback_in_progress(&bdi->wb))
+		return;
+
 	wb_queue_work(&bdi->wb, &work);
 	wb_wait_for_completion(bdi, &done);
 }
 
+/**
+ * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
+ * @sb: the superblock
+ * @nr: the number of pages to write
+ * @reason: reason why some writeback work initiated
+ *
+ * Start writeback on some inodes on this super_block. No guarantees are made
+ * on how many (if any) will be written, and this function does not wait
+ * for IO completion of submitted IO.
+ */
+void writeback_inodes_sb_nr(struct super_block *sb,
+			    unsigned long nr,
+			    enum wb_reason reason)
+{
+	__writeback_inodes_sb_nr(sb, nr, reason, false);
+}
 EXPORT_SYMBOL(writeback_inodes_sb_nr);
 
 /**
@@ -1638,19 +1648,15 @@ EXPORT_SYMBOL(writeback_inodes_sb);
  * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
  */
-int try_to_writeback_inodes_sb_nr(struct super_block *sb,
-				  unsigned long nr,
-				  enum wb_reason reason)
+bool try_to_writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
+				   enum wb_reason reason)
 {
-	if (writeback_in_progress(&sb->s_bdi->wb))
-		return 1;
-
 	if (!down_read_trylock(&sb->s_umount))
-		return 0;
+		return false;
 
-	writeback_inodes_sb_nr(sb, nr, reason);
+	__writeback_inodes_sb_nr(sb, nr, reason, true);
 	up_read(&sb->s_umount);
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
 
@@ -1662,7 +1668,7 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
  * Implement by try_to_writeback_inodes_sb_nr()
  * Returns 1 if writeback was started, 0 if not.
  */
-int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
+bool try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
 	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -93,8 +93,8 @@ struct bdi_writeback;
 void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
 			    enum wb_reason reason);
-int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
-int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
-				  enum wb_reason reason);
+bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+				   enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
 void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);