Commit edadfb10 authored by Christoph Hellwig, committed by Jens Axboe

writeback: split writeback_inodes_wb

The case where we have a superblock doesn't require a loop here, as we scan
over all inodes in writeback_sb_inodes.  Split it out into a separate helper
to make the code simpler.  This also allows us to get rid of the sb member in
struct writeback_control, which was rather out of place there.

Also update the comments in writeback_sb_inodes that explain the handling
of inodes from wrong superblocks.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 9c3a8ee8
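
For orientation, a condensed sketch of the call flow after the split, pieced
together from the hunks below (it is not part of the commit itself; locking
and page accounting are elided):

	/* Sketch only, condensed from the diff below. */
	if (args->sb) {
		/*
		 * Caller holds sb->s_umount: write only this superblock.
		 * __writeback_inodes_sb() calls writeback_sb_inodes() with
		 * only_this_sb == true, which redirties inodes belonging to
		 * other superblocks instead of stopping.
		 */
		__writeback_inodes_sb(args->sb, wb, &wbc);
	} else {
		/*
		 * General path: writeback_inodes_wb() walks b_io, pins each
		 * inode's superblock and calls writeback_sb_inodes() with
		 * only_this_sb == false, which returns as soon as it reaches
		 * an inode from a different superblock.
		 */
		writeback_inodes_wb(wb, &wbc);
	}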
fs/fs-writeback.c

@@ -554,29 +554,41 @@ static bool pin_sb_for_writeback(struct super_block *sb)
 
 /*
  * Write a portion of b_io inodes which belong to @sb.
- * If @wbc->sb != NULL, then find and write all such
+ *
+ * If @only_this_sb is true, then find and write all such
  * inodes. Otherwise write only ones which go sequentially
  * in reverse order.
+ *
  * Return 1, if the caller writeback routine should be
  * interrupted. Otherwise return 0.
  */
-static int writeback_sb_inodes(struct super_block *sb,
-			       struct bdi_writeback *wb,
-			       struct writeback_control *wbc)
+static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
+		struct writeback_control *wbc, bool only_this_sb)
 {
 	while (!list_empty(&wb->b_io)) {
 		long pages_skipped;
 		struct inode *inode = list_entry(wb->b_io.prev,
 						 struct inode, i_list);
-		if (wbc->sb && sb != inode->i_sb) {
-			/* super block given and doesn't
-			   match, skip this inode */
-			redirty_tail(inode);
-			continue;
-		}
-		if (sb != inode->i_sb)
-			/* finish with this superblock */
+
+		if (inode->i_sb != sb) {
+			if (only_this_sb) {
+				/*
+				 * We only want to write back data for this
+				 * superblock, move all inodes not belonging
+				 * to it back onto the dirty list.
+				 */
+				redirty_tail(inode);
+				continue;
+			}
+
+			/*
+			 * The inode belongs to a different superblock.
+			 * Bounce back to the caller to unpin this and
+			 * pin the next superblock.
+			 */
 			return 0;
+		}
+
 		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
 			requeue_io(inode);
 			continue;

@@ -629,29 +641,12 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
 				struct inode, i_list);
 		struct super_block *sb = inode->i_sb;
 
-		if (wbc->sb) {
-			/*
-			 * We are requested to write out inodes for a specific
-			 * superblock. This means we already have s_umount
-			 * taken by the caller which also waits for us to
-			 * complete the writeout.
-			 */
-			if (sb != wbc->sb) {
-				redirty_tail(inode);
-				continue;
-			}
-
-			WARN_ON(!rwsem_is_locked(&sb->s_umount));
-
-			ret = writeback_sb_inodes(sb, wb, wbc);
-		} else {
-			if (!pin_sb_for_writeback(sb)) {
-				requeue_io(inode);
-				continue;
-			}
-			ret = writeback_sb_inodes(sb, wb, wbc);
-			drop_super(sb);
+		if (!pin_sb_for_writeback(sb)) {
+			requeue_io(inode);
+			continue;
 		}
+		ret = writeback_sb_inodes(sb, wb, wbc, false);
+		drop_super(sb);
 
 		if (ret)
 			break;

@@ -660,6 +655,19 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
 	/* Leave any unwritten inodes on b_io */
 }
 
+static void __writeback_inodes_sb(struct super_block *sb,
+		struct bdi_writeback *wb, struct writeback_control *wbc)
+{
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+	wbc->wb_start = jiffies; /* livelock avoidance */
+	spin_lock(&inode_lock);
+	if (!wbc->for_kupdate || list_empty(&wb->b_io))
+		queue_io(wb, wbc->older_than_this);
+	writeback_sb_inodes(sb, wb, wbc, true);
+	spin_unlock(&inode_lock);
+}
+
 /*
  * The maximum number of pages to writeout in a single bdi flush/kupdate
  * operation.  We do this so we don't hold I_SYNC against an inode for

@@ -698,7 +706,6 @@ static long wb_writeback(struct bdi_writeback *wb,
 		struct wb_writeback_args *args)
 {
 	struct writeback_control wbc = {
-		.sb			= args->sb,
 		.sync_mode		= args->sync_mode,
 		.older_than_this	= NULL,
 		.for_kupdate		= args->for_kupdate,

@@ -736,6 +743,9 @@ static long wb_writeback(struct bdi_writeback *wb,
 		wbc.more_io = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		wbc.pages_skipped = 0;
-		writeback_inodes_wb(wb, &wbc);
+		if (args->sb)
+			__writeback_inodes_sb(args->sb, wb, &wbc);
+		else
+			writeback_inodes_wb(wb, &wbc);
 		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

include/linux/writeback.h

@@ -27,8 +27,6 @@ enum writeback_sync_modes {
  * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
-	struct super_block *sb;		/* if !NULL, only write inodes from
-					   this super_block */
 	enum writeback_sync_modes sync_mode;
 	unsigned long *older_than_this;	/* If !NULL, only write back inodes
 					   older than this */