Commit 2e6883bd authored by Fengguang Wu, committed by Linus Torvalds

writeback: introduce writeback_control.more_io to indicate more io

After dirtying a 100M file, the normal behavior is to start writeback
for all of its data after a 30s delay.  But sometimes the following
happens instead:

	- after 30s:    ~4M
	- after 5s:     ~4M
	- after 5s:     all remaining 92M

Some analysis shows that the internal I/O dispatch queues go like this:

		s_io            s_more_io
		-------------------------
	1)	100M,1K         0
	2)	1K              96M
	3)	0               96M

1) initial state with a 100M file and a 1K file
2) 4M written, nr_to_write <= 0, so write more
3) 1K written, nr_to_write > 0, no more writes (BUG)
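
To make the sequence concrete, here is a toy user-space model of the
dispatch (hypothetical names such as sync_pass(); sizes are in 4K pages,
so 25600 pages ~= 100M and MAX_WRITEBACK_PAGES = 1024 ~= 4M).  It is a
sketch of the queue mechanics above, not the kernel code:

	#include <stdio.h>
	#include <string.h>

	#define MAX_WRITEBACK_PAGES	1024	/* ~4M per pass */

	struct inode { long dirty_pages; };

	/* One sync_sb_inodes()-style pass: drain s_io under a page
	 * budget, parking inodes that exhaust it on s_more_io. */
	static long sync_pass(struct inode *s_io, int *nio,
			      struct inode *s_more_io, int *nmore)
	{
		long nr_to_write = MAX_WRITEBACK_PAGES;

		while (*nio > 0 && nr_to_write > 0) {
			struct inode *i = &s_io[0];
			long chunk = i->dirty_pages < nr_to_write ?
				     i->dirty_pages : nr_to_write;

			i->dirty_pages -= chunk;
			nr_to_write -= chunk;
			if (i->dirty_pages > 0)	/* budget gone: park it */
				s_more_io[(*nmore)++] = *i;
			memmove(&s_io[0], &s_io[1], --(*nio) * sizeof(*i));
		}
		return nr_to_write;	/* >0 does NOT mean "all clean" */
	}

	int main(void)
	{
		struct inode s_io[2] = { { 25600 }, { 1 } };  /* 100M, 1K */
		struct inode s_more_io[2];
		int nio = 2, nmore = 0;
		long left;

		/* pass 1 returns 0: state (2) above */
		left = sync_pass(s_io, &nio, s_more_io, &nmore);
		printf("pass 1 left: %ld\n", left);
		/* pass 2 returns 1023: state (3), the BUG case */
		left = sync_pass(s_io, &nio, s_more_io, &nmore);
		printf("pass 2 left: %ld, parked inodes: %d\n", left, nmore);
		return 0;
	}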

nr_to_write > 0 in (3) fools the upper layer into thinking that all data
has been written out.  The big dirty file is actually still sitting in
s_more_io.  We cannot simply splice s_more_io back to s_io as soon as
s_io becomes empty and let the loop in generic_sync_sb_inodes()
continue: this may starve newly expired inodes in s_dirty.  It is also
not an option to draw inodes from both s_more_io and s_dirty and let the
loop go on: this might lead to livelocks, and might also starve other
superblocks at sync time (well, kupdate may still starve some
superblocks; that's another bug).
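
With the toy model above, the rejected splice-back variant would look
roughly like this in the caller (again only a sketch):

	/* Rejected: splice s_more_io back whenever s_io drains, and
	 * keep looping.  A writer that keeps redirtying the big file
	 * can pin this loop indefinitely, while inodes newly expired
	 * onto s_dirty never get picked up. */
	while (nio > 0 || nmore > 0) {
		if (nio == 0) {		/* splice s_more_io -> s_io */
			memcpy(s_io, s_more_io, nmore * sizeof(s_io[0]));
			nio = nmore;
			nmore = 0;
		}
		sync_pass(s_io, &nio, s_more_io, &nmore);
	}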

We have to return when a full scan of s_io completes, so nr_to_write > 0
does not necessarily mean that "all data are written".  This patch
introduces the flag writeback_control.more_io to indicate this
situation.  With it, the big dirty file no longer has to wait for the
next kupdate invocation 5s later.
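
In terms of the toy model, the fix is for each pass to report whether
anything was parked, and for the caller to trust "nr_to_write > 0" only
when nothing was (a sketch mirroring the background_writeout() change in
the diff below):

	for (;;) {
		long left = sync_pass(s_io, &nio, s_more_io, &nmore);

		if (left > 0 && nmore == 0)	/* nmore > 0 is what   */
			break;			/* wbc.more_io conveys */
		/* else back off briefly (congestion_wait()) and let  */
		/* the next pass splice s_more_io back onto s_io      */
		if (nio == 0) {
			memcpy(s_io, s_more_io, nmore * sizeof(s_io[0]));
			nio = nmore;
			nmore = 0;
		}
	}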

Cc: David Chinner <dgc@sgi.com>
Cc: Ken Chen <kenchen@google.com>
Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1f7decf6
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -473,6 +473,8 @@ sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
 		if (wbc->nr_to_write <= 0)
 			break;
 	}
+	if (!list_empty(&sb->s_more_io))
+		wbc->more_io = 1;
 	return;		/* Leave any unwritten inodes on s_io */
 }
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -62,6 +62,7 @@ struct writeback_control {
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned for_writepages:1;	/* This is a writepages() call */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
+	unsigned more_io:1;		/* more io to be dispatched */
 };

 /*
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -553,6 +553,7 @@ static void background_writeout(unsigned long _min_pages)
 			global_page_state(NR_UNSTABLE_NFS) < background_thresh
 				&& min_pages <= 0)
 			break;
+		wbc.more_io = 0;
 		wbc.encountered_congestion = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		wbc.pages_skipped = 0;
@@ -560,8 +561,9 @@ static void background_writeout(unsigned long _min_pages)
 		min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
 			/* Wrote less than expected */
-			congestion_wait(WRITE, HZ/10);
-			if (!wbc.encountered_congestion)
+			if (wbc.encountered_congestion || wbc.more_io)
+				congestion_wait(WRITE, HZ/10);
+			else
 				break;
 		}
 	}
@@ -626,11 +628,12 @@ static void wb_kupdate(unsigned long arg)
 			global_page_state(NR_UNSTABLE_NFS) +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 	while (nr_to_write > 0) {
+		wbc.more_io = 0;
 		wbc.encountered_congestion = 0;
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		writeback_inodes(&wbc);
 		if (wbc.nr_to_write > 0) {
-			if (wbc.encountered_congestion)
+			if (wbc.encountered_congestion || wbc.more_io)
 				congestion_wait(WRITE, HZ/10);
 			else
 				break;	/* All the old data is written */