Commit f07812ca authored by Andrew Morton, committed by Linus Torvalds

[PATCH] Writeback page range hint

Modify mpage_writepages to optionally only write back dirty pages within a
specified range in a file (as in the case of O_SYNC).  Cheat a little to avoid
changes to prototypes of aops - just put the <start, end> hint into the
writeback_control struct instead.  If <start, end> are not set, then default
to writing back all the mapping's dirty pages.
Signed-off-by: Suparna Bhattacharya <suparna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent d4f9d02b
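
To illustrate the intended use of the new fields: a caller that wants to flush only a byte range (the O_SYNC case mentioned above) fills in start and end on its stack-allocated writeback_control and lets the address_space's writepages method do the rest. The helper below is only a sketch; its name, its nr_to_write choice and the absence of error handling are made up for illustration and are not part of this patch. Only the start/end fields and their inclusive-end semantics come from the change itself.

/*
 * Illustrative only: write back the dirty pages covering bytes
 * [pos, pos + count - 1] of a mapping, roughly what an O_SYNC
 * write path would want.  Not part of this patch.
 */
static int sync_byte_range(struct address_space *mapping,
			   loff_t pos, loff_t count)
{
	struct writeback_control wbc = {
		.bdi		= mapping->backing_dev_info,
		.sync_mode	= WB_SYNC_ALL,
		.nr_to_write	= mapping->nrpages * 2,	/* illustrative */
		.start		= pos,
		.end		= pos + count - 1,	/* `end' is inclusive */
	};

	/* do_writepages() dispatches to a_ops->writepages(), e.g. the
	 * mpage_writepages() modified below, which honours the hint. */
	return do_writepages(mapping, &wbc);
}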
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -627,7 +627,9 @@ mpage_writepages(struct address_space *mapping,
 	struct pagevec pvec;
 	int nr_pages;
 	pgoff_t index;
+	pgoff_t end = -1;		/* Inclusive */
 	int scanned = 0;
+	int is_range = 0;
 
 	if (wbc->nonblocking && bdi_write_congested(bdi)) {
 		wbc->encountered_congestion = 1;
@@ -645,9 +647,16 @@ mpage_writepages(struct address_space *mapping,
 		index = 0;			/* whole-file sweep */
 		scanned = 1;
 	}
+	if (wbc->start || wbc->end) {
+		index = wbc->start >> PAGE_CACHE_SHIFT;
+		end = wbc->end >> PAGE_CACHE_SHIFT;
+		is_range = 1;
+		scanned = 1;
+	}
 retry:
 	while (!done && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-			PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE))) {
+			PAGECACHE_TAG_DIRTY,
+			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
 		unsigned i;
 
 		scanned = 1;
@@ -664,10 +673,21 @@ mpage_writepages(struct address_space *mapping,
 
 			lock_page(page);
 
+			if (unlikely(page->mapping != mapping)) {
+				unlock_page(page);
+				continue;
+			}
+
+			if (unlikely(is_range) && page->index > end) {
+				done = 1;
+				unlock_page(page);
+				continue;
+			}
+
 			if (wbc->sync_mode != WB_SYNC_NONE)
 				wait_on_page_writeback(page);
 
-			if (page->mapping != mapping || PageWriteback(page) ||
+			if (PageWriteback(page) ||
 					!clear_page_dirty_for_io(page)) {
 				unlock_page(page);
 				continue;
@@ -706,7 +726,8 @@ mpage_writepages(struct address_space *mapping,
 		index = 0;
 		goto retry;
 	}
-	mapping->writeback_index = index;
+	if (!is_range)
+		mapping->writeback_index = index;
 	if (bio)
 		mpage_bio_submit(WRITE, bio);
 	return ret;
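
The new page-count argument to pagevec_lookup_tag() above relies on end being an inclusive page index: end - index is the number of remaining candidate pages minus one, so clamping it to PAGEVEC_SIZE - 1 before adding 1 both bounds the request to a single pagevec and avoids overflow when end keeps its whole-file default of (pgoff_t)-1. A small worked sketch with hypothetical values, assuming ordinary kernel context (pgoff_t, min() and PAGEVEC_SIZE); it is not part of the patch:

	pgoff_t index = 3, end = 5;	/* hypothetical range: pages 3..5 */
	pgoff_t nr;

	nr = min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1;
	/* nr == 3: look up at most pages 3, 4 and 5 */

	end = -1;			/* no range hint: whole-file sweep */
	nr = min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1;
	/* nr == PAGEVEC_SIZE: an ordinary full-size pagevec lookup */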
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -29,7 +29,9 @@ enum writeback_sync_modes {
 };
 
 /*
- * A control structure which tells the writeback code what to do
+ * A control structure which tells the writeback code what to do.  These are
+ * always on the stack, and hence need no locking.  They are always initialised
+ * in a manner such that unspecified fields are set to zero.
  */
 struct writeback_control {
 	struct backing_dev_info *bdi;	/* If !NULL, only write back this
@@ -40,10 +42,19 @@ struct writeback_control {
 	long nr_to_write;		/* Write this many pages, and decrement
 					   this for each page written */
 	long pages_skipped;		/* Pages which were not written */
-	int nonblocking;		/* Don't get stuck on request queues */
-	int encountered_congestion;	/* An output: a queue is full */
-	int for_kupdate;		/* A kupdate writeback */
-	int for_reclaim;		/* Invoked from the page allocator */
+
+	/*
+	 * For a_ops->writepages(): is start or end are non-zero then this is
+	 * a hint that the filesystem need only write out the pages inside that
+	 * byterange.  The byte at `end' is included in the writeout request.
+	 */
+	loff_t start;
+	loff_t end;
+
+	int nonblocking:1;		/* Don't get stuck on request queues */
+	int encountered_congestion:1;	/* An output: a queue is full */
+	int for_kupdate:1;		/* A kupdate writeback */
+	int for_reclaim:1;		/* Invoked from the page allocator */
 };
 
 /*
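
Because a writeback_control always lives on the caller's stack with unspecified fields zeroed (per the comment added above), existing callers need no change: a zero <start, end> pair means "no range hint", and mpage_writepages() keeps its usual writeback_index-based sweep over the whole mapping. A minimal sketch with illustrative values, not taken from the patch:

	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_to_write	= 1024,		/* illustrative value */
		/*
		 * .start and .end are implicitly zero: no range hint,
		 * so writeback starts at mapping->writeback_index and
		 * covers the whole mapping, exactly as before.
		 */
	};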