Commit 99c88bc2 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] cleanup in read_cache_pages()

Patch from Nikita Danilov <Nikita@Namesys.COM>

read_cache_pages() is passed a bunch of pages to start I/O against and it is
supposed to consume all those pages.  But if there is an I/O error, someone
needs to throw away the unused pages.

At present the single user of read_cache_pages() (nfs_readpages) does that
cleanup by hand.  But it should be done in the core kernel.
parent cecee739
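To make the change easier to follow, here is how read_cache_pages() reads with the patch folded in. This is a sketch reassembled from the hunks below against the 2.5-era list-linked struct page (page->list, page_cache_release()); the local declarations and the continue on the add_to_page_cache() failure path are assumptions filled in for readability, not lines quoted from the commit.

int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        struct pagevec lru_pvec;
        int ret = 0;

        pagevec_init(&lru_pvec, 0);
        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->list);
                if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                        /* could not insert this page; drop it and try the next one */
                        page_cache_release(page);
                        continue;
                }
                ret = filler(data, page);
                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);
                if (ret) {
                        /*
                         * New in this patch: on an I/O error, consume and
                         * release the rest of the list here instead of
                         * making every caller do it.
                         */
                        while (!list_empty(pages)) {
                                struct page *victim = list_to_page(pages);

                                list_del(&victim->list);
                                page_cache_release(victim);
                        }
                        break;
                }
        }
        pagevec_lru_add(&lru_pvec);
        return ret;
}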
fs/nfs/read.c

@@ -390,11 +390,6 @@ nfs_readpages(struct file *filp, struct address_space *mapping,
 		is_sync ? readpage_sync_filler :
 			readpage_async_filler,
 		&desc);
-	while (!list_empty(pages)) {
-		struct page *page = list_entry(pages->prev, struct page, list);
-		list_del(&page->list);
-		page_cache_release(page);
-	}
 	if (!list_empty(&head)) {
 		int err = nfs_pagein_list(&head, server->rpages);
 		if (!ret)
mm/readahead.c

@@ -42,6 +42,8 @@ static inline unsigned long get_min_readahead(struct file_ra_state *ra)
 	return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 }
 
+#define list_to_page(head) (list_entry((head)->prev, struct page, list))
+
 /**
  * read_cache_pages - populate an address space with some pages, and
  *			start reads against them.
@@ -63,7 +65,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 
 	pagevec_init(&lru_pvec, 0);
 	while (!list_empty(pages)) {
-		page = list_entry(pages->prev, struct page, list);
+		page = list_to_page(pages);
 		list_del(&page->list);
 		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
 			page_cache_release(page);
@@ -72,8 +74,16 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		ret = filler(data, page);
 		if (!pagevec_add(&lru_pvec, page))
 			__pagevec_lru_add(&lru_pvec);
-		if (ret)
+		if (ret) {
+			while (!list_empty(pages)) {
+				struct page *victim;
+
+				victim = list_to_page(pages);
+				list_del(&victim->list);
+				page_cache_release(victim);
+			}
 			break;
+		}
 	}
 	pagevec_lru_add(&lru_pvec);
 	return ret;
@@ -85,13 +95,12 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	unsigned page_idx;
 	struct pagevec lru_pvec;
 
-	pagevec_init(&lru_pvec, 0);
 	if (mapping->a_ops->readpages)
 		return mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
 
+	pagevec_init(&lru_pvec, 0);
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
-		struct page *page = list_entry(pages->prev, struct page, list);
+		struct page *page = list_to_page(pages);
 		list_del(&page->list);
 		if (!add_to_page_cache(page, mapping,
 				page->index, GFP_KERNEL)) {
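On the caller side, the effect is that the page list handed to read_cache_pages() is always empty on return, success or failure, which is exactly what lets the hand-rolled release loop in nfs_readpages() above be deleted. A minimal, hypothetical illustration (failing_filler and caller_example are made up for this sketch and are not part of the commit):

/*
 * Hypothetical filler that fails immediately. A real filler (such as NFS's
 * readpage_async_filler) would also unlock the page and record the error.
 */
static int failing_filler(void *data, struct page *page)
{
        return -EIO;
}

static int caller_example(struct address_space *mapping, struct list_head *pages)
{
        int err = read_cache_pages(mapping, pages, failing_filler, NULL);

        /*
         * Even though every filler call fails, read_cache_pages() has already
         * released the leftover pages, so the caller must not touch them and
         * needs no cleanup loop of its own.
         */
        BUG_ON(!list_empty(pages));
        return err;
}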