Commit d72dc8a2 authored by Jan Kara, committed by Linus Torvalds

mm: make pagevec_lookup() update index

Make pagevec_lookup() (and the underlying find_get_pages()) update the
index to the next page at which the iteration should continue.  Most
callers want this, and pagevec_lookup_tag() already behaves this way.

Link: http://lkml.kernel.org/r/20170726114704.7626-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 26b433d0
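Each call site below follows the same mechanical conversion: the index is passed by address and the manual "advance past the last page returned" bookkeeping is dropped. As a rough sketch of the resulting loop shape (walk_mapping() is a hypothetical caller invented for illustration, not a function touched by this patch):

#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

/*
 * Hypothetical caller, for illustration only: walk every cached page of
 * @mapping up to @end.  After this patch, pagevec_lookup() leaves @index
 * pointing at the page after the last one it returned, so the explicit
 *
 *	index = pvec.pages[nr_pages - 1]->index + 1;
 *
 * (or index++) at the bottom of the loop is no longer needed.
 */
static void walk_mapping(struct address_space *mapping, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = 0;
	unsigned i;

	pagevec_init(&pvec, 0);
	while (index <= end &&
	       pagevec_lookup(&pvec, mapping, &index, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			/* per-page work would go here */
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}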
@@ -1633,13 +1633,12 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 	end = (block + len - 1) >> (PAGE_SHIFT - bd_inode->i_blkbits);
 	pagevec_init(&pvec, 0);
-	while (index <= end && pagevec_lookup(&pvec, bd_mapping, index,
+	while (index <= end && pagevec_lookup(&pvec, bd_mapping, &index,
 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			index = page->index;
-			if (index > end)
+			if (page->index > end)
 				break;
 			if (!page_has_buffers(page))
 				continue;
@@ -1670,7 +1669,6 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 		}
 		pagevec_release(&pvec);
 		cond_resched();
-		index++;
 	}
 }
 EXPORT_SYMBOL(clean_bdev_aliases);
@@ -3552,7 +3550,8 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
 		unsigned want, nr_pages, i;
 		want = min_t(unsigned, end - index, PAGEVEC_SIZE);
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index, want);
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &index,
+					  want);
 		if (nr_pages == 0)
 			break;
@@ -3594,7 +3593,6 @@ page_cache_seek_hole_data(struct inode *inode, loff_t offset, loff_t length,
 		if (nr_pages < want)
 			break;
-		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index < end);
@@ -468,7 +468,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 		unsigned long nr_pages;
 		num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &index,
 					  (pgoff_t)num);
 		if (nr_pages == 0)
 			break;
@@ -536,8 +536,6 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
 		/* The no. of pages is less than our desired, we are done. */
 		if (nr_pages < num)
 			break;
-		index = pvec.pages[i - 1]->index + 1;
 		pagevec_release(&pvec);
 	} while (index <= end);
@@ -1720,7 +1720,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
+		nr_pages = pagevec_lookup(&pvec, mapping, &index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
@@ -1737,7 +1737,6 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd,
 			}
 			unlock_page(page);
 		}
-		index = pvec.pages[nr_pages - 1]->index + 1;
 		pagevec_release(&pvec);
 	}
 }
@@ -2348,7 +2347,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 	pagevec_init(&pvec, 0);
 	while (start <= end) {
-		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
+		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, &start,
 					  PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
@@ -2357,8 +2356,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 			if (page->index > end)
 				break;
-			/* Up to 'end' pages must be contiguous */
-			BUG_ON(page->index != start);
 			bh = head = page_buffers(page);
 			do {
 				if (lblk < mpd->map.m_lblk)
@@ -2403,7 +2400,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
 				pagevec_release(&pvec);
 				return err;
 			}
-			start++;
 		}
 		pagevec_release(&pvec);
 	}
@@ -1178,11 +1178,10 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 	pagevec_init(&pvec, 0);
 	next = 0;
 	do {
-		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
+		if (!pagevec_lookup(&pvec, mapping, &next, PAGEVEC_SIZE))
 			break;
 		for (i = 0; i < pagevec_count(&pvec); i++) {
 			struct page *page = pvec.pages[i];
-			next = page->index;
 			if (PageFsCache(page)) {
 				__fscache_wait_on_page_write(cookie, page);
 				__fscache_uncache_page(cookie, page);
@@ -1190,7 +1189,7 @@ void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
 		}
 		pagevec_release(&pvec);
 		cond_resched();
-	} while (++next);
+	} while (next);
 	_leave("");
 }
@@ -401,7 +401,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	const pgoff_t end = lend >> huge_page_shift(h);
 	struct vm_area_struct pseudo_vma;
 	struct pagevec pvec;
-	pgoff_t next;
+	pgoff_t next, index;
 	int i, freed = 0;
 	long lookup_nr = PAGEVEC_SIZE;
 	bool truncate_op = (lend == LLONG_MAX);
@@ -420,7 +420,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 		/*
 		 * When no more pages are found, we are done.
 		 */
-		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+		if (!pagevec_lookup(&pvec, mapping, &next, lookup_nr))
 			break;
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
@@ -432,13 +432,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			 * only possible in the punch hole case as end is
 			 * max page offset in the truncate case.
 			 */
-			next = page->index;
-			if (next >= end)
+			index = page->index;
+			if (index >= end)
 				break;
 			hash = hugetlb_fault_mutex_hash(h, current->mm,
 							&pseudo_vma,
-							mapping, next, 0);
+							mapping, index, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 			/*
@@ -455,8 +455,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 				i_mmap_lock_write(mapping);
 				hugetlb_vmdelete_list(&mapping->i_mmap,
-					next * pages_per_huge_page(h),
-					(next + 1) * pages_per_huge_page(h));
+					index * pages_per_huge_page(h),
+					(index + 1) * pages_per_huge_page(h));
 				i_mmap_unlock_write(mapping);
 			}
@@ -475,14 +475,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			freed++;
 			if (!truncate_op) {
 				if (unlikely(hugetlb_unreserve_pages(inode,
-							next, next + 1, 1)))
+							index, index + 1, 1)))
 					hugetlb_fix_reserve_counts(inode);
 			}
 			unlock_page(page);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
-		++next;
 		huge_pagevec_release(&pvec);
 		cond_resched();
 	}
@@ -312,10 +312,9 @@ void nilfs_copy_back_pages(struct address_space *dmap,
 	pagevec_init(&pvec, 0);
 repeat:
-	n = pagevec_lookup(&pvec, smap, index, PAGEVEC_SIZE);
+	n = pagevec_lookup(&pvec, smap, &index, PAGEVEC_SIZE);
 	if (!n)
 		return;
-	index = pvec.pages[n - 1]->index + 1;
 	for (i = 0; i < pagevec_count(&pvec); i++) {
 		struct page *page = pvec.pages[i], *dpage;
@@ -228,7 +228,7 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
 	if (!pages)
 		goto out_free;
-	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
+	nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages);
 	if (nr != lpages)
 		goto out_free_pages; /* leave if some pages were missing */
@@ -353,7 +353,7 @@ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
 			  unsigned int nr_entries, struct page **entries,
 			  pgoff_t *indices);
-unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+unsigned find_get_pages(struct address_space *mapping, pgoff_t *start,
 			unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
 			       unsigned int nr_pages, struct page **pages);
@@ -28,7 +28,7 @@ unsigned pagevec_lookup_entries(struct pagevec *pvec,
 				pgoff_t *indices);
 void pagevec_remove_exceptionals(struct pagevec *pvec);
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
-		pgoff_t start, unsigned nr_pages);
+		pgoff_t *start, unsigned nr_pages);
 unsigned pagevec_lookup_tag(struct pagevec *pvec,
 		struct address_space *mapping, pgoff_t *index, int tag,
 		unsigned nr_pages);
@@ -403,7 +403,7 @@ bool filemap_range_has_page(struct address_space *mapping,
 		return false;
 	pagevec_init(&pvec, 0);
-	if (!pagevec_lookup(&pvec, mapping, index, 1))
+	if (!pagevec_lookup(&pvec, mapping, &index, 1))
 		return false;
 	ret = (pvec.pages[0]->index <= end);
 	pagevec_release(&pvec);
@@ -1569,10 +1569,11 @@ unsigned find_get_entries(struct address_space *mapping,
  *
  * The search returns a group of mapping-contiguous pages with ascending
  * indexes. There may be holes in the indices due to not-present pages.
+ * We also update @start to index the next page for the traversal.
  *
  * find_get_pages() returns the number of pages which were found.
  */
-unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+unsigned find_get_pages(struct address_space *mapping, pgoff_t *start,
 			unsigned int nr_pages, struct page **pages)
 {
 	struct radix_tree_iter iter;
@@ -1583,7 +1584,7 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 		return 0;
 	rcu_read_lock();
-	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
+	radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, *start) {
 		struct page *head, *page;
 repeat:
 		page = radix_tree_deref_slot(slot);
@@ -1625,6 +1626,10 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 	}
 	rcu_read_unlock();
+	if (ret)
+		*start = pages[ret - 1]->index + 1;
 	return ret;
 }
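To illustrate the find_get_pages() contract established by the hunk above: *start is advanced past the last page returned, and is left untouched when nothing is found. A minimal hypothetical caller (grab_and_release() and the batch size are invented for this sketch, they are not part of the patch) might look like this:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical caller: fetch one batch of pages and drop the references. */
static unsigned grab_and_release(struct address_space *mapping)
{
	struct page *pages[16];
	pgoff_t start = 0;
	unsigned nr, i;

	nr = find_get_pages(mapping, &start, ARRAY_SIZE(pages), pages);
	/*
	 * If nr != 0, start is now pages[nr - 1]->index + 1; if nr == 0,
	 * start still holds its old value.  Each returned page carries the
	 * reference taken by find_get_pages(), so it must be dropped here.
	 */
	for (i = 0; i < nr; i++)
		put_page(pages[i]);
	return nr;
}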
@@ -957,12 +957,13 @@ void pagevec_remove_exceptionals(struct pagevec *pvec)
  * reference against the pages in @pvec.
  *
  * The search returns a group of mapping-contiguous pages with ascending
- * indexes. There may be holes in the indices due to not-present pages.
+ * indexes. There may be holes in the indices due to not-present pages. We
+ * also update @start to index the next page for the traversal.
  *
  * pagevec_lookup() returns the number of pages which were found.
  */
 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
-		pgoff_t start, unsigned nr_pages)
+		pgoff_t *start, unsigned nr_pages)
 {
 	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
 	return pagevec_count(pvec);
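One-shot callers such as filemap_range_has_page() earlier in this diff are unaffected beyond the prototype change: they pass a local index by address and simply never read the updated value. A hypothetical sketch of that pattern (mapping_has_page_in_range() is invented for illustration):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/* Hypothetical probe: does @mapping cache any page in [index, end]? */
static bool mapping_has_page_in_range(struct address_space *mapping,
				      pgoff_t index, pgoff_t end)
{
	struct pagevec pvec;
	bool ret;

	pagevec_init(&pvec, 0);
	/* index is updated by pagevec_lookup(), but this caller ignores it */
	if (!pagevec_lookup(&pvec, mapping, &index, 1))
		return false;
	ret = pvec.pages[0]->index <= end;
	pagevec_release(&pvec);
	return ret;
}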