Commit a6906972 authored by Matthew Wilcox

page cache: Convert find_get_pages_range_tag to XArray

The 'end' parameter of the xas_for_each iterator avoids a useless
iteration at the end of the range.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 3ece58a2
...@@ -363,10 +363,10 @@ static inline unsigned find_get_pages(struct address_space *mapping, ...@@ -363,10 +363,10 @@ static inline unsigned find_get_pages(struct address_space *mapping,
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages); unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pgoff_t end, int tag, unsigned int nr_pages, pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages); struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping, static inline unsigned find_get_pages_tag(struct address_space *mapping,
pgoff_t *index, int tag, unsigned int nr_pages, pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
struct page **pages) struct page **pages)
{ {
return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
......
...@@ -1789,74 +1789,58 @@ EXPORT_SYMBOL(find_get_pages_contig); ...@@ -1789,74 +1789,58 @@ EXPORT_SYMBOL(find_get_pages_contig);
* @tag. We update @index to index the next page for the traversal. * @tag. We update @index to index the next page for the traversal.
*/ */
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
pgoff_t end, int tag, unsigned int nr_pages, pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
struct page **pages) struct page **pages)
{ {
struct radix_tree_iter iter; XA_STATE(xas, &mapping->i_pages, *index);
void **slot; struct page *page;
unsigned ret = 0; unsigned ret = 0;
if (unlikely(!nr_pages)) if (unlikely(!nr_pages))
return 0; return 0;
rcu_read_lock(); rcu_read_lock();
radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) { xas_for_each_marked(&xas, page, end, tag) {
struct page *head, *page; struct page *head;
if (xas_retry(&xas, page))
if (iter.index > end)
break;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
continue; continue;
/*
if (radix_tree_exception(page)) { * Shadow entries should never be tagged, but this iteration
if (radix_tree_deref_retry(page)) { * is lockless so there is a window for page reclaim to evict
slot = radix_tree_iter_retry(&iter); * a page we saw tagged. Skip over it.
continue; */
} if (xa_is_value(page))
/*
* A shadow entry of a recently evicted page.
*
* Those entries should never be tagged, but
* this tree walk is lockless and the tags are
* looked up in bulk, one radix tree node at a
* time, so there is a sizable window for page
* reclaim to evict a page we saw tagged.
*
* Skip over it.
*/
continue; continue;
}
head = compound_head(page); head = compound_head(page);
if (!page_cache_get_speculative(head)) if (!page_cache_get_speculative(head))
goto repeat; goto retry;
/* The page was split under us? */ /* The page was split under us? */
if (compound_head(page) != head) { if (compound_head(page) != head)
put_page(head); goto put_page;
goto repeat;
}
/* Has the page moved? */ /* Has the page moved? */
if (unlikely(page != *slot)) { if (unlikely(page != xas_reload(&xas)))
put_page(head); goto put_page;
goto repeat;
}
pages[ret] = page; pages[ret] = page;
if (++ret == nr_pages) { if (++ret == nr_pages) {
*index = pages[ret - 1]->index + 1; *index = page->index + 1;
goto out; goto out;
} }
continue;
put_page:
put_page(head);
retry:
xas_reset(&xas);
} }
/* /*
* We come here when we got at @end. We take care to not overflow the * We come here when we got to @end. We take care to not overflow the
* index @index as it confuses some of the callers. This breaks the * index @index as it confuses some of the callers. This breaks the
* iteration when there is page at index -1 but that is already broken * iteration when there is a page at index -1 but that is already
* anyway. * broken anyway.
*/ */
if (end == (pgoff_t)-1) if (end == (pgoff_t)-1)
*index = (pgoff_t)-1; *index = (pgoff_t)-1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment