Commit 5c211ba2 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: add and use find_lock_entries

We have three functions (shmem_undo_range(), truncate_inode_pages_range()
and invalidate_mapping_pages()) which want exactly this function, so add
it to filemap.c.  Before this patch, shmem_undo_range() would split any
compound page which overlaps either end of the range being punched in both
the first and second loops through the address space.  After this patch,
that functionality is left for the second loop, which is arguably more
appropriate since the first loop is supposed to run through all the pages
quickly, and splitting a page can sleep.

[willy@infradead.org: add assertion]
  Link: https://lkml.kernel.org/r/20201124041507.28996-3-willy@infradead.org

Link: https://lkml.kernel.org/r/20201112212641.27837-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 54fa39ac
...@@ -1920,6 +1920,65 @@ unsigned find_get_entries(struct address_space *mapping, ...@@ -1920,6 +1920,65 @@ unsigned find_get_entries(struct address_space *mapping,
return ret; return ret;
} }
/**
 * find_lock_entries - Find a batch of pagecache entries.
 * @mapping: The address_space to search.
 * @start: The starting page cache index.
 * @end: The final page index (inclusive).
 * @pvec: Where the resulting entries are placed.
 * @indices: The cache indices of the entries in @pvec.
 *
 * find_lock_entries() will return a batch of entries from @mapping.
 * Swap, shadow and DAX entries are included. Pages are returned
 * locked and with an incremented refcount. Pages which are locked by
 * somebody else or under writeback are skipped. Only the head page of
 * a THP is returned. Pages which are partially outside the range are
 * not returned.
 *
 * The entries have ascending indexes. The indices may not be consecutive
 * due to not-present entries, THP pages, pages which could not be locked
 * or pages under writeback.
 *
 * Return: The number of entries which were found.
 */
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices)
{
	XA_STATE(xas, &mapping->i_pages, start);
	struct page *page;

	rcu_read_lock();
	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
		/*
		 * Value entries (swap/shadow/DAX) have no struct page to
		 * lock or pin; they are added to the batch as-is below.
		 */
		if (!xa_is_value(page)) {
			/*
			 * A THP whose head lies before @start is only
			 * partially inside the range - skip it (see the
			 * kernel-doc above: partial pages are not returned).
			 */
			if (page->index < start)
				goto put;
			VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
			/* Likewise skip a page extending beyond @end. */
			if (page->index + thp_nr_pages(page) - 1 > end)
				goto put;
			/*
			 * Never sleep on the page lock here; a page locked
			 * by somebody else is simply skipped.
			 */
			if (!trylock_page(page))
				goto put;
			/*
			 * Re-check under the page lock: the page may have
			 * been truncated or migrated, or be under
			 * writeback - skip those too.
			 */
			if (page->mapping != mapping || PageWriteback(page))
				goto unlock;
			VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index),
					page);
		}
		/* Record the index, then add the entry to the batch. */
		indices[pvec->nr] = xas.xa_index;
		if (!pagevec_add(pvec, page))
			break;	/* pagevec full - stop; page stays locked+pinned */
		goto next;
unlock:
		unlock_page(page);
put:
		put_page(page);
next:
		/*
		 * For a THP, advance the cursor past all of its tail
		 * indices so the next iteration finds a new entry.
		 */
		if (!xa_is_value(page) && PageTransHuge(page))
			xas_set(&xas, page->index + thp_nr_pages(page));
	}
	rcu_read_unlock();

	return pagevec_count(pvec);
}
/** /**
* find_get_pages_range - gang pagecache lookup * find_get_pages_range - gang pagecache lookup
* @mapping: The address_space to search * @mapping: The address_space to search
......
...@@ -60,6 +60,9 @@ static inline void force_page_cache_readahead(struct address_space *mapping, ...@@ -60,6 +60,9 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
force_page_cache_ra(&ractl, &file->f_ra, nr_to_read); force_page_cache_ra(&ractl, &file->f_ra, nr_to_read);
} }
unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
/** /**
* page_evictable - test whether a page is evictable * page_evictable - test whether a page is evictable
* @page: the page to test * @page: the page to test
......
...@@ -907,12 +907,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -907,12 +907,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
pagevec_init(&pvec); pagevec_init(&pvec);
index = start; index = start;
while (index < end) { while (index < end && find_lock_entries(mapping, index, end - 1,
pvec.nr = find_get_entries(mapping, index, &pvec, indices)) {
min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr)
break;
for (i = 0; i < pagevec_count(&pvec); i++) { for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i]; struct page *page = pvec.pages[i];
...@@ -927,18 +923,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, ...@@ -927,18 +923,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
index, page); index, page);
continue; continue;
} }
index += thp_nr_pages(page) - 1;
VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page); if (!unfalloc || !PageUptodate(page))
truncate_inode_page(mapping, page);
if (!trylock_page(page))
continue;
if ((!unfalloc || !PageUptodate(page)) &&
page_mapping(page) == mapping) {
VM_BUG_ON_PAGE(PageWriteback(page), page);
if (shmem_punch_compound(page, start, end))
truncate_inode_page(mapping, page);
}
unlock_page(page); unlock_page(page);
} }
pagevec_remove_exceptionals(&pvec); pagevec_remove_exceptionals(&pvec);
......
...@@ -326,51 +326,19 @@ void truncate_inode_pages_range(struct address_space *mapping, ...@@ -326,51 +326,19 @@ void truncate_inode_pages_range(struct address_space *mapping,
pagevec_init(&pvec); pagevec_init(&pvec);
index = start; index = start;
while (index < end && pagevec_lookup_entries(&pvec, mapping, index, while (index < end && find_lock_entries(mapping, index, end - 1,
min(end - index, (pgoff_t)PAGEVEC_SIZE), &pvec, indices)) {
indices)) { index = indices[pagevec_count(&pvec) - 1] + 1;
/*
* Pagevec array has exceptional entries and we may also fail
* to lock some pages. So we store pages that can be deleted
* in a new pagevec.
*/
struct pagevec locked_pvec;
pagevec_init(&locked_pvec);
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
/* We rely upon deletion not changing page->index */
index = indices[i];
if (index >= end)
break;
if (xa_is_value(page))
continue;
if (!trylock_page(page))
continue;
WARN_ON(page_to_index(page) != index);
if (PageWriteback(page)) {
unlock_page(page);
continue;
}
if (page->mapping != mapping) {
unlock_page(page);
continue;
}
pagevec_add(&locked_pvec, page);
}
for (i = 0; i < pagevec_count(&locked_pvec); i++)
truncate_cleanup_page(mapping, locked_pvec.pages[i]);
delete_from_page_cache_batch(mapping, &locked_pvec);
for (i = 0; i < pagevec_count(&locked_pvec); i++)
unlock_page(locked_pvec.pages[i]);
truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
for (i = 0; i < pagevec_count(&pvec); i++)
truncate_cleanup_page(mapping, pvec.pages[i]);
delete_from_page_cache_batch(mapping, &pvec);
for (i = 0; i < pagevec_count(&pvec); i++)
unlock_page(pvec.pages[i]);
pagevec_release(&pvec); pagevec_release(&pvec);
cond_resched(); cond_resched();
index++;
} }
if (partial_start) { if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1); struct page *page = find_lock_page(mapping, start - 1);
if (page) { if (page) {
...@@ -539,9 +507,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping, ...@@ -539,9 +507,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
int i; int i;
pagevec_init(&pvec); pagevec_init(&pvec);
while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, while (find_lock_entries(mapping, index, end, &pvec, indices)) {
min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
indices)) {
for (i = 0; i < pagevec_count(&pvec); i++) { for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i]; struct page *page = pvec.pages[i];
...@@ -555,39 +521,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping, ...@@ -555,39 +521,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
page); page);
continue; continue;
} }
index += thp_nr_pages(page) - 1;
if (!trylock_page(page))
continue;
WARN_ON(page_to_index(page) != index);
/* Middle of THP: skip */
if (PageTransTail(page)) {
unlock_page(page);
continue;
} else if (PageTransHuge(page)) {
index += HPAGE_PMD_NR - 1;
i += HPAGE_PMD_NR - 1;
/*
* 'end' is in the middle of THP. Don't
* invalidate the page as the part outside of
* 'end' could be still useful.
*/
if (index > end) {
unlock_page(page);
continue;
}
/* Take a pin outside pagevec */
get_page(page);
/*
* Drop extra pins before trying to invalidate
* the huge page.
*/
pagevec_remove_exceptionals(&pvec);
pagevec_release(&pvec);
}
ret = invalidate_inode_page(page); ret = invalidate_inode_page(page);
unlock_page(page); unlock_page(page);
...@@ -601,9 +535,6 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping, ...@@ -601,9 +535,6 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
if (nr_pagevec) if (nr_pagevec)
(*nr_pagevec)++; (*nr_pagevec)++;
} }
if (PageTransHuge(page))
put_page(page);
count += ret; count += ret;
} }
pagevec_remove_exceptionals(&pvec); pagevec_remove_exceptionals(&pvec);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment