shmem: Convert part of shmem_undo_range() to use a folio

find_lock_entries() never returns tail pages.  We cannot use page_folio()
here as the pagevec may also contain swap entries, so simply cast for
now.  This is an intermediate step which will be fully removed by the
end of this series.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent 3506659e
@@ -936,22 +936,22 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	while (index < end && find_lock_entries(mapping, index, end - 1,
 			&pvec, indices)) {
 		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
+			struct folio *folio = (struct folio *)pvec.pages[i];
 
 			index = indices[i];
-			if (xa_is_value(page)) {
+			if (xa_is_value(folio)) {
 				if (unfalloc)
 					continue;
 				nr_swaps_freed += !shmem_free_swap(mapping,
-						index, page);
+						index, folio);
 				continue;
 			}
-			index += thp_nr_pages(page) - 1;
+			index += folio_nr_pages(folio) - 1;
 
-			if (!unfalloc || !PageUptodate(page))
-				truncate_inode_page(mapping, page);
-			unlock_page(page);
+			if (!unfalloc || !folio_test_uptodate(folio))
+				truncate_inode_page(mapping, &folio->page);
+			folio_unlock(folio);
 		}
 		pagevec_remove_exceptionals(&pvec);
 		pagevec_release(&pvec);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment