hugetlb: Convert huge_add_to_page_cache() to use a folio

Remove the last caller of add_to_page_cache()
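
For context: add_to_page_cache() locked the page and inserted it into the mapping in a single call. The conversion below open-codes that as a folio operation. A minimal sketch of the pattern, assuming the caller holds the only reference to the folio; the helper name is illustrative, not from this commit:

#include <linux/pagemap.h>

/*
 * Illustrative helper (hypothetical name): lock the folio by hand,
 * then insert it with __filemap_add_folio(), undoing the lock if
 * insertion fails. This is the sequence the patch below adopts in
 * place of add_to_page_cache().
 */
static int example_add_locked_folio(struct address_space *mapping,
				    struct folio *folio, pgoff_t index)
{
	int err;

	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, index, GFP_KERNEL, NULL);
	if (err)
		__folio_clear_locked(folio);
	return err;
}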
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
@@ -759,7 +759,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 
 		SetHPageMigratable(page);
 		/*
-		 * unlock_page because locked by add_to_page_cache()
+		 * unlock_page because locked by huge_add_to_page_cache()
 		 * put_page() due to reference from alloc_huge_page()
 		 */
 		unlock_page(page);
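
The comment fix above tracks the caller-side contract: the page still comes back locked, only now the lock is taken inside huge_add_to_page_cache() itself. A heavily abridged sketch of how hugetlbfs_fallocate() consumes that contract (error paths and surrounding code omitted; not a verbatim excerpt, and the label is hypothetical):

	err = huge_add_to_page_cache(page, mapping, index);
	if (err)
		goto out;	/* hypothetical error label */

	SetHPageMigratable(page);
	unlock_page(page);	/* locked by huge_add_to_page_cache() */
	put_page(page);		/* reference from alloc_huge_page() */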
@@ -5414,19 +5414,25 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			   pgoff_t idx)
 {
+	struct folio *folio = page_folio(page);
 	struct inode *inode = mapping->host;
 	struct hstate *h = hstate_inode(inode);
-	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+	int err;
 
-	if (err)
+	__folio_set_locked(folio);
+	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+
+	if (unlikely(err)) {
+		__folio_clear_locked(folio);
 		return err;
+	}
 	ClearHPageRestoreReserve(page);
 
 	/*
-	 * set page dirty so that it will not be removed from cache/file
+	 * mark folio dirty so that it will not be removed from cache/file
 	 * by non-hugetlbfs specific code paths.
 	 */
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 
 	spin_lock(&inode->i_lock);
 	inode->i_blocks += blocks_per_huge_page(h);
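
For readability, here is the function as it reads with the hunk above applied, reconstructed from the diff; the tail of the function falls outside the hunk and is elided:

int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err;

	/* Lock the folio ourselves; add_to_page_cache() used to do this. */
	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);

	if (unlikely(err)) {
		__folio_clear_locked(folio);
		return err;
	}
	ClearHPageRestoreReserve(page);

	/*
	 * mark folio dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	folio_mark_dirty(folio);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	/* ... remainder unchanged by this patch ... */
}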