Commit e97d70a5 authored by Christoph Hellwig, committed by Chandan Babu R

xfs: use shmem_get_folio in xfile_load

Switch to using shmem_get_folio in xfile_load instead of using
shmem_read_mapping_page_gfp.  This gets us support for large folios
and also optimized reading from unallocated space, as
shmem_get_folio with SGP_READ won't allocate a page for them just
to zero the content.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>
parent fd2634e2
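
For orientation before the diff: the per-chunk read step after this patch reduces to the condensed sketch below. Everything in it is distilled from the hunks that follow rather than written fresh; the loop bookkeeping and the filemap_check_wb_err() check for lost file contents are elided here.

        struct folio    *folio;

        /* Ask the page cache for this position; SGP_READ never allocates. */
        if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio, SGP_READ) < 0)
                break;                  /* lookup failed, caller sees -ENOMEM */
        if (!folio) {
                /* A hole: nothing stored here, so zero to the page boundary. */
                len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
                memset(buf, 0, len);
        } else {
                /* Copy up to the end of the folio, which may span many pages. */
                offset = offset_in_folio(folio, pos);
                len = min_t(ssize_t, count, folio_size(folio) - offset);
                memcpy(buf, folio_address(folio) + offset, len);
                folio_unlock(folio);    /* shmem_get_folio returns it locked */
                folio_put(folio);       /* ... and with a reference held */
        }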
fs/xfs/scrub/xfile.c

@@ -34,13 +34,6 @@
  * xfiles assume that the caller will handle all required concurrency
  * management; standard vfs locks (freezer and inode) are not taken.  Reads
  * and writes are satisfied directly from the page cache.
- *
- * NOTE: The current shmemfs implementation has a quirk that in-kernel reads
- * of a hole cause a page to be mapped into the file.  If you are going to
- * create a sparse xfile, please be careful about reading from uninitialized
- * parts of the file.  These pages are !Uptodate and will eventually be
- * reclaimed if not written, but in the short term this boosts memory
- * consumption.
  */
 
 /*
@@ -118,10 +111,7 @@ xfile_load(
         loff_t                  pos)
 {
         struct inode            *inode = file_inode(xf->file);
-        struct address_space    *mapping = inode->i_mapping;
-        struct page             *page = NULL;
         unsigned int            pflags;
-        int                     error = 0;
 
         if (count > MAX_RW_COUNT)
                 return -ENOMEM;
@@ -132,43 +122,44 @@ xfile_load(
 
         pflags = memalloc_nofs_save();
         while (count > 0) {
+                struct folio    *folio;
                 unsigned int    len;
+                unsigned int    offset;
 
-                len = min_t(ssize_t, count, PAGE_SIZE - offset_in_page(pos));
-
-                /*
-                 * In-kernel reads of a shmem file cause it to allocate a page
-                 * if the mapping shows a hole.  Therefore, if we hit ENOMEM
-                 * we can continue by zeroing the caller's buffer.
-                 */
-                page = shmem_read_mapping_page_gfp(mapping, pos >> PAGE_SHIFT,
-                                __GFP_NOWARN);
-                if (IS_ERR(page)) {
-                        error = PTR_ERR(page);
-                        if (error != -ENOMEM) {
-                                error = -ENOMEM;
-                                break;
-                        }
-
-                        memset(buf, 0, len);
-                        goto advance;
-                }
-
-                /*
-                 * xfile pages must never be mapped into userspace, so
-                 * we skip the dcache flush.
-                 */
-                memcpy(buf, page_address(page) + offset_in_page(pos), len);
-                put_page(page);
-advance:
+                if (shmem_get_folio(inode, pos >> PAGE_SHIFT, &folio,
+                                SGP_READ) < 0)
+                        break;
+                if (!folio) {
+                        /*
+                         * No data stored at this offset, just zero the output
+                         * buffer until the next page boundary.
+                         */
+                        len = min_t(ssize_t, count,
+                                        PAGE_SIZE - offset_in_page(pos));
+                        memset(buf, 0, len);
+                } else {
+                        if (filemap_check_wb_err(inode->i_mapping, 0)) {
+                                folio_unlock(folio);
+                                folio_put(folio);
+                                break;
+                        }
+
+                        offset = offset_in_folio(folio, pos);
+                        len = min_t(ssize_t, count, folio_size(folio) - offset);
+                        memcpy(buf, folio_address(folio) + offset, len);
+
+                        folio_unlock(folio);
+                        folio_put(folio);
+                }
                 count -= len;
                 pos += len;
                 buf += len;
         }
         memalloc_nofs_restore(pflags);
 
-        return error;
+        if (count)
+                return -ENOMEM;
+        return 0;
 }
 
 /*
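
The large-folio win is easiest to see with concrete numbers. Below is a standalone userspace model of the new chunking logic, illustrative only: the PAGE_SIZE value, the folio_bytes_at() helper, and the fake layout (one 16k folio backing offsets [16k, 32k), holes elsewhere) are invented for the demo and are not kernel code.

        #include <stdio.h>
        #include <stddef.h>

        #define PAGE_SIZE 4096u

        /* Fake layout: one 16k folio backs [16k, 32k); everything else is a hole. */
        static size_t folio_bytes_at(long long pos)
        {
                if (pos >= 16384 && pos < 32768)
                        return 32768 - (size_t)pos;     /* bytes left in the folio */
                return 0;                               /* hole */
        }

        int main(void)
        {
                long long pos = 0;
                size_t count = 40960;   /* read 40k starting at offset 0 */

                while (count > 0) {
                        size_t avail = folio_bytes_at(pos);
                        size_t len;

                        if (!avail)     /* hole: zero one page-aligned step */
                                len = PAGE_SIZE - (size_t)(pos & (PAGE_SIZE - 1));
                        else            /* folio: copy to the end of the folio */
                                len = avail;
                        if (len > count)
                                len = count;

                        printf("pos=%lld len=%zu (%s)\n", pos, len,
                               avail ? "copy" : "zero");
                        count -= len;
                        pos += len;
                }
                return 0;
        }

Run as-is, this walks the 40k read in seven steps: four 4k zero-fills, one 16k copy, and two trailing 4k zero-fills, where the pre-patch page-at-a-time loop would always have taken ten 4k steps.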