Commit 0174ee99 authored by David Howells, committed by Steve French

cifs: Implement cache I/O by accessing the cache directly

Move cifs to using the fscache DIO API instead of the old upstream I/O API,
as that has been removed.  This is a stopgap solution, as the intention is
that at some time in the future the cache will move to using larger blocks
and won't be able to store individual pages, in order to deal with the
potential for data corruption due to the backing filesystem being able to
insert/remove bridging blocks of zeros in its extent list[1].

cifs then reads and writes cache pages synchronously, one page at a time; a
rough sketch of the read side is shown below.
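
As a rough illustration (not part of the patch itself), the sketch below shows
how a single page can be read synchronously from the cache with the fscache
DIO API, much as the fscache_fallback_read_page() helper added in this patch
does.  The helper name cache_read_one_page() is invented here, and
fscache_end_operation() is assumed to be available as the local helper the
patch defines:

#include <linux/fscache.h>
#include <linux/pagemap.h>
#include <linux/string.h>
#include <linux/uio.h>

/* Sketch: read one page synchronously through the fscache DIO API. */
static int cache_read_one_page(struct fscache_cookie *cookie,
			       struct page *page)
{
	struct netfs_cache_resources cres;
	struct iov_iter iter;
	struct bio_vec bvec[1];
	int ret;

	memset(&cres, 0, sizeof(cres));
	bvec[0].bv_page   = page;
	bvec[0].bv_offset = 0;
	bvec[0].bv_len    = PAGE_SIZE;
	iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);

	/* Pin the cache resources for this cookie before doing any I/O. */
	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	/* Synchronous read; a hole in the cache fails the request. */
	ret = fscache_read(&cres, page_offset(page), &iter,
			   NETFS_READ_HOLE_FAIL, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}

The write side follows the same pattern with iov_iter_bvec(..., WRITE, ...),
fscache_begin_write_operation() and a prepare_write() call before
fscache_write(), as in fscache_fallback_write_page() below.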

The preferred change would be to use the netfs library, but the new I/O API
can be used directly.  It's just that, as the cache now needs to track data
for itself, caching blocks may exceed page size...
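
The other half of the scheme is that, before issuing a read to the server,
the readahead path asks the cache which pages in the window it actually holds
and splits the window accordingly.  A minimal sketch of that probe, mirroring
the __cifs_fscache_query_occupancy() helper added in this patch (the name
query_cached_run() is invented here), might look like:

/* Sketch: find the next cached run of pages within [first, first + nr). */
static int query_cached_run(struct fscache_cookie *cookie,
			    pgoff_t first, unsigned int nr_pages,
			    pgoff_t *_data_first,
			    unsigned int *_data_nr_pages)
{
	struct netfs_cache_resources cres;
	loff_t start = (loff_t)first * PAGE_SIZE, data_start;
	size_t len = (size_t)nr_pages * PAGE_SIZE, data_len;
	int ret;

	memset(&cres, 0, sizeof(cres));
	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	/* Ask the cache which part of [start, start + len) is present. */
	ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
					&data_start, &data_len);
	if (ret == 0) {
		*_data_first = data_start / PAGE_SIZE;	/* first cached page */
		*_data_nr_pages = data_len / PAGE_SIZE;	/* length of the run */
	}

	fscache_end_operation(&cres);
	return ret;
}

The readahead loop then reads pages at or after *_data_first from the cache
one at a time and clamps server reads so that they stop at the next cached
page.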

This code is somewhat borrowed from my "fallback I/O" patchset[2].
Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <smfrench@gmail.com>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: linux-cifs@vger.kernel.org
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/YO17ZNOcq+9PajfQ@mit.edu [1]
Link: https://lore.kernel.org/r/202112100957.2oEDT20W-lkp@intel.com/ [2]
Acked-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent bee9f655
@@ -4276,12 +4276,12 @@ cifs_readv_complete(struct work_struct *work)
} else
SetPageError(page);
unlock_page(page);
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
unlock_page(page);
got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
put_page(page);
@@ -4396,7 +4396,11 @@ static void cifs_readahead(struct readahead_control *ractl)
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
struct TCP_Server_Info *server;
pid_t pid;
unsigned int xid, last_batch_size = 0;
unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
pgoff_t next_cached = ULONG_MAX;
bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
cifs_inode_cookie(ractl->mapping->host)->cache_priv;
bool check_cache = caching;
xid = get_xid();
@@ -4414,12 +4418,52 @@ static void cifs_readahead(struct readahead_control *ractl)
/*
* Chop the readahead request up into rsize-sized read requests.
*/
while (readahead_count(ractl) - last_batch_size) {
unsigned int i, nr_pages, got, rsize;
while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
unsigned int i, got, rsize;
struct page *page;
struct cifs_readdata *rdata;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
pgoff_t index = readahead_index(ractl) + last_batch_size;
/*
* Find out if we have anything cached in the range of
* interest, and if so, where the next chunk of cached data is.
*/
if (caching) {
if (check_cache) {
rc = cifs_fscache_query_occupancy(
ractl->mapping->host, index, nr_pages,
&next_cached, &cache_nr_pages);
if (rc < 0)
caching = false;
check_cache = false;
}
if (index == next_cached) {
/*
* TODO: Send a whole batch of pages to be read
* by the cache.
*/
page = readahead_page(ractl);
if (cifs_readpage_from_fscache(ractl->mapping->host,
page) < 0) {
/*
* TODO: Deal with cache read failure
* here, but for the moment, delegate
* that to readpage.
*/
caching = false;
}
unlock_page(page);
next_cached++;
cache_nr_pages--;
if (cache_nr_pages == 0)
check_cache = true;
continue;
}
}
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
@@ -4435,6 +4479,7 @@ static void cifs_readahead(struct readahead_control *ractl)
if (rc)
break;
nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
nr_pages = min_t(size_t, nr_pages, next_cached - index);
/*
* Give up immediately if rsize is too small to read an entire
......
@@ -134,37 +134,127 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
}
}
static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
if (ops)
ops->end_operation(cres);
}
/*
* Retrieve a page from FS-Cache
* Fallback page reading interface.
*/
int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
__func__, CIFS_I(inode)->fscache, page, inode);
return -ENOBUFS; // Needs conversion to using netfslib
struct netfs_cache_resources cres;
struct fscache_cookie *cookie = cifs_inode_cookie(inode);
struct iov_iter iter;
struct bio_vec bvec[1];
int ret;
memset(&cres, 0, sizeof(cres));
bvec[0].bv_page = page;
bvec[0].bv_offset = 0;
bvec[0].bv_len = PAGE_SIZE;
iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
ret = fscache_begin_read_operation(&cres, cookie);
if (ret < 0)
return ret;
ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
NULL, NULL);
fscache_end_operation(&cres);
return ret;
}
/*
* Retrieve a set of pages from FS-Cache
* Fallback page writing interface.
*/
int __cifs_readpages_from_fscache(struct inode *inode,
struct address_space *mapping,
struct list_head *pages,
unsigned *nr_pages)
static int fscache_fallback_write_page(struct inode *inode, struct page *page,
bool no_space_allocated_yet)
{
cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n",
__func__, CIFS_I(inode)->fscache, *nr_pages, inode);
return -ENOBUFS; // Needs conversion to using netfslib
struct netfs_cache_resources cres;
struct fscache_cookie *cookie = cifs_inode_cookie(inode);
struct iov_iter iter;
struct bio_vec bvec[1];
loff_t start = page_offset(page);
size_t len = PAGE_SIZE;
int ret;
memset(&cres, 0, sizeof(cres));
bvec[0].bv_page = page;
bvec[0].bv_offset = 0;
bvec[0].bv_len = PAGE_SIZE;
iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
ret = fscache_begin_write_operation(&cres, cookie);
if (ret < 0)
return ret;
ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
no_space_allocated_yet);
if (ret == 0)
ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
fscache_end_operation(&cres);
return ret;
}
void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
/*
* Retrieve a page from FS-Cache
*/
int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
struct cifsInodeInfo *cifsi = CIFS_I(inode);
int ret;
cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
__func__, cifs_inode_cookie(inode), page, inode);
ret = fscache_fallback_read_page(inode, page);
if (ret < 0)
return ret;
WARN_ON(!cifsi->fscache);
/* Read completed synchronously */
SetPageUptodate(page);
return 0;
}
void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
{
cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
__func__, cifsi->fscache, page, inode);
__func__, cifs_inode_cookie(inode), page, inode);
fscache_fallback_write_page(inode, page, true);
}
/*
* Query the cache occupancy.
*/
int __cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages)
{
struct netfs_cache_resources cres;
struct fscache_cookie *cookie = cifs_inode_cookie(inode);
loff_t start, data_start;
size_t len, data_len;
int ret;
// Needs conversion to using netfslib
ret = fscache_begin_read_operation(&cres, cookie);
if (ret < 0)
return ret;
start = first * PAGE_SIZE;
len = nr_pages * PAGE_SIZE;
ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
&data_start, &data_len);
if (ret == 0) {
*_data_first = data_start / PAGE_SIZE;
*_data_nr_pages = len / PAGE_SIZE;
}
fscache_end_operation(&cres);
return ret;
}
@@ -9,6 +9,7 @@
#ifndef _CIFS_FSCACHE_H
#define _CIFS_FSCACHE_H
#include <linux/swap.h>
#include <linux/fscache.h>
#include "cifsglob.h"
@@ -58,14 +59,6 @@ void cifs_fscache_fill_coherency(struct inode *inode,
}
extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
extern int __cifs_readpages_from_fscache(struct inode *,
struct address_space *,
struct list_head *,
unsigned *);
extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
{
return CIFS_I(inode)->fscache;
@@ -80,33 +73,52 @@ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags
i_size_read(inode), flags);
}
static inline int cifs_readpage_from_fscache(struct inode *inode,
struct page *page)
{
if (CIFS_I(inode)->fscache)
return __cifs_readpage_from_fscache(inode, page);
extern int __cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages);
static inline int cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages)
{
if (!cifs_inode_cookie(inode))
return -ENOBUFS;
return __cifs_fscache_query_occupancy(inode, first, nr_pages,
_data_first, _data_nr_pages);
}
static inline int cifs_readpages_from_fscache(struct inode *inode,
struct address_space *mapping,
struct list_head *pages,
unsigned *nr_pages)
extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
static inline int cifs_readpage_from_fscache(struct inode *inode,
struct page *page)
{
if (CIFS_I(inode)->fscache)
return __cifs_readpages_from_fscache(inode, mapping, pages,
nr_pages);
if (cifs_inode_cookie(inode))
return __cifs_readpage_from_fscache(inode, page);
return -ENOBUFS;
}
static inline void cifs_readpage_to_fscache(struct inode *inode,
struct page *page)
{
if (PageFsCache(page))
if (cifs_inode_cookie(inode))
__cifs_readpage_to_fscache(inode, page);
}
static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
{
if (PageFsCache(page)) {
if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
wait_on_page_fscache(page);
fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
}
return true;
}
#else /* CONFIG_CIFS_FSCACHE */
static inline
void cifs_fscache_fill_coherency(struct inode *inode,
@@ -123,22 +135,29 @@ static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool upd
static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
static inline int
cifs_readpage_from_fscache(struct inode *inode, struct page *page)
static inline int cifs_fscache_query_occupancy(struct inode *inode,
pgoff_t first, unsigned int nr_pages,
pgoff_t *_data_first,
unsigned int *_data_nr_pages)
{
*_data_first = ULONG_MAX;
*_data_nr_pages = 0;
return -ENOBUFS;
}
static inline int cifs_readpages_from_fscache(struct inode *inode,
struct address_space *mapping,
struct list_head *pages,
unsigned *nr_pages)
static inline int
cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
return -ENOBUFS;
}
static inline void cifs_readpage_to_fscache(struct inode *inode,
struct page *page) {}
static inline
void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
return true; /* May release page */
}
#endif /* CONFIG_CIFS_FSCACHE */
......