Commit 86692475 authored by Jingbo Xu, committed by Gao Xiang

fscache,cachefiles: add prepare_ondemand_read() callback

Add prepare_ondemand_read() callback dedicated for the on-demand read
scenario, so that callers from this scenario can be decoupled from
netfs_io_subrequest.

The original cachefiles_prepare_read() is now refactored to a generic
routine accepting a parameter list instead of netfs_io_subrequest.
There's no logic change, except that the debug id of subrequest and
request is removed from trace_cachefiles_prep_read().
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
Acked-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20221124034212.81892-2-jefflexu@linux.alibaba.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
parent 1282dea3
...@@ -385,38 +385,35 @@ static int cachefiles_write(struct netfs_cache_resources *cres, ...@@ -385,38 +385,35 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
term_func, term_func_priv); term_func, term_func_priv);
} }
/* static inline enum netfs_io_source
* Prepare a read operation, shortening it to a cached/uncached cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
* boundary as appropriate. loff_t start, size_t *_len, loff_t i_size,
*/ unsigned long *_flags, ino_t netfs_ino)
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
loff_t i_size)
{ {
enum cachefiles_prepare_read_trace why; enum cachefiles_prepare_read_trace why;
struct netfs_io_request *rreq = subreq->rreq; struct cachefiles_object *object = NULL;
struct netfs_cache_resources *cres = &rreq->cache_resources;
struct cachefiles_object *object;
struct cachefiles_cache *cache; struct cachefiles_cache *cache;
struct fscache_cookie *cookie = fscache_cres_cookie(cres); struct fscache_cookie *cookie = fscache_cres_cookie(cres);
const struct cred *saved_cred; const struct cred *saved_cred;
struct file *file = cachefiles_cres_file(cres); struct file *file = cachefiles_cres_file(cres);
enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER; enum netfs_io_source ret = NETFS_DOWNLOAD_FROM_SERVER;
size_t len = *_len;
loff_t off, to; loff_t off, to;
ino_t ino = file ? file_inode(file)->i_ino : 0; ino_t ino = file ? file_inode(file)->i_ino : 0;
int rc; int rc;
_enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size); _enter("%zx @%llx/%llx", len, start, i_size);
if (subreq->start >= i_size) { if (start >= i_size) {
ret = NETFS_FILL_WITH_ZEROES; ret = NETFS_FILL_WITH_ZEROES;
why = cachefiles_trace_read_after_eof; why = cachefiles_trace_read_after_eof;
goto out_no_object; goto out_no_object;
} }
if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) { if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
why = cachefiles_trace_read_no_data; why = cachefiles_trace_read_no_data;
if (!test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags)) if (!test_bit(NETFS_SREQ_ONDEMAND, _flags))
goto out_no_object; goto out_no_object;
} }
...@@ -437,7 +434,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * ...@@ -437,7 +434,7 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
retry: retry:
off = cachefiles_inject_read_error(); off = cachefiles_inject_read_error();
if (off == 0) if (off == 0)
off = vfs_llseek(file, subreq->start, SEEK_DATA); off = vfs_llseek(file, start, SEEK_DATA);
if (off < 0 && off >= (loff_t)-MAX_ERRNO) { if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
if (off == (loff_t)-ENXIO) { if (off == (loff_t)-ENXIO) {
why = cachefiles_trace_read_seek_nxio; why = cachefiles_trace_read_seek_nxio;
...@@ -449,21 +446,22 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * ...@@ -449,21 +446,22 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
goto out; goto out;
} }
if (off >= subreq->start + subreq->len) { if (off >= start + len) {
why = cachefiles_trace_read_found_hole; why = cachefiles_trace_read_found_hole;
goto download_and_store; goto download_and_store;
} }
if (off > subreq->start) { if (off > start) {
off = round_up(off, cache->bsize); off = round_up(off, cache->bsize);
subreq->len = off - subreq->start; len = off - start;
*_len = len;
why = cachefiles_trace_read_found_part; why = cachefiles_trace_read_found_part;
goto download_and_store; goto download_and_store;
} }
to = cachefiles_inject_read_error(); to = cachefiles_inject_read_error();
if (to == 0) if (to == 0)
to = vfs_llseek(file, subreq->start, SEEK_HOLE); to = vfs_llseek(file, start, SEEK_HOLE);
if (to < 0 && to >= (loff_t)-MAX_ERRNO) { if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
trace_cachefiles_io_error(object, file_inode(file), to, trace_cachefiles_io_error(object, file_inode(file), to,
cachefiles_trace_seek_error); cachefiles_trace_seek_error);
...@@ -471,12 +469,13 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * ...@@ -471,12 +469,13 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
goto out; goto out;
} }
if (to < subreq->start + subreq->len) { if (to < start + len) {
if (subreq->start + subreq->len >= i_size) if (start + len >= i_size)
to = round_up(to, cache->bsize); to = round_up(to, cache->bsize);
else else
to = round_down(to, cache->bsize); to = round_down(to, cache->bsize);
subreq->len = to - subreq->start; len = to - start;
*_len = len;
} }
why = cachefiles_trace_read_have_data; why = cachefiles_trace_read_have_data;
...@@ -484,12 +483,11 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * ...@@ -484,12 +483,11 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
goto out; goto out;
download_and_store: download_and_store:
__set_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); __set_bit(NETFS_SREQ_COPY_TO_CACHE, _flags);
if (test_bit(NETFS_SREQ_ONDEMAND, &subreq->flags)) { if (test_bit(NETFS_SREQ_ONDEMAND, _flags)) {
rc = cachefiles_ondemand_read(object, subreq->start, rc = cachefiles_ondemand_read(object, start, len);
subreq->len);
if (!rc) { if (!rc) {
__clear_bit(NETFS_SREQ_ONDEMAND, &subreq->flags); __clear_bit(NETFS_SREQ_ONDEMAND, _flags);
goto retry; goto retry;
} }
ret = NETFS_INVALID_READ; ret = NETFS_INVALID_READ;
...@@ -497,10 +495,34 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest * ...@@ -497,10 +495,34 @@ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *
out: out:
cachefiles_end_secure(cache, saved_cred); cachefiles_end_secure(cache, saved_cred);
out_no_object: out_no_object:
trace_cachefiles_prep_read(subreq, ret, why, ino); trace_cachefiles_prep_read(object, start, len, *_flags, ret, why, ino, netfs_ino);
return ret; return ret;
} }
/*
 * Prepare a read operation, shortening it to a cached/uncached
 * boundary as appropriate.
 *
 * Thin netfs_cache_ops::prepare_read adapter: unpacks the fields of the
 * netfs_io_subrequest (cache resources, start, length, flags, netfs inode
 * number) and delegates to the generic cachefiles_do_prepare_read(), which
 * may shorten subreq->len and set/clear bits in subreq->flags in place
 * through the pointers passed here.
 */
static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
loff_t i_size)
{
return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
subreq->start, &subreq->len, i_size,
&subreq->flags, subreq->rreq->inode->i_ino);
}
/*
 * Prepare an on-demand read operation, shortening it to a cached/uncached
 * boundary as appropriate.
 *
 * On-demand callers are decoupled from netfs_io_subrequest and supply the
 * read parameters directly, so this simply forwards them to the shared
 * helper cachefiles_do_prepare_read().  *_len may be shortened and *_flags
 * updated by the helper.
 */
static enum netfs_io_source
cachefiles_prepare_ondemand_read(struct netfs_cache_resources *cres,
loff_t start, size_t *_len, loff_t i_size,
unsigned long *_flags, ino_t ino)
{
return cachefiles_do_prepare_read(cres, start, _len, i_size, _flags, ino);
}
/* /*
* Prepare for a write to occur. * Prepare for a write to occur.
*/ */
...@@ -621,6 +643,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = { ...@@ -621,6 +643,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
.write = cachefiles_write, .write = cachefiles_write,
.prepare_read = cachefiles_prepare_read, .prepare_read = cachefiles_prepare_read,
.prepare_write = cachefiles_prepare_write, .prepare_write = cachefiles_prepare_write,
.prepare_ondemand_read = cachefiles_prepare_ondemand_read,
.query_occupancy = cachefiles_query_occupancy, .query_occupancy = cachefiles_query_occupancy,
}; };
......
...@@ -267,6 +267,14 @@ struct netfs_cache_ops { ...@@ -267,6 +267,14 @@ struct netfs_cache_ops {
loff_t *_start, size_t *_len, loff_t i_size, loff_t *_start, size_t *_len, loff_t i_size,
bool no_space_allocated_yet); bool no_space_allocated_yet);
/* Prepare an on-demand read operation, shortening it to a cached/uncached
* boundary as appropriate.
*/
enum netfs_io_source (*prepare_ondemand_read)(struct netfs_cache_resources *cres,
loff_t start, size_t *_len,
loff_t i_size,
unsigned long *_flags, ino_t ino);
/* Query the occupancy of the cache in a region, returning where the /* Query the occupancy of the cache in a region, returning where the
* next chunk of data starts and how long it is. * next chunk of data starts and how long it is.
*/ */
......
...@@ -428,16 +428,18 @@ TRACE_EVENT(cachefiles_vol_coherency, ...@@ -428,16 +428,18 @@ TRACE_EVENT(cachefiles_vol_coherency,
); );
TRACE_EVENT(cachefiles_prep_read, TRACE_EVENT(cachefiles_prep_read,
TP_PROTO(struct netfs_io_subrequest *sreq, TP_PROTO(struct cachefiles_object *obj,
loff_t start,
size_t len,
unsigned short flags,
enum netfs_io_source source, enum netfs_io_source source,
enum cachefiles_prepare_read_trace why, enum cachefiles_prepare_read_trace why,
ino_t cache_inode), ino_t cache_inode, ino_t netfs_inode),
TP_ARGS(sreq, source, why, cache_inode), TP_ARGS(obj, start, len, flags, source, why, cache_inode, netfs_inode),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned int, rreq ) __field(unsigned int, obj )
__field(unsigned short, index )
__field(unsigned short, flags ) __field(unsigned short, flags )
__field(enum netfs_io_source, source ) __field(enum netfs_io_source, source )
__field(enum cachefiles_prepare_read_trace, why ) __field(enum cachefiles_prepare_read_trace, why )
...@@ -448,19 +450,18 @@ TRACE_EVENT(cachefiles_prep_read, ...@@ -448,19 +450,18 @@ TRACE_EVENT(cachefiles_prep_read,
), ),
TP_fast_assign( TP_fast_assign(
__entry->rreq = sreq->rreq->debug_id; __entry->obj = obj ? obj->debug_id : 0;
__entry->index = sreq->debug_index; __entry->flags = flags;
__entry->flags = sreq->flags;
__entry->source = source; __entry->source = source;
__entry->why = why; __entry->why = why;
__entry->len = sreq->len; __entry->len = len;
__entry->start = sreq->start; __entry->start = start;
__entry->netfs_inode = sreq->rreq->inode->i_ino; __entry->netfs_inode = netfs_inode;
__entry->cache_inode = cache_inode; __entry->cache_inode = cache_inode;
), ),
TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx ni=%x B=%x", TP_printk("o=%08x %s %s f=%02x s=%llx %zx ni=%x B=%x",
__entry->rreq, __entry->index, __entry->obj,
__print_symbolic(__entry->source, netfs_sreq_sources), __print_symbolic(__entry->source, netfs_sreq_sources),
__print_symbolic(__entry->why, cachefiles_prepare_read_traces), __print_symbolic(__entry->why, cachefiles_prepare_read_traces),
__entry->flags, __entry->flags,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment