Commit 663dfb65 authored by David Howells

netfs: Refactor arguments for netfs_alloc_read_request

Pass start and len to the rreq allocator. This should ensure that the
fields are set so that ->init_request() can use them.

Also add a parameter to indicate the origin of the request.  Ceph can use
this to tell whether to get caps.

Changes
=======
ver #3)
 - Change the author to me as Jeff feels that most of the patch is my
   changes now.

ver #2)
 - Show the request origin in the netfs_rreq tracepoint.
Signed-off-by: Jeff Layton <jlayton@kernel.org>
Co-developed-by: David Howells <dhowells@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
cc: linux-cachefs@redhat.com

Link: https://lore.kernel.org/r/164622989020.3564931.17517006047854958747.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678208569.1200972.12153682697842916557.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692904155.2099075.14717645623034355995.stgit@warthog.procyon.org.uk/ # v3
parent 5c88705e
...@@ -17,9 +17,12 @@ ...@@ -17,9 +17,12 @@
/* /*
* objects.c * objects.c
*/ */
struct netfs_io_request *netfs_alloc_request(const struct netfs_request_ops *ops, struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct file *file,
const struct netfs_request_ops *ops,
void *netfs_priv, void *netfs_priv,
struct file *file); loff_t start, size_t len,
enum netfs_io_origin origin);
void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what); void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async); void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
void netfs_put_request(struct netfs_io_request *rreq, bool was_async, void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
......
...@@ -11,17 +11,24 @@ ...@@ -11,17 +11,24 @@
/* /*
* Allocate an I/O request and initialise it. * Allocate an I/O request and initialise it.
*/ */
struct netfs_io_request *netfs_alloc_request( struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
const struct netfs_request_ops *ops, void *netfs_priv, struct file *file,
struct file *file) const struct netfs_request_ops *ops,
void *netfs_priv,
loff_t start, size_t len,
enum netfs_io_origin origin)
{ {
static atomic_t debug_ids; static atomic_t debug_ids;
struct netfs_io_request *rreq; struct netfs_io_request *rreq;
rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL); rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
if (rreq) { if (rreq) {
rreq->start = start;
rreq->len = len;
rreq->origin = origin;
rreq->netfs_ops = ops; rreq->netfs_ops = ops;
rreq->netfs_priv = netfs_priv; rreq->netfs_priv = netfs_priv;
rreq->mapping = mapping;
rreq->inode = file_inode(file); rreq->inode = file_inode(file);
rreq->i_size = i_size_read(rreq->inode); rreq->i_size = i_size_read(rreq->inode);
rreq->debug_id = atomic_inc_return(&debug_ids); rreq->debug_id = atomic_inc_return(&debug_ids);
......
...@@ -763,12 +763,13 @@ void netfs_readahead(struct readahead_control *ractl, ...@@ -763,12 +763,13 @@ void netfs_readahead(struct readahead_control *ractl,
if (readahead_count(ractl) == 0) if (readahead_count(ractl) == 0)
goto cleanup; goto cleanup;
rreq = netfs_alloc_request(ops, netfs_priv, ractl->file); rreq = netfs_alloc_request(ractl->mapping, ractl->file,
ops, netfs_priv,
readahead_pos(ractl),
readahead_length(ractl),
NETFS_READAHEAD);
if (!rreq) if (!rreq)
goto cleanup; goto cleanup;
rreq->mapping = ractl->mapping;
rreq->start = readahead_pos(ractl);
rreq->len = readahead_length(ractl);
if (ops->begin_cache_operation) { if (ops->begin_cache_operation) {
ret = ops->begin_cache_operation(rreq); ret = ops->begin_cache_operation(rreq);
...@@ -838,16 +839,15 @@ int netfs_readpage(struct file *file, ...@@ -838,16 +839,15 @@ int netfs_readpage(struct file *file,
_enter("%lx", folio_index(folio)); _enter("%lx", folio_index(folio));
rreq = netfs_alloc_request(ops, netfs_priv, file); rreq = netfs_alloc_request(folio->mapping, file, ops, netfs_priv,
folio_file_pos(folio), folio_size(folio),
NETFS_READPAGE);
if (!rreq) { if (!rreq) {
if (netfs_priv) if (netfs_priv)
ops->cleanup(folio_file_mapping(folio), netfs_priv); ops->cleanup(folio_file_mapping(folio), netfs_priv);
folio_unlock(folio); folio_unlock(folio);
return -ENOMEM; return -ENOMEM;
} }
rreq->mapping = folio_file_mapping(folio);
rreq->start = folio_file_pos(folio);
rreq->len = folio_size(folio);
if (ops->begin_cache_operation) { if (ops->begin_cache_operation) {
ret = ops->begin_cache_operation(rreq); ret = ops->begin_cache_operation(rreq);
...@@ -1008,12 +1008,11 @@ int netfs_write_begin(struct file *file, struct address_space *mapping, ...@@ -1008,12 +1008,11 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
} }
ret = -ENOMEM; ret = -ENOMEM;
rreq = netfs_alloc_request(ops, netfs_priv, file); rreq = netfs_alloc_request(mapping, file, ops, netfs_priv,
folio_file_pos(folio), folio_size(folio),
NETFS_READ_FOR_WRITE);
if (!rreq) if (!rreq)
goto error; goto error;
rreq->mapping = folio_file_mapping(folio);
rreq->start = folio_file_pos(folio);
rreq->len = folio_size(folio);
rreq->no_unlock_folio = folio_index(folio); rreq->no_unlock_folio = folio_index(folio);
__set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags); __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
netfs_priv = NULL; netfs_priv = NULL;
......
...@@ -150,6 +150,12 @@ struct netfs_io_subrequest { ...@@ -150,6 +150,12 @@ struct netfs_io_subrequest {
#define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */
}; };
enum netfs_io_origin {
NETFS_READAHEAD, /* This read was triggered by readahead */
NETFS_READPAGE, /* This read is a synchronous read */
NETFS_READ_FOR_WRITE, /* This read is to prepare a write */
} __mode(byte);
/* /*
* Descriptor for an I/O helper request. This is used to make multiple I/O * Descriptor for an I/O helper request. This is used to make multiple I/O
* operations to a variety of data stores and then stitch the result together. * operations to a variety of data stores and then stitch the result together.
...@@ -167,6 +173,7 @@ struct netfs_io_request { ...@@ -167,6 +173,7 @@ struct netfs_io_request {
size_t submitted; /* Amount submitted for I/O so far */ size_t submitted; /* Amount submitted for I/O so far */
size_t len; /* Length of the request */ size_t len; /* Length of the request */
short error; /* 0 or error that occurred */ short error; /* 0 or error that occurred */
enum netfs_io_origin origin; /* Origin of the request */
loff_t i_size; /* Size of the file */ loff_t i_size; /* Size of the file */
loff_t start; /* Start position */ loff_t start; /* Start position */
pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */
......
...@@ -21,6 +21,11 @@ ...@@ -21,6 +21,11 @@
EM(netfs_read_trace_readpage, "READPAGE ") \ EM(netfs_read_trace_readpage, "READPAGE ") \
E_(netfs_read_trace_write_begin, "WRITEBEGN") E_(netfs_read_trace_write_begin, "WRITEBEGN")
#define netfs_rreq_origins \
EM(NETFS_READAHEAD, "RA") \
EM(NETFS_READPAGE, "RP") \
E_(NETFS_READ_FOR_WRITE, "RW")
#define netfs_rreq_traces \ #define netfs_rreq_traces \
EM(netfs_rreq_trace_assess, "ASSESS ") \ EM(netfs_rreq_trace_assess, "ASSESS ") \
EM(netfs_rreq_trace_copy, "COPY ") \ EM(netfs_rreq_trace_copy, "COPY ") \
...@@ -101,6 +106,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); ...@@ -101,6 +106,7 @@ enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte);
#define E_(a, b) TRACE_DEFINE_ENUM(a); #define E_(a, b) TRACE_DEFINE_ENUM(a);
netfs_read_traces; netfs_read_traces;
netfs_rreq_origins;
netfs_rreq_traces; netfs_rreq_traces;
netfs_sreq_sources; netfs_sreq_sources;
netfs_sreq_traces; netfs_sreq_traces;
...@@ -159,17 +165,20 @@ TRACE_EVENT(netfs_rreq, ...@@ -159,17 +165,20 @@ TRACE_EVENT(netfs_rreq,
TP_STRUCT__entry( TP_STRUCT__entry(
__field(unsigned int, rreq ) __field(unsigned int, rreq )
__field(unsigned int, flags ) __field(unsigned int, flags )
__field(enum netfs_io_origin, origin )
__field(enum netfs_rreq_trace, what ) __field(enum netfs_rreq_trace, what )
), ),
TP_fast_assign( TP_fast_assign(
__entry->rreq = rreq->debug_id; __entry->rreq = rreq->debug_id;
__entry->flags = rreq->flags; __entry->flags = rreq->flags;
__entry->origin = rreq->origin;
__entry->what = what; __entry->what = what;
), ),
TP_printk("R=%08x %s f=%02x", TP_printk("R=%08x %s %s f=%02x",
__entry->rreq, __entry->rreq,
__print_symbolic(__entry->origin, netfs_rreq_origins),
__print_symbolic(__entry->what, netfs_rreq_traces), __print_symbolic(__entry->what, netfs_rreq_traces),
__entry->flags) __entry->flags)
); );
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment