Commit de74023b authored by David Howells

netfs: Trace refcounting on the netfs_io_request struct

Add refcount tracing for the netfs_io_request structure.

Changes
=======
ver #3)
 - Switch 'W=' to 'R=' in the traceline to match other request debug IDs.
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com

Link: https://lore.kernel.org/r/164622997668.3564931.14456171619219324968.stgit@warthog.procyon.org.uk/ # v1
Link: https://lore.kernel.org/r/164678200943.1200972.7241495532327787765.stgit@warthog.procyon.org.uk/ # v2
Link: https://lore.kernel.org/r/164692900920.2099075.11847712419940675791.stgit@warthog.procyon.org.uk/ # v3
parent 18b3ff9f
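
For reference: the TRACE_EVENT(netfs_rreq_ref) added below prints "R=%08x %s r=%u", i.e. the request debug ID, the symbolic reason tag, and the refcount as it stands after the operation; the 'R=' prefix is the one referred to in the v3 changelog note above. A line in the trace buffer therefore looks roughly like this (the ID and count are illustrative):

	netfs_rreq_ref: R=0000002a GET SUBREQ  r=2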
@@ -20,13 +20,20 @@
 struct netfs_io_request *netfs_alloc_request(const struct netfs_request_ops *ops,
					     void *netfs_priv,
					     struct file *file);
-void netfs_get_request(struct netfs_io_request *rreq);
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
 void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
-void netfs_put_request(struct netfs_io_request *rreq, bool was_async);
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+		       enum netfs_rreq_ref_trace what);
 struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
 void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq);
 
+static inline void netfs_see_request(struct netfs_io_request *rreq,
+				     enum netfs_rreq_ref_trace what)
+{
+	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
+}
+
 /*
  * read_helper.c
  */
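
To show how the revised interface reads at a call site, here is a minimal sketch of a hypothetical caller inside fs/netfs (the function, tag and parameter names come from the hunks in this commit; the wrapper function itself is invented for illustration):

	static void example_hold_request(struct netfs_io_request *rreq)
	{
		/* Take an extra ref on the request and record why ("GET HOLD"). */
		netfs_get_request(rreq, netfs_rreq_trace_get_hold);

		/* ... submit the I/O slices ... */

		/* Drop that ref; the traceline shows the post-decrement count. */
		netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
	}

netfs_see_request() complements this pair: it emits the same traceline with the current refcount_read() value but does not change the count, so a reference can be noted at interesting points without being taken or dropped.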
@@ -27,7 +27,7 @@ struct netfs_io_request *netfs_alloc_request(
 		rreq->debug_id	= atomic_inc_return(&debug_ids);
 		INIT_LIST_HEAD(&rreq->subrequests);
 		INIT_WORK(&rreq->work, netfs_rreq_work);
-		refcount_set(&rreq->usage, 1);
+		refcount_set(&rreq->ref, 1);
 		__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 		if (ops->init_request)
 			ops->init_request(rreq, file);
@@ -37,9 +37,12 @@ struct netfs_io_request *netfs_alloc_request(
 	return rreq;
 }
 
-void netfs_get_request(struct netfs_io_request *rreq)
+void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
 {
-	refcount_inc(&rreq->usage);
+	int r;
+
+	__refcount_inc(&rreq->ref, &r);
+	trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
 }
 
 void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
@@ -68,9 +71,16 @@ static void netfs_free_request(struct work_struct *work)
 	netfs_stat_d(&netfs_n_rh_rreq);
 }
 
-void netfs_put_request(struct netfs_io_request *rreq, bool was_async)
+void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
+		       enum netfs_rreq_ref_trace what)
 {
-	if (refcount_dec_and_test(&rreq->usage)) {
+	unsigned int debug_id = rreq->debug_id;
+	bool dead;
+	int r;
+
+	dead = __refcount_dec_and_test(&rreq->ref, &r);
+	trace_netfs_rreq_ref(debug_id, r - 1, what);
+	if (dead) {
 		if (was_async) {
 			rreq->work.func = netfs_free_request;
 			if (!queue_work(system_unbound_wq, &rreq->work))
@@ -93,7 +103,7 @@ struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq
 		INIT_LIST_HEAD(&subreq->rreq_link);
 		refcount_set(&subreq->usage, 2);
 		subreq->rreq = rreq;
-		netfs_get_request(rreq);
+		netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
 		netfs_stat(&netfs_n_rh_sreq);
 	}
 
@@ -113,7 +123,7 @@ static void __netfs_put_subrequest(struct netfs_io_subrequest *subreq,
 	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
 	kfree(subreq);
 	netfs_stat_d(&netfs_n_rh_sreq);
-	netfs_put_request(rreq, was_async);
+	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
 }
 
 void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async)
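
A note on the pattern used in netfs_get_request() and netfs_put_request(): the __refcount_inc() and __refcount_dec_and_test() helpers hand back the counter's value from before the operation through their int * argument, which is why the tracelines log r + 1 and r - 1 respectively, i.e. the count as it stands after the change. The debug ID is also copied into a local before the decrement, since once this reference is dropped another context may release the last reference and free the request before the traceline is written.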
@@ -109,7 +109,7 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
 {
 	trace_netfs_rreq(rreq, netfs_rreq_trace_done);
 	netfs_clear_subrequests(rreq, was_async);
-	netfs_put_request(rreq, was_async);
+	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
 }
 
 /*
@@ -799,7 +799,7 @@ void netfs_readahead(struct readahead_control *ractl,
 	return;
 
 cleanup_free:
-	netfs_put_request(rreq, false);
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
 	return;
 cleanup:
 	if (netfs_priv)
@@ -858,7 +858,7 @@ int netfs_readpage(struct file *file,
 	netfs_stat(&netfs_n_rh_readpage);
 	trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
 
-	netfs_get_request(rreq);
+	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 
 	atomic_set(&rreq->nr_outstanding, 1);
 	do {
@@ -883,7 +883,7 @@ int netfs_readpage(struct file *file,
 		ret = -EIO;
 	}
 out:
-	netfs_put_request(rreq, false);
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
 	return ret;
 }
 EXPORT_SYMBOL(netfs_readpage);
@@ -1030,13 +1030,13 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	 */
 	ractl._nr_pages = folio_nr_pages(folio);
 	netfs_rreq_expand(rreq, &ractl);
-	netfs_get_request(rreq);
 
 	/* We hold the folio locks, so we can drop the references */
 	folio_get(folio);
 	while (readahead_folio(&ractl))
 		;
 
+	netfs_get_request(rreq, netfs_rreq_trace_get_hold);
 	atomic_set(&rreq->nr_outstanding, 1);
 	do {
 		if (!netfs_rreq_submit_slice(rreq, &debug_index))
@@ -1062,7 +1062,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 		trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_write_begin);
 		ret = -EIO;
 	}
-	netfs_put_request(rreq, false);
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
 	if (ret < 0)
 		goto error;
 
@@ -1078,7 +1078,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
 	return 0;
 
 error_put:
-	netfs_put_request(rreq, false);
+	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
 error:
 	folio_unlock(folio);
 	folio_put(folio);
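
Putting these call sites together: a netfs_readpage() call that issues a single subrequest would be expected to emit a sequence of events roughly like the one below (debug ID and counts are illustrative; the netfs_rreq_trace_new tag presumably marks the initial reference set at allocation time, in a part of the patch not shown here):

	netfs_rreq_ref: R=0000002a GET HOLD    r=2
	netfs_rreq_ref: R=0000002a GET SUBREQ  r=3
	netfs_rreq_ref: R=0000002a PUT SUBREQ  r=2
	netfs_rreq_ref: R=0000002a PUT COMPLT  r=1
	netfs_rreq_ref: R=0000002a PUT HOLD    r=0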
@@ -168,7 +168,7 @@ struct netfs_io_request {
 	loff_t			i_size;		/* Size of the file */
 	loff_t			start;		/* Start position */
 	pgoff_t			no_unlock_folio; /* Don't unlock this folio after read */
-	refcount_t		usage;
+	refcount_t		ref;
 	unsigned long		flags;
 #define NETFS_RREQ_INCOMPLETE_IO	0	/* Some ioreqs terminated short or with error */
 #define NETFS_RREQ_COPY_TO_CACHE	1	/* Need to write to the cache */
@@ -55,6 +55,15 @@
 	EM(netfs_fail_short_write_begin,	"short-write-begin")	\
 	E_(netfs_fail_prepare_write,		"prep-write")
 
+#define netfs_rreq_ref_traces					\
+	EM(netfs_rreq_trace_get_hold,		"GET HOLD   ")	\
+	EM(netfs_rreq_trace_get_subreq,		"GET SUBREQ ")	\
+	EM(netfs_rreq_trace_put_complete,	"PUT COMPLT ")	\
+	EM(netfs_rreq_trace_put_failed,		"PUT FAILED ")	\
+	EM(netfs_rreq_trace_put_hold,		"PUT HOLD   ")	\
+	EM(netfs_rreq_trace_put_subreq,		"PUT SUBREQ ")	\
+	E_(netfs_rreq_trace_new,		"NEW        ")
+
 #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
 #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
 
@@ -67,6 +76,7 @@ enum netfs_read_trace { netfs_read_traces } __mode(byte);
 enum netfs_rreq_trace { netfs_rreq_traces } __mode(byte);
 enum netfs_sreq_trace { netfs_sreq_traces } __mode(byte);
 enum netfs_failure { netfs_failures } __mode(byte);
+enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte);
 
 #endif
 
@@ -83,6 +93,7 @@ netfs_rreq_traces;
 netfs_sreq_sources;
 netfs_sreq_traces;
 netfs_failures;
+netfs_rreq_ref_traces;
 
 /*
  * Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -229,6 +240,30 @@ TRACE_EVENT(netfs_failure,
 		      __entry->error)
 	    );
 
+TRACE_EVENT(netfs_rreq_ref,
+	    TP_PROTO(unsigned int rreq_debug_id, int ref,
+		     enum netfs_rreq_ref_trace what),
+
+	    TP_ARGS(rreq_debug_id, ref, what),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned int,		rreq		)
+		    __field(int,			ref		)
+		    __field(enum netfs_rreq_ref_trace,	what		)
+			     ),
+
+	    TP_fast_assign(
+		    __entry->rreq	= rreq_debug_id;
+		    __entry->ref	= ref;
+		    __entry->what	= what;
+			   ),
+
+	    TP_printk("R=%08x %s r=%u",
+		      __entry->rreq,
+		      __print_symbolic(__entry->what, netfs_rreq_ref_traces),
+		      __entry->ref)
+	    );
+
 #undef EM
 #undef E_
 #endif /* _TRACE_NETFS_H */