Commit 32ed4a62 authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Btree path tracepoints

Fastpath tracepoints, rarely needed, only enabled with
CONFIG_BCACHEFS_PATH_TRACEPOINTS.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent abbfc4db
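
The diff below adds the events and their CONFIG_BCACHEFS_PATH_TRACEPOINTS gate. As a usage note (not part of the commit): once a kernel is built with the option enabled, the events appear under the bcachefs trace system in tracefs. A minimal userspace sketch for turning one of them on, assuming tracefs is mounted at the conventional /sys/kernel/tracing:

	/* enable_path_trace.c: toggle one of the new bcachefs path events */
	#include <stdio.h>

	int main(void)
	{
		/* event name matches TRACE_EVENT(btree_path_get, ...) in the diff */
		FILE *f = fopen("/sys/kernel/tracing/events/bcachefs/btree_path_get/enable", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("1", f);	/* write "0" to disable again */
		return fclose(f);
	}

Reading /sys/kernel/tracing/trace_pipe afterwards shows the TP_printk output defined below.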
@@ -87,6 +87,13 @@ config BCACHEFS_SIX_OPTIMISTIC_SPIN
 	  is held by another thread, spin for a short while, as long as the
 	  thread owning the lock is running.
 
+config BCACHEFS_PATH_TRACEPOINTS
+	bool "Extra btree_path tracepoints"
+	depends on BCACHEFS_FS
+	help
+	  Enable extra tracepoints for debugging btree_path operations; we don't
+	  normally want these enabled because they happen at very high rates.
+
 config MEAN_AND_VARIANCE_UNIT_TEST
 	tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
 	depends on KUNIT
...
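For reference, the resulting kernel config fragment when the option is turned on (BCACHEFS_FS is a hard dependency, per the hunk above):

	CONFIG_BCACHEFS_FS=y
	CONFIG_BCACHEFS_PATH_TRACEPOINTS=y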
@@ -1131,6 +1131,8 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
 	if (unlikely(!trans->srcu_held))
 		bch2_trans_srcu_lock(trans);
 
+	trace_btree_path_traverse_start(trans, path);
+
 	/*
 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
 	 * and re-traverse the path without a transaction restart:
@@ -1194,6 +1196,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
 out_uptodate:
 	path->uptodate = BTREE_ITER_UPTODATE;
+	trace_btree_path_traverse_end(trans, path);
 out:
 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
 		panic("ret %s (%i) trans->restarted %s (%i)\n",
@@ -1236,8 +1239,10 @@ __flatten
 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
 			btree_path_idx_t path, bool intent, unsigned long ip)
 {
+	struct btree_path *old = trans->paths + path;
 	__btree_path_put(trans, trans->paths + path, intent);
 	path = btree_path_clone(trans, path, intent, ip);
+	trace_btree_path_clone(trans, old, trans->paths + path);
 	trans->paths[path].preserve = false;
 	return path;
 }
@@ -1252,6 +1257,8 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
 	bch2_trans_verify_not_in_restart(trans);
 	EBUG_ON(!trans->paths[path_idx].ref);
 
+	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
+
 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
 
 	struct btree_path *path = trans->paths + path_idx;
@@ -1368,6 +1375,8 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
 		? have_path_at_pos(trans, path)
 		: have_node_at_pos(trans, path);
 
+	trace_btree_path_free(trans, path_idx, dup);
+
 	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
 		return;
@@ -1421,8 +1430,8 @@ void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
 noinline __cold
 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 {
-	prt_printf(buf, "transaction updates for %s journal seq %llu\n",
-		   trans->fn, trans->journal_res.seq);
+	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
+		   trans->nr_updates, trans->fn, trans->journal_res.seq);
 	printbuf_indent_add(buf, 2);
 
 	trans_for_each_update(trans, i) {
@@ -1464,7 +1473,7 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
 {
 	struct btree_path *path = trans->paths + path_idx;
 
-	prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
+	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
 		   path_idx, path->ref, path->intent_ref,
 		   path->preserve ? 'P' : ' ',
 		   path->should_be_locked ? 'S' : ' ',
@@ -1716,6 +1725,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
 	    trans->paths[path_pos].cached	== cached &&
 	    trans->paths[path_pos].btree_id	== btree_id &&
 	    trans->paths[path_pos].level	== level) {
+		trace_btree_path_get(trans, trans->paths + path_pos, &pos);
+
 		__btree_path_get(trans, trans->paths + path_pos, intent);
 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
 		path = trans->paths + path_idx;
@@ -1738,6 +1749,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
 		path->ip_allocated = ip;
 #endif
 		trans->paths_sorted = false;
+
+		trace_btree_path_alloc(trans, path);
 	}
 
 	if (!(flags & BTREE_ITER_nopreserve))
@@ -1857,7 +1870,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
 
 	struct btree_path *path = btree_iter_path(trans, iter);
 	if (btree_path_node(path, path->level))
-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);
 	return 0;
 }
@@ -1889,7 +1902,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);
@@ -1983,7 +1996,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
@@ -2155,7 +2168,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
 	if (unlikely(ret))
 		return bkey_s_c_err(ret);
 
-	btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
 
 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
 	if (k.k && !bkey_err(k)) {
@@ -2199,7 +2212,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 			goto out;
 		}
 
-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);
 
 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
@@ -2382,14 +2395,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
 
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out_no_locked:
 	if (iter->update_path) {
 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
 		if (unlikely(ret))
 			k = bkey_s_c_err(ret);
 		else
-			btree_path_set_should_be_locked(trans->paths + iter->update_path);
+			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
 	}
 
 	if (!(iter->flags & BTREE_ITER_all_snapshots))
@@ -2511,6 +2524,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 						iter->flags & BTREE_ITER_intent,
 						_THIS_IP_);
 			path = btree_iter_path(trans, iter);
+			trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
 			saved_k = *k.k;
 			saved_v = k.v;
 		}
@@ -2527,7 +2541,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 				continue;
 			}
 
-			btree_path_set_should_be_locked(path);
+			btree_path_set_should_be_locked(trans, path);
 			break;
 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
 			/* Advance to previous leaf node: */
@@ -2685,7 +2699,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		}
 	}
 out:
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);
...
@@ -31,6 +31,7 @@ static inline void __btree_path_get(struct btree_trans *trans, struct btree_path
 
 	path->ref++;
 	path->intent_ref += intent;
+	trace_btree_path_get_ll(trans, path);
 }
 
 static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
@@ -39,6 +40,7 @@ static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path
 	EBUG_ON(!path->ref);
 	EBUG_ON(!path->intent_ref && intent);
 
+	trace_btree_path_put_ll(trans, path);
 	path->intent_ref -= intent;
 	return --path->ref == 0;
 }
...
@@ -228,6 +228,9 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
 			       bch2_six_check_for_deadlock, trans, ip);
 	WRITE_ONCE(trans->locking, NULL);
 	WRITE_ONCE(trans->locking_wait.start_time, 0);
+
+	if (!ret)
+		trace_btree_path_lock(trans, _THIS_IP_, b);
 	return ret;
 }
@@ -400,12 +403,13 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
 
 /* misc: */
 
-static inline void btree_path_set_should_be_locked(struct btree_path *path)
+static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
 {
 	EBUG_ON(!btree_node_locked(path, path->level));
 	EBUG_ON(path->uptodate);
 
 	path->should_be_locked = true;
+	trace_btree_path_should_be_locked(trans, path);
 }
 
 static inline void __btree_path_set_level_up(struct btree_trans *trans,
...
@@ -374,7 +374,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
 	i->key_cache_already_flushed = true;
 	i->flags |= BTREE_TRIGGER_norun;
 
-	btree_path_set_should_be_locked(btree_path);
+	btree_path_set_should_be_locked(trans, btree_path);
 	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
 out:
 	bch2_path_put(trans, path_idx, true);
@@ -422,7 +422,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 			break;
 	}
 
-	if (!cmp && i < trans->updates + trans->nr_updates) {
+	bool overwrite = !cmp && i < trans->updates + trans->nr_updates;
+
+	if (overwrite) {
 		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
 
 		bch2_path_put(trans, i->path, true);
@@ -451,6 +453,8 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 	__btree_path_get(trans, trans->paths + i->path, true);
 
+	trace_update_by_path(trans, path, i, overwrite);
+
 	/*
 	 * If a key is present in the key cache, it must also exist in the
 	 * btree - this is necessary for cache coherency. When iterating over
@@ -498,7 +502,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		}
 
-		btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+		btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
 	}
 
 	return 0;
...
@@ -1981,7 +1981,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	btree_path_set_should_be_locked(trans->paths + sib_path);
+	btree_path_set_should_be_locked(trans, trans->paths + sib_path);
 
 	m = trans->paths[sib_path].l[level].b;
...
@@ -3,7 +3,6 @@
 #define TRACE_SYSTEM bcachefs
 
 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_BCACHEFS_H
 
 #include <linux/tracepoint.h>
@@ -558,6 +557,7 @@ TRACE_EVENT(btree_path_relock_fail,
 		__field(unsigned long,		caller_ip	)
 		__field(u8,			btree_id	)
 		__field(u8,			level		)
+		__field(u8,			path_idx	)
 		TRACE_BPOS_entries(pos)
 		__array(char,			node, 24	)
 		__field(u8,			self_read_count	)
@@ -575,7 +575,8 @@ TRACE_EVENT(btree_path_relock_fail,
 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 		__entry->btree_id		= path->btree_id;
-		__entry->level			= path->level;
+		__entry->level			= level;
+		__entry->path_idx		= path - trans->paths;
 		TRACE_BPOS_assign(pos, path->pos);
 
 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
@@ -588,7 +589,7 @@ TRACE_EVENT(btree_path_relock_fail,
 			c = six_lock_counts(&path->l[level].b->c.lock);
 			__entry->read_count	= c.n[SIX_LOCK_read];
 			__entry->intent_count	= c.n[SIX_LOCK_intent];
-			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
 		}
 		__entry->iter_lock_seq		= path->l[level].lock_seq;
 		__entry->node_lock_seq		= is_btree_node(path, level)
@@ -596,9 +597,10 @@ TRACE_EVENT(btree_path_relock_fail,
 			: 0;
 	),
 
-	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
+	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
+		  __entry->path_idx,
 		  bch2_btree_id_str(__entry->btree_id),
 		  __entry->pos_inode,
 		  __entry->pos_offset,
@@ -625,6 +627,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
 		__field(unsigned long,		caller_ip	)
 		__field(u8,			btree_id	)
 		__field(u8,			level		)
+		__field(u8,			path_idx	)
 		TRACE_BPOS_entries(pos)
 		__field(u8,			locked		)
 		__field(u8,			self_read_count	)
@@ -642,6 +645,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
 		__entry->caller_ip		= caller_ip;
 		__entry->btree_id		= path->btree_id;
 		__entry->level			= level;
+		__entry->path_idx		= path - trans->paths;
 		TRACE_BPOS_assign(pos, path->pos);
 		__entry->locked			= btree_node_locked(path, level);
@@ -657,9 +661,10 @@ TRACE_EVENT(btree_path_upgrade_fail,
 			: 0;
 	),
 
-	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
+	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
+		  __entry->path_idx,
 		  bch2_btree_id_str(__entry->btree_id),
 		  __entry->pos_inode,
 		  __entry->pos_offset,
@@ -1438,6 +1443,456 @@ TRACE_EVENT(error_downcast,
 	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
 );
 
+#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
+TRACE_EVENT(update_by_path,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
+		 struct btree_insert_entry *i, bool overwrite),
+	TP_ARGS(trans, path, i, overwrite),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 32	)
+		__field(btree_path_idx_t,	path_idx	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(pos)
+		__field(u8,			overwrite	)
+		__field(btree_path_idx_t,	update_idx	)
+		__field(btree_path_idx_t,	nr_updates	)
+	),
+
+	TP_fast_assign(
+		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+		__entry->path_idx	= path - trans->paths;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+		__entry->overwrite	= overwrite;
+		__entry->update_idx	= i - trans->updates;
+		__entry->nr_updates	= trans->nr_updates;
+	),
+
+	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
+		  __entry->trans_fn,
+		  __entry->path_idx,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->overwrite,
+		  __entry->update_idx,
+		  __entry->nr_updates)
+);
+
+TRACE_EVENT(btree_path_lock,
+	TP_PROTO(struct btree_trans *trans,
+		 unsigned long caller_ip,
+		 struct btree_bkey_cached_common *b),
+	TP_ARGS(trans, caller_ip, b),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 32	)
+		__field(unsigned long,		caller_ip	)
+		__field(u8,			btree_id	)
+		__field(u8,			level		)
+		__array(char,			node, 24	)
+		__field(u32,			lock_seq	)
+	),
+
+	TP_fast_assign(
+		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+		__entry->caller_ip	= caller_ip;
+		__entry->btree_id	= b->btree_id;
+		__entry->level		= b->level;
+		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+		__entry->lock_seq	= six_lock_seq(&b->lock);
+	),
+
+	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
+		  __entry->trans_fn,
+		  (void *) __entry->caller_ip,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->level,
+		  __entry->node,
+		  __entry->lock_seq)
+);
+
+DECLARE_EVENT_CLASS(btree_path_ev,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path),
+
+	TP_STRUCT__entry(
+		__field(u16,			idx		)
+		__field(u8,			ref		)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+	),
+
+	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
+		  __entry->idx, __entry->ref,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_alloc,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			locks_want	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->locks_want	= path->locks_want;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+	),
+
+	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
+		  __entry->idx,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->locks_want,
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot)
+);
+
+TRACE_EVENT(btree_path_get,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
+	TP_ARGS(trans, path, new_pos),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		__field(u8,			locks_want	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(old_pos)
+		TRACE_BPOS_entries(new_pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		__entry->locks_want	= path->locks_want;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(old_pos, path->pos);
+		TRACE_BPOS_assign(new_pos, *new_pos);
+	),
+
+	TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->locks_want,
+		  __entry->old_pos_inode,
+		  __entry->old_pos_offset,
+		  __entry->old_pos_snapshot,
+		  __entry->new_pos_inode,
+		  __entry->new_pos_offset,
+		  __entry->new_pos_snapshot)
+);
+
+DECLARE_EVENT_CLASS(btree_path_clone,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+	TP_ARGS(trans, path, new),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			new_idx		)
+		__field(u8,			btree_id	)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		TRACE_BPOS_entries(pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->new_idx	= new - trans->paths;
+		__entry->btree_id	= path->btree_id;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		TRACE_BPOS_assign(pos, path->pos);
+	),
+
+	TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->new_idx)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_clone,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+	TP_ARGS(trans, path, new)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+	TP_ARGS(trans, path, new)
+);
+
+DECLARE_EVENT_CLASS(btree_path_traverse,
+	TP_PROTO(struct btree_trans *trans,
+		 struct btree_path *path),
+	TP_ARGS(trans, path),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 32	)
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		__field(u8,			should_be_locked )
+		__field(u8,			btree_id	)
+		__field(u8,			level		)
+		TRACE_BPOS_entries(pos)
+		__field(u8,			locks_want	)
+		__field(u8,			nodes_locked	)
+		__array(char,			node0, 24	)
+		__array(char,			node1, 24	)
+		__array(char,			node2, 24	)
+		__array(char,			node3, 24	)
+	),
+
+	TP_fast_assign(
+		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		__entry->btree_id	= path->btree_id;
+		__entry->level		= path->level;
+		TRACE_BPOS_assign(pos, path->pos);
+		__entry->locks_want	= path->locks_want;
+		__entry->nodes_locked	= path->nodes_locked;
+		struct btree *b = path->l[0].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[1].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
+		else
+			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
+		b = path->l[2].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
+		else
+			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
+		b = path->l[3].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
+		else
+			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
+	),
+
+	TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
+		  "locks %u %u %u %u node %s %s %s %s",
+		  __entry->trans_fn,
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->level,
+		  __entry->locks_want,
+		  (__entry->nodes_locked >> 6) & 3,
+		  (__entry->nodes_locked >> 4) & 3,
+		  (__entry->nodes_locked >> 2) & 3,
+		  (__entry->nodes_locked >> 0) & 3,
+		  __entry->node3,
+		  __entry->node2,
+		  __entry->node1,
+		  __entry->node0)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
+	TP_PROTO(struct btree_trans *trans,
+		 struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_set_pos,
+	TP_PROTO(struct btree_trans *trans,
+		 struct btree_path *path,
+		 struct bpos *new_pos),
+	TP_ARGS(trans, path, new_pos),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(old_pos)
+		TRACE_BPOS_entries(new_pos)
+		__field(u8,			locks_want	)
+		__field(u8,			nodes_locked	)
+		__array(char,			node0, 24	)
+		__array(char,			node1, 24	)
+		__array(char,			node2, 24	)
+		__array(char,			node3, 24	)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(old_pos, path->pos);
+		TRACE_BPOS_assign(new_pos, *new_pos);
+		__entry->nodes_locked	= path->nodes_locked;
+		struct btree *b = path->l[0].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[1].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node1));
+		else
+			scnprintf(__entry->node1, sizeof(__entry->node1), "%px", &b->c);
+		b = path->l[2].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node2));
+		else
+			scnprintf(__entry->node2, sizeof(__entry->node2), "%px", &b->c);
+		b = path->l[3].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node3));
+		else
+			scnprintf(__entry->node3, sizeof(__entry->node3), "%px", &b->c);
+	),
+
+	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
+		  "locks %u %u %u %u node %s %s %s %s",
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->old_pos_inode,
+		  __entry->old_pos_offset,
+		  __entry->old_pos_snapshot,
+		  __entry->new_pos_inode,
+		  __entry->new_pos_offset,
+		  __entry->new_pos_snapshot,
+		  (__entry->nodes_locked >> 6) & 3,
+		  (__entry->nodes_locked >> 4) & 3,
+		  (__entry->nodes_locked >> 2) & 3,
+		  (__entry->nodes_locked >> 0) & 3,
+		  __entry->node3,
+		  __entry->node2,
+		  __entry->node1,
+		  __entry->node0)
+);
+
+TRACE_EVENT(btree_path_free,
+	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
+	TP_ARGS(trans, path, dup),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			preserve	)
+		__field(u8,			should_be_locked)
+		__field(s8,			dup		)
+		__field(u8,			dup_locked	)
+	),
+
+	TP_fast_assign(
+		__entry->idx			= path;
+		__entry->preserve		= trans->paths[path].preserve;
+		__entry->should_be_locked	= trans->paths[path].should_be_locked;
+		__entry->dup			= dup ? dup - trans->paths : -1;
+		__entry->dup_locked		= dup ? btree_node_locked(dup, dup->level) : 0;
+	),
+
+	TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
+		  __entry->preserve ? 'P' : ' ',
+		  __entry->should_be_locked ? 'S' : ' ',
+		  __entry->dup,
+		  __entry->dup_locked)
+);
+
+TRACE_EVENT(btree_path_free_trans_begin,
+	TP_PROTO(btree_path_idx_t path),
+	TP_ARGS(path),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+	),
+
+	TP_fast_assign(
+		__entry->idx			= path;
+	),
+
+	TP_printk(" path %3u", __entry->idx)
+);
+
+#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+#ifndef _TRACE_BCACHEFS_H
+static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
+					struct btree_insert_entry *i, bool overwrite) {}
+static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
+static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
+static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
+#endif
+#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+
+#define _TRACE_BCACHEFS_H
 #endif /* _TRACE_BCACHEFS_H */
 
 /* This part must be outside protection */
...
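
Two reading notes on the trace.h changes above. First, the #define _TRACE_BCACHEFS_H guard moves from the top of the header to the bottom: trace headers are deliberately included more than once (TRACE_HEADER_MULTI_READ), and the inline no-op stubs in the !CONFIG_BCACHEFS_PATH_TRACEPOINTS branch are wrapped in #ifndef _TRACE_BCACHEFS_H so they are emitted only on the first inclusion rather than on every multi-read pass.

Second, the "locks %u %u %u %u" output of the traverse and set_pos events unpacks path->nodes_locked, which packs one two-bit lock state per btree level. A small decoder sketch, assuming the encoding 0 = unlocked, 1 = read, 2 = intent, 3 = write (matching btree_node_locked_type() in btree_locking.h):

	#include <stdio.h>

	/* illustrative helper, mirroring (nodes_locked >> 2*level) & 3 above */
	static const char *lock_state(unsigned char nodes_locked, unsigned level)
	{
		static const char * const strs[] = { "unlocked", "read", "intent", "write" };

		return strs[(nodes_locked >> (2 * level)) & 3];
	}

	int main(void)
	{
		unsigned char nodes_locked = 0x06;	/* e.g. level 0 intent, level 1 read */

		for (unsigned level = 0; level < 4; level++)
			printf("level %u: %s\n", level, lock_state(nodes_locked, level));
		return 0;
	}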