Commit 2b4e4b8c authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Minor tracepoint improvements

Btree iterator tracepoints should print whether they're for the key
cache.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
parent 6e075b54
@@ -197,6 +197,7 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter, bool upgrade,
 			(upgrade
 			 ? trace_node_upgrade_fail
 			 : trace_node_relock_fail)(iter->trans->ip, trace_ip,
+					btree_iter_type(iter) == BTREE_ITER_CACHED,
 					iter->btree_id, &iter->real_pos,
 					l, iter->l[l].lock_seq,
 					is_btree_node(iter, l)
@@ -393,6 +394,7 @@ bool bch2_btree_iter_relock_intent(struct btree_iter *iter)
 	     l++) {
 		if (!bch2_btree_node_relock(iter, l)) {
 			trace_node_relock_fail(iter->trans->ip, _RET_IP_,
+					btree_iter_type(iter) == BTREE_ITER_CACHED,
 					iter->btree_id, &iter->real_pos,
 					l, iter->l[l].lock_seq,
 					is_btree_node(iter, l)
@@ -1386,6 +1388,7 @@ static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
 static int btree_iter_traverse_one(struct btree_iter *iter,
 				   unsigned long trace_ip)
 {
+	struct btree_trans *trans = iter->trans;
 	unsigned l, depth_want = iter->level;
 	int ret = 0;
@@ -1447,7 +1450,8 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
 	iter->uptodate = BTREE_ITER_NEED_PEEK;
 out:
-	trace_iter_traverse(iter->trans->ip, trace_ip,
+	trace_iter_traverse(trans->ip, trace_ip,
+			    btree_iter_type(iter) == BTREE_ITER_CACHED,
 			    iter->btree_id, &iter->real_pos, ret);
 	bch2_btree_iter_verify(iter);
 	return ret;
...
@@ -775,14 +775,16 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_traverse,
 TRACE_EVENT(iter_traverse,
 	TP_PROTO(unsigned long trans_ip,
 		 unsigned long caller_ip,
+		 bool key_cache,
 		 enum btree_id btree_id,
 		 struct bpos *pos,
 		 int ret),
-	TP_ARGS(trans_ip, caller_ip, btree_id, pos, ret),
+	TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos, ret),

 	TP_STRUCT__entry(
 		__field(unsigned long,		trans_ip	)
 		__field(unsigned long,		caller_ip	)
+		__field(u8,			key_cache	)
 		__field(u8,			btree_id	)
 		__field(u64,			pos_inode	)
 		__field(u64,			pos_offset	)
@@ -793,6 +795,7 @@ TRACE_EVENT(iter_traverse,
 	TP_fast_assign(
 		__entry->trans_ip	= trans_ip;
 		__entry->caller_ip	= caller_ip;
+		__entry->key_cache	= key_cache;
 		__entry->btree_id	= btree_id;
 		__entry->pos_inode	= pos->inode;
 		__entry->pos_offset	= pos->offset;
@@ -800,9 +803,10 @@ TRACE_EVENT(iter_traverse,
 		__entry->ret		= ret;
 	),

-	TP_printk("%ps %pS pos %u %llu:%llu:%u ret %i",
+	TP_printk("%ps %pS key cache %u btree %u %llu:%llu:%u ret %i",
 		  (void *) __entry->trans_ip,
 		  (void *) __entry->caller_ip,
+		  __entry->key_cache,
 		  __entry->btree_id,
 		  __entry->pos_inode,
 		  __entry->pos_offset,
@@ -953,15 +957,17 @@ TRACE_EVENT(trans_restart_mem_realloced,
 DECLARE_EVENT_CLASS(node_lock_fail,
 	TP_PROTO(unsigned long trans_ip,
 		 unsigned long caller_ip,
+		 bool key_cache,
 		 enum btree_id btree_id,
 		 struct bpos *pos,
 		 unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
-	TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+	TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos,
 		level, iter_seq, node, node_seq),

 	TP_STRUCT__entry(
 		__field(unsigned long,		trans_ip	)
 		__field(unsigned long,		caller_ip	)
+		__field(u8,			key_cache	)
 		__field(u8,			btree_id	)
 		__field(u64,			pos_inode	)
 		__field(u64,			pos_offset	)
@@ -975,6 +981,7 @@ DECLARE_EVENT_CLASS(node_lock_fail,
 	TP_fast_assign(
 		__entry->trans_ip	= trans_ip;
 		__entry->caller_ip	= caller_ip;
+		__entry->key_cache	= key_cache;
 		__entry->btree_id	= btree_id;
 		__entry->pos_inode	= pos->inode;
 		__entry->pos_offset	= pos->offset;
@@ -985,9 +992,10 @@ DECLARE_EVENT_CLASS(node_lock_fail,
 		__entry->node_seq	= node_seq;
 	),

-	TP_printk("%ps %pS btree %u pos %llu:%llu:%u level %u iter seq %u node %u node seq %u",
+	TP_printk("%ps %pS key cache %u btree %u pos %llu:%llu:%u level %u iter seq %u node %u node seq %u",
 		  (void *) __entry->trans_ip,
 		  (void *) __entry->caller_ip,
+		  __entry->key_cache,
 		  __entry->btree_id,
 		  __entry->pos_inode,
 		  __entry->pos_offset,
@@ -999,20 +1007,22 @@ DECLARE_EVENT_CLASS(node_lock_fail,
 DEFINE_EVENT(node_lock_fail, node_upgrade_fail,
 	TP_PROTO(unsigned long trans_ip,
 		 unsigned long caller_ip,
+		 bool key_cache,
 		 enum btree_id btree_id,
 		 struct bpos *pos,
 		 unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
-	TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+	TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos,
 		level, iter_seq, node, node_seq)
 );

 DEFINE_EVENT(node_lock_fail, node_relock_fail,
 	TP_PROTO(unsigned long trans_ip,
 		 unsigned long caller_ip,
+		 bool key_cache,
 		 enum btree_id btree_id,
 		 struct bpos *pos,
 		 unsigned level, u32 iter_seq, unsigned node, u32 node_seq),
-	TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+	TP_ARGS(trans_ip, caller_ip, key_cache, btree_id, pos,
 		level, iter_seq, node, node_seq)
 );
...
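
As a quick way to see the new field in practice, the sketch below (an illustration, not part of this commit) enables the iter_traverse event through tracefs and dumps a little live trace output; it assumes tracefs is mounted at /sys/kernel/tracing and that these events are exported under the "bcachefs" trace system, neither of which is shown in the diff itself. With this commit, each emitted line should carry the "key cache %u" value.

	/* Hedged sketch: enable the bcachefs iter_traverse tracepoint via tracefs
	 * and print a few lines of the live trace.  Paths and the "bcachefs"
	 * trace-system name are assumptions; adjust to your system.  Requires root. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static int write_str(const char *path, const char *s)
	{
		int fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		ssize_t ret = write(fd, s, strlen(s));
		close(fd);
		return ret < 0 ? -1 : 0;
	}

	int main(void)
	{
		/* Turn on iter_traverse; node_relock_fail and node_upgrade_fail
		 * can be enabled the same way by swapping the event name. */
		if (write_str("/sys/kernel/tracing/events/bcachefs/iter_traverse/enable", "1")) {
			perror("enable tracepoint");
			return 1;
		}

		/* Read a chunk of the live trace; with this commit each line
		 * includes the new "key cache" field from TP_printk(). */
		int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
		if (fd < 0) {
			perror("trace_pipe");
			return 1;
		}
		char buf[4096];
		ssize_t n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);
		}
		close(fd);
		return 0;
	}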