Commit c13fbb7d authored by Kent Overstreet

bcachefs: Improve would_deadlock trace event

We now include backtraces for every thread involved in the cycle.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 074cbcda
@@ -86,8 +86,14 @@ static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
 	prt_printf(out, "Found lock cycle (%u entries):", g->nr);
 	prt_newline(out);
 
-	for (i = g->g; i < g->g + g->nr; i++)
+	for (i = g->g; i < g->g + g->nr; i++) {
+		struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
+
+		if (!task)
+			continue;
 		bch2_btree_trans_to_text(out, i->trans);
+		bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1);
+	}
 }
 
 static noinline void print_chain(struct printbuf *out, struct lock_graph *g)
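A note on the new loop, hedged since only the diff itself is authoritative: each entry's waiting task is sampled once with READ_ONCE() and entries whose task pointer has already been cleared are skipped, because a waiter can drop out of the graph concurrently. The differing skip counts passed to bch2_prt_task_backtrace() are presumably because the first entry (i == g->g) is the thread currently producing the report, so several frames of the reporting machinery itself get trimmed (5), while the other entries are blocked waiters (1). The user-space sketch below (not bcachefs code; C11 atomics stand in for READ_ONCE()) only illustrates the snapshot-and-skip pattern:

/*
 * User-space sketch of the snapshot-and-skip pattern used by the new loop:
 * read each entry's task pointer exactly once, skip entries whose waiter
 * has already gone away.
 */
#include <stdatomic.h>
#include <stdio.h>

struct waiter {
	const char *name;
	_Atomic(const char *) task;	/* stands in for locking_wait.task */
};

static void print_cycle_sketch(struct waiter *g, unsigned nr)
{
	for (struct waiter *i = g; i < g + nr; i++) {
		/* one load per entry, like READ_ONCE(i->trans->locking_wait.task) */
		const char *task = atomic_load_explicit(&i->task, memory_order_relaxed);

		if (!task)
			continue;	/* no longer waiting; nothing to report */
		printf("%s: blocked task %s\n", i->name, task);
	}
}

int main(void)
{
	struct waiter cycle[] = {
		{ "trans A", "task A" },
		{ "trans B", NULL },	/* raced: already released its lock */
		{ "trans C", "task C" },
	};

	print_cycle_sketch(cycle, 3);
	return 0;
}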
@@ -144,8 +150,7 @@ static bool lock_graph_remove_non_waiters(struct lock_graph *g)
 	return false;
 }
 
-static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans,
-				 unsigned long ip)
+static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
 
@@ -157,7 +162,7 @@ static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans
 		buf.atomic++;
 		print_cycle(&buf, g);
 
-		trace_trans_restart_would_deadlock(trans, ip, buf.buf);
+		trace_trans_restart_would_deadlock(trans, buf.buf);
 		printbuf_exit(&buf);
 	}
 }
@@ -165,7 +170,7 @@ static void trace_would_deadlock(struct lock_graph *g, struct btree_trans *trans
 static int abort_lock(struct lock_graph *g, struct trans_waiting_for_lock *i)
 {
 	if (i == g->g) {
-		trace_would_deadlock(g, i->trans, _RET_IP_);
+		trace_would_deadlock(g, i->trans);
 		return btree_trans_restart(i->trans, BCH_ERR_transaction_restart_would_deadlock);
 	} else {
 		i->trans->lock_must_abort = true;
@@ -222,7 +227,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
 			prt_printf(&buf, "backtrace:");
 			prt_newline(&buf);
 			printbuf_indent_add(&buf, 2);
-			bch2_prt_task_backtrace(&buf, trans->locking_wait.task);
+			bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2);
 			printbuf_indent_sub(&buf, 2);
 			prt_newline(&buf);
 		}
@@ -291,7 +296,7 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
 		if (cycle)
 			return -1;
 
-		trace_would_deadlock(&g, trans, _RET_IP_);
+		trace_would_deadlock(&g, trans);
 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 	}
@@ -627,7 +627,7 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
 		prt_printf(&i->buf, "backtrace:");
 		prt_newline(&i->buf);
 		printbuf_indent_add(&i->buf, 2);
-		bch2_prt_task_backtrace(&i->buf, task);
+		bch2_prt_task_backtrace(&i->buf, task, 0);
 		printbuf_indent_sub(&i->buf, 2);
 		prt_newline(&i->buf);
@@ -72,6 +72,27 @@ DECLARE_EVENT_CLASS(trans_str,
 		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
 );
 
+DECLARE_EVENT_CLASS(trans_str_nocaller,
+	TP_PROTO(struct btree_trans *trans, const char *str),
+	TP_ARGS(trans, str),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev			)
+		__array(char,		trans_fn, 32		)
+		__string(str,		str			)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= trans->c->dev;
+		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+		__assign_str(str, str);
+	),
+
+	TP_printk("%d,%d %s %s",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->trans_fn, __get_str(str))
+);
+
 DECLARE_EVENT_CLASS(btree_node_nofs,
 	TP_PROTO(struct bch_fs *c, struct btree *b),
 	TP_ARGS(c, b),
@@ -1243,11 +1264,10 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_memory_allocation_failure,
 	TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(trans_str, trans_restart_would_deadlock,
+DEFINE_EVENT(trans_str_nocaller, trans_restart_would_deadlock,
 	TP_PROTO(struct btree_trans *trans,
-		 unsigned long caller_ip,
 		 const char *cycle),
-	TP_ARGS(trans, caller_ip, cycle)
+	TP_ARGS(trans, cycle)
 );
 
 DEFINE_EVENT(transaction_event, trans_restart_would_deadlock_recursion_limit,
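For readers less familiar with the tracing macros: a DECLARE_EVENT_CLASS fixes the TP_PROTO/TP_ARGS shared by every DEFINE_EVENT in that class, so dropping caller_ip from trans_restart_would_deadlock means giving it a class with the narrower prototype rather than editing the one event, hence the new trans_str_nocaller class above. The rough user-space analogy below (hypothetical names, not the tracing infrastructure itself) treats an event class like a function-pointer typedef: an event can only be defined against a class whose prototype it matches.

/*
 * Rough analogy only: an event class is like a function-pointer typedef that
 * fixes the argument list for every event defined against it.
 */
#include <stdio.h>

struct btree_trans;	/* opaque for the sketch */

/* analogue of the existing trans_str class: (trans, caller_ip, str) */
typedef void (*trans_str_fn)(struct btree_trans *, unsigned long, const char *);

/* analogue of the new trans_str_nocaller class: (trans, str) */
typedef void (*trans_str_nocaller_fn)(struct btree_trans *, const char *);

/* the would_deadlock event no longer takes a caller ip ... */
static void would_deadlock(struct btree_trans *trans, const char *cycle)
{
	(void) trans;
	printf("would_deadlock: %s\n", cycle);
}

int main(void)
{
	/* ... so it can only be "defined" against the nocaller prototype */
	trans_str_nocaller_fn ev = would_deadlock;

	ev(NULL, "lock cycle description");
	return 0;
}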
@@ -267,7 +267,7 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
 	console_unlock();
 }
 
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task)
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr)
 {
 #ifdef CONFIG_STACKTRACE
 	unsigned nr_entries = 0;
@@ -282,7 +282,7 @@ int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task)
 		return -1;
 
 	do {
-		nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, 0);
+		nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1);
 	} while (nr_entries == stack->size &&
 		 !(ret = darray_make_room(stack, stack->size * 2)));
@@ -303,10 +303,10 @@ void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
 	}
 }
 
-int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task)
+int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr)
 {
 	bch_stacktrace stack = { 0 };
-	int ret = bch2_save_backtrace(&stack, task);
+	int ret = bch2_save_backtrace(&stack, task, skipnr + 1);
 
 	bch2_prt_backtrace(out, &stack);
 	darray_exit(&stack);
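The skipnr plumbing follows a simple convention: each wrapper passes skipnr + 1 downward so that its own frame is not included in the captured trace, and callers then only say how many of their own frames to hide (0 in bch2_btree_transactions_read(), 2 in break_cycle(), 5 or 1 in print_cycle()). A minimal user-space illustration of the same convention, using glibc's backtrace() instead of stack_trace_save_tsk(); the function names below are hypothetical, not bcachefs code:

#include <execinfo.h>
#include <stdio.h>
#include <stdlib.h>

/* innermost helper: capture the current stack, hiding skipnr caller frames */
static void prt_backtrace_sketch(unsigned skipnr)
{
	void *frames[32];
	int n = backtrace(frames, 32);
	char **syms = backtrace_symbols(frames, n);

	if (!syms)
		return;

	/* skipnr + 1: also hide this function's own frame */
	for (int i = skipnr + 1; i < n; i++)
		printf("  %s\n", syms[i]);
	free(syms);
}

/*
 * wrapper: bumps skipnr by one so its own frame is hidden too, mirroring
 * bch2_prt_task_backtrace() -> bch2_save_backtrace() -> stack_trace_save_tsk()
 */
static void save_backtrace_sketch(unsigned skipnr)
{
	prt_backtrace_sketch(skipnr + 1);
}

int main(void)
{
	/* skipnr == 0: the first frame printed is main(); both helpers are hidden */
	save_backtrace_sketch(0);
	return 0;
}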
@@ -347,9 +347,9 @@ void bch2_prt_u64_binary(struct printbuf *, u64, unsigned);
 void bch2_print_string_as_lines(const char *prefix, const char *lines);
 
 typedef DARRAY(unsigned long) bch_stacktrace;
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *);
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned);
 void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
-int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned);
 
 #define NR_QUANTILES		15
 #define QUANTILE_IDX(i)		inorder_to_eytzinger0(i, NR_QUANTILES)