Commit 674cfc26 authored by Kent Overstreet

bcachefs: Add persistent counters for all tracepoints

Also, do some reorganizing/renaming, convert atomic counters in bch_fs
to persistent counters, and add a few missing counters.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent d97e6aae
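The mechanical pattern behind most of the hunks below is the new trace_and_count() wrapper added to bcachefs.h: it bumps the per-cpu, per-filesystem counter named after the tracepoint and then emits the tracepoint itself, so every trace call site also feeds a persistent counter. The following is a minimal userspace sketch of that idea only, not the kernel code: it uses a plain atomic array instead of the kernel's percpu counters, drops the filesystem argument, and uses a hypothetical printf-based trace_emit() as a stand-in for trace_##name().

/*
 * Sketch of the trace_and_count() pattern (assumed simplification).
 * counter names mirror the commit; trace_emit() is a hypothetical stand-in.
 */
#include <stdatomic.h>
#include <stdio.h>

enum counter_id {
	COUNTER_bucket_invalidate,
	COUNTER_bucket_alloc,
	COUNTER_bucket_alloc_fail,
	COUNTER_NR,
};

static _Atomic unsigned long counters[COUNTER_NR];

/* Hypothetical stand-in for the real trace_##_name() tracepoint call. */
#define trace_emit(_name, ...) printf("trace: %s\n", #_name)

/* Bump the matching counter, then fire the tracepoint. */
#define trace_and_count(_name, ...)                               \
	do {                                                      \
		atomic_fetch_add(&counters[COUNTER_##_name], 1);  \
		trace_emit(_name, __VA_ARGS__);                   \
	} while (0)

int main(void)
{
	/* A failed allocation is traced and counted in one place. */
	trace_and_count(bucket_alloc_fail, 0);

	printf("bucket_alloc_fail = %lu\n",
	       (unsigned long)atomic_load(&counters[COUNTER_bucket_alloc_fail]));
	return 0;
}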
@@ -1134,8 +1134,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 if (ret)
 goto out;
-trace_invalidate_bucket(c, bucket.inode, bucket.offset, cached_sectors);
-this_cpu_inc(c->counters[BCH_COUNTER_bucket_invalidate]);
+trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
 --*nr_to_invalidate;
 out:
 bch2_trans_iter_exit(trans, &alloc_iter);
...
@@ -584,32 +584,32 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 if (!ob)
 ob = ERR_PTR(-BCH_ERR_no_buckets_found);
-if (!IS_ERR(ob)) {
-trace_bucket_alloc(ca, bch2_alloc_reserves[reserve],
-usage.d[BCH_DATA_free].buckets,
-avail,
-bch2_copygc_wait_amount(c),
-c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-buckets_seen,
-skipped_open,
-skipped_need_journal_commit,
-skipped_nouse,
-cl == NULL,
-"");
-} else {
-trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve],
-usage.d[BCH_DATA_free].buckets,
-avail,
-bch2_copygc_wait_amount(c),
-c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
-buckets_seen,
-skipped_open,
-skipped_need_journal_commit,
-skipped_nouse,
-cl == NULL,
-bch2_err_str(PTR_ERR(ob)));
-atomic_long_inc(&c->bucket_alloc_fail);
-}
+if (!IS_ERR(ob))
+trace_and_count(c, bucket_alloc, ca,
+bch2_alloc_reserves[reserve],
+usage.d[BCH_DATA_free].buckets,
+avail,
+bch2_copygc_wait_amount(c),
+c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+buckets_seen,
+skipped_open,
+skipped_need_journal_commit,
+skipped_nouse,
+cl == NULL,
+"");
+else
+trace_and_count(c, bucket_alloc_fail, ca,
+bch2_alloc_reserves[reserve],
+usage.d[BCH_DATA_free].buckets,
+avail,
+bch2_copygc_wait_amount(c),
+c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
+buckets_seen,
+skipped_open,
+skipped_need_journal_commit,
+skipped_nouse,
+cl == NULL,
+bch2_err_str(PTR_ERR(ob)));
 return ob;
 }
...
@@ -212,6 +212,12 @@
 #define dynamic_fault(...) 0
 #define race_fault(...) 0
+#define trace_and_count(_c, _name, ...) \
+do { \
+this_cpu_inc((_c)->counters[BCH_COUNTER_##_name]); \
+trace_##_name(__VA_ARGS__); \
+} while (0)
 #define bch2_fs_init_fault(name) \
 dynamic_fault("bcachefs:bch_fs_init:" name)
 #define bch2_meta_read_fault(name) \
@@ -916,12 +922,6 @@ mempool_t bio_bounce_pages;
 u64 last_bucket_seq_cleanup;
-/* TODO rewrite as counters - The rest of this all shows up in sysfs */
-atomic_long_t read_realloc_races;
-atomic_long_t extent_migrate_done;
-atomic_long_t extent_migrate_raced;
-atomic_long_t bucket_alloc_fail;
 u64 counters_on_mount[BCH_COUNTER_NR];
 u64 __percpu *counters;
...
@@ -1326,12 +1326,81 @@ struct bch_sb_field_disk_groups {
 /* BCH_SB_FIELD_counters */
 #define BCH_PERSISTENT_COUNTERS() \
 x(io_read, 0) \
 x(io_write, 1) \
 x(io_move, 2) \
 x(bucket_invalidate, 3) \
-x(bucket_discard, 4)
+x(bucket_discard, 4) \
+x(bucket_alloc, 5) \
+x(bucket_alloc_fail, 6) \
+x(btree_cache_scan, 7) \
+x(btree_cache_reap, 8) \
+x(btree_cache_cannibalize, 9) \
+x(btree_cache_cannibalize_lock, 10) \
+x(btree_cache_cannibalize_lock_fail, 11) \
+x(btree_cache_cannibalize_unlock, 12) \
+x(btree_node_write, 13) \
+x(btree_node_read, 14) \
+x(btree_node_compact, 15) \
+x(btree_node_merge, 16) \
+x(btree_node_split, 17) \
+x(btree_node_rewrite, 18) \
+x(btree_node_alloc, 19) \
+x(btree_node_free, 20) \
+x(btree_node_set_root, 21) \
+x(btree_path_relock_fail, 22) \
+x(btree_path_upgrade_fail, 23) \
+x(btree_reserve_get_fail, 24) \
+x(journal_entry_full, 25) \
+x(journal_full, 26) \
+x(journal_reclaim_finish, 27) \
+x(journal_reclaim_start, 28) \
+x(journal_write, 29) \
+x(read_promote, 30) \
+x(read_bounce, 31) \
+x(read_split, 33) \
+x(read_retry, 32) \
+x(read_reuse_race, 34) \
+x(move_extent_read, 35) \
+x(move_extent_write, 36) \
+x(move_extent_finish, 37) \
+x(move_extent_fail, 38) \
+x(move_extent_alloc_mem_fail, 39) \
+x(copygc, 40) \
+x(copygc_wait, 41) \
+x(gc_gens_end, 42) \
+x(gc_gens_start, 43) \
+x(trans_blocked_journal_reclaim, 44) \
+x(trans_restart_btree_node_reused, 45) \
+x(trans_restart_btree_node_split, 46) \
+x(trans_restart_fault_inject, 47) \
+x(trans_restart_iter_upgrade, 48) \
+x(trans_restart_journal_preres_get, 49) \
+x(trans_restart_journal_reclaim, 50) \
+x(trans_restart_journal_res_get, 51) \
+x(trans_restart_key_cache_key_realloced, 52) \
+x(trans_restart_key_cache_raced, 53) \
+x(trans_restart_mark_replicas, 54) \
+x(trans_restart_mem_realloced, 55) \
+x(trans_restart_memory_allocation_failure, 56) \
+x(trans_restart_relock, 57) \
+x(trans_restart_relock_after_fill, 58) \
+x(trans_restart_relock_key_cache_fill, 59) \
+x(trans_restart_relock_next_node, 60) \
+x(trans_restart_relock_parent_for_fill, 61) \
+x(trans_restart_relock_path, 62) \
+x(trans_restart_relock_path_intent, 63) \
+x(trans_restart_too_many_iters, 64) \
+x(trans_restart_traverse, 65) \
+x(trans_restart_upgrade, 66) \
+x(trans_restart_would_deadlock, 67) \
+x(trans_restart_would_deadlock_write, 68) \
+x(trans_restart_injected, 69) \
+x(trans_restart_key_cache_upgrade, 70) \
+x(trans_traverse_all, 71) \
+x(transaction_commit, 72) \
+x(write_super, 73)
 enum bch_persistent_counters {
 #define x(t, n, ...) BCH_COUNTER_##t,
...
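For readers unfamiliar with the x-macro idiom used above: the single BCH_PERSISTENT_COUNTERS() list is expanded several times with different definitions of x() — once into the BCH_COUNTER_* enum shown in this hunk, and again in sysfs.c to declare one attribute per counter. The following standalone sketch shows only the pattern; the three-entry demo list and all demo_* names are illustrative, not part of the real header.

/* Sketch of the x-macro expansion pattern (assumed, simplified names). */
#include <stdio.h>

#define DEMO_COUNTERS()			\
	x(io_read,		0)	\
	x(io_write,		1)	\
	x(bucket_invalidate,	3)

/* First expansion: an enum of counter indices. */
enum demo_counters {
#define x(t, n) DEMO_COUNTER_##t,
	DEMO_COUNTERS()
#undef x
	DEMO_COUNTER_NR,
};

/* Second expansion: a matching table of counter names. */
static const char * const demo_counter_names[] = {
#define x(t, n) #t,
	DEMO_COUNTERS()
#undef x
};

int main(void)
{
	for (int i = 0; i < DEMO_COUNTER_NR; i++)
		printf("%d: %s\n", i, demo_counter_names[i]);
	return 0;
}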
@@ -253,7 +253,7 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 }
 out:
 if (b->hash_val && !ret)
-trace_btree_node_reap(c, b);
+trace_and_count(c, btree_cache_reap, c, b);
 return ret;
 out_unlock:
 six_unlock_write(&b->c.lock);
@@ -377,7 +377,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 ret = freed;
 memalloc_nofs_restore(flags);
 out_norestore:
-trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
+trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
 return ret;
 }
@@ -504,7 +504,7 @@ void bch2_btree_cache_cannibalize_unlock(struct bch_fs *c)
 struct btree_cache *bc = &c->btree_cache;
 if (bc->alloc_lock == current) {
-trace_btree_node_cannibalize_unlock(c);
+trace_and_count(c, btree_cache_cannibalize_unlock, c);
 bc->alloc_lock = NULL;
 closure_wake_up(&bc->alloc_wait);
 }
@@ -520,7 +520,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 goto success;
 if (!cl) {
-trace_btree_node_cannibalize_lock_fail(c);
+trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
 return -ENOMEM;
 }
@@ -534,11 +534,11 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 goto success;
 }
-trace_btree_node_cannibalize_lock_fail(c);
+trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
 return -EAGAIN;
 success:
-trace_btree_node_cannibalize_lock(c);
+trace_and_count(c, btree_cache_cannibalize_lock, c);
 return 0;
 }
@@ -662,7 +662,7 @@ struct btree *bch2_btree_node_mem_alloc(struct bch_fs *c, bool pcpu_read_locks)
 mutex_unlock(&bc->lock);
-trace_btree_node_cannibalize(c);
+trace_and_count(c, btree_cache_cannibalize, c);
 goto out;
 }
@@ -691,7 +691,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 * been freed:
 */
 if (trans && !bch2_btree_node_relock(trans, path, level + 1)) {
-trace_trans_restart_relock_parent_for_fill(trans, _THIS_IP_, path);
+trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
 }
@@ -699,7 +699,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 if (trans && b == ERR_PTR(-ENOMEM)) {
 trans->memory_allocation_failure = true;
-trace_trans_restart_memory_allocation_failure(trans, _THIS_IP_, path);
+trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
 }
@@ -748,7 +748,7 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 if (!six_relock_type(&b->c.lock, lock_type, seq)) {
 if (trans)
-trace_trans_restart_relock_after_fill(trans, _THIS_IP_, path);
+trace_and_count(c, trans_restart_relock_after_fill, trans, _THIS_IP_, path);
 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_after_fill));
 }
@@ -903,7 +903,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 if (bch2_btree_node_relock(trans, path, level + 1))
 goto retry;
-trace_trans_restart_btree_node_reused(trans, trace_ip, path);
+trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
 }
 }
...
@@ -1931,7 +1931,7 @@ int bch2_gc_gens(struct bch_fs *c)
 if (!mutex_trylock(&c->gc_gens_lock))
 return 0;
-trace_gc_gens_start(c);
+trace_and_count(c, gc_gens_start, c);
 down_read(&c->gc_lock);
 bch2_trans_init(&trans, c, 0, 0);
@@ -1992,7 +1992,7 @@ int bch2_gc_gens(struct bch_fs *c)
 c->gc_count++;
 bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
-trace_gc_gens_end(c);
+trace_and_count(c, gc_gens_end, c);
 err:
 for_each_member_device(ca, c, i) {
 kvfree(ca->oldest_gen);
...
@@ -1485,7 +1485,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 struct bio *bio;
 int ret;
-trace_btree_read(c, b);
+trace_and_count(c, btree_node_read, c, b);
 if (bch2_verify_all_btree_replicas &&
 !btree_node_read_all_replicas(c, b, sync))
@@ -1974,7 +1974,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 c->opts.nochanges)
 goto err;
-trace_btree_write(b, bytes_to_write, sectors_to_write);
+trace_and_count(c, btree_node_write, b, bytes_to_write, sectors_to_write);
 wbio = container_of(bio_alloc_bioset(NULL,
 buf_pages(data, sectors_to_write << 9),
...
@@ -1072,7 +1072,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 trans->in_traverse_all = false;
-trace_trans_traverse_all(trans, trace_ip);
+trace_and_count(c, trans_traverse_all, trans, trace_ip);
 return ret;
 }
@@ -1209,7 +1209,7 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
 u64 max = ~(~0ULL << restart_probability_bits);
 if (!get_random_u32_below(max)) {
-trace_transaction_restart_injected(trans, _RET_IP_);
+trace_and_count(trans->c, trans_restart_injected, trans, _RET_IP_);
 return btree_trans_restart(trans, BCH_ERR_transaction_restart_fault_inject);
 }
 }
@@ -1728,7 +1728,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 path->l[path->level].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
 path->l[path->level + 1].b = ERR_PTR(-BCH_ERR_no_btree_node_relock);
 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-trace_trans_restart_relock_next_node(trans, _THIS_IP_, path);
+trace_and_count(trans->c, trans_restart_relock_next_node, trans, _THIS_IP_, path);
 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
 goto err;
 }
@@ -2773,7 +2773,7 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 trans->mem_bytes = new_bytes;
 if (old_bytes) {
-trace_trans_restart_mem_realloced(trans, _RET_IP_, new_bytes);
+trace_and_count(trans->c, trans_restart_mem_realloced, trans, _RET_IP_, new_bytes);
 return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_mem_realloced));
 }
 }
...
@@ -388,7 +388,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
 static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 {
 if (hweight64(trans->paths_allocated) > BTREE_ITER_MAX - 8) {
-trace_trans_restart_too_many_iters(trans, _THIS_IP_);
+trace_and_count(trans->c, trans_restart_too_many_iters, trans, _THIS_IP_);
 return btree_trans_restart(trans, BCH_ERR_transaction_restart_too_many_iters);
 }
...
@@ -291,7 +291,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 k = bch2_btree_path_peek_slot(path, &u);
 if (!bch2_btree_node_relock(trans, ck_path, 0)) {
-trace_trans_restart_relock_key_cache_fill(trans, _THIS_IP_, ck_path);
+trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 goto err;
 }
@@ -414,7 +414,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
 */
 if (!path->locks_want &&
 !__bch2_btree_path_upgrade(trans, path, 1)) {
-trace_transaction_restart_key_cache_upgrade(trans, _THIS_IP_);
+trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
 goto err;
 }
...
@@ -152,7 +152,7 @@ int __bch2_btree_node_lock(struct btree_trans *trans,
 return btree_node_lock_type(trans, path, b, pos, level,
 type, should_sleep_fn, p);
 deadlock:
-trace_trans_restart_would_deadlock(trans, ip, reason, linked, path, &pos);
+trace_and_count(trans->c, trans_restart_would_deadlock, trans, ip, reason, linked, path, &pos);
 return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock);
 }
@@ -218,7 +218,7 @@ bool __bch2_btree_node_relock(struct btree_trans *trans,
 return true;
 }
 fail:
-trace_btree_node_relock_fail(trans, _RET_IP_, path, level);
+trace_and_count(trans->c, btree_path_relock_fail, trans, _RET_IP_, path, level);
 return false;
 }
@@ -262,7 +262,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 goto success;
 }
-trace_btree_node_upgrade_fail(trans, _RET_IP_, path, level);
+trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
 return false;
 success:
 mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
@@ -285,7 +285,7 @@ int bch2_btree_path_relock_intent(struct btree_trans *trans,
 if (!bch2_btree_node_relock(trans, path, l)) {
 __bch2_btree_path_unlock(trans, path);
 btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
-trace_trans_restart_relock_path_intent(trans, _RET_IP_, path);
+trace_and_count(trans->c, trans_restart_relock_path_intent, trans, _RET_IP_, path);
 return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path_intent);
 }
 }
@@ -304,7 +304,7 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
 struct btree_path *path, unsigned long trace_ip)
 {
 if (!bch2_btree_path_relock_norestart(trans, path, trace_ip)) {
-trace_trans_restart_relock_path(trans, trace_ip, path);
+trace_and_count(trans->c, trans_restart_relock_path, trans, trace_ip, path);
 return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock_path);
 }
@@ -416,7 +416,7 @@ int bch2_trans_relock(struct btree_trans *trans)
 trans_for_each_path(trans, path)
 if (path->should_be_locked &&
 bch2_btree_path_relock(trans, path, _RET_IP_)) {
-trace_trans_restart_relock(trans, _RET_IP_, path);
+trace_and_count(trans->c, trans_restart_relock, trans, _RET_IP_, path);
 BUG_ON(!trans->restarted);
 return -BCH_ERR_transaction_restart_relock;
 }
...
@@ -143,7 +143,7 @@ bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
 static void __btree_node_free(struct bch_fs *c, struct btree *b)
 {
-trace_btree_node_free(c, b);
+trace_and_count(c, btree_node_free, c, b);
 BUG_ON(btree_node_dirty(b));
 BUG_ON(btree_node_need_write(b));
@@ -305,7 +305,7 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
 ret = bch2_btree_node_hash_insert(&c->btree_cache, b, level, as->btree_id);
 BUG_ON(ret);
-trace_btree_node_alloc(c, b);
+trace_and_count(c, btree_node_alloc, c, b);
 return b;
 }
@@ -995,7 +995,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 nr_nodes[1] += 1;
 if (!bch2_btree_path_upgrade(trans, path, U8_MAX)) {
-trace_trans_restart_iter_upgrade(trans, _RET_IP_, path);
+trace_and_count(c, trans_restart_iter_upgrade, trans, _RET_IP_, path);
 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 return ERR_PTR(ret);
 }
@@ -1058,7 +1058,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 BTREE_UPDATE_JOURNAL_RES,
 journal_flags);
 if (ret) {
-trace_trans_restart_journal_preres_get(trans, _RET_IP_, journal_flags);
+trace_and_count(c, trans_restart_journal_preres_get, trans, _RET_IP_, journal_flags);
 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_journal_preres_get);
 goto err;
 }
@@ -1091,8 +1091,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 }
 if (ret) {
-trace_btree_reserve_get_fail(trans->fn, _RET_IP_,
-nr_nodes[0] + nr_nodes[1]);
+trace_and_count(c, btree_reserve_get_fail, trans->fn, _RET_IP_, nr_nodes[0] + nr_nodes[1]);
 goto err;
 }
@@ -1147,7 +1146,7 @@ static void bch2_btree_set_root(struct btree_update *as,
 struct bch_fs *c = as->c;
 struct btree *old;
-trace_btree_set_root(c, b);
+trace_and_count(c, btree_node_set_root, c, b);
 BUG_ON(!b->written);
 old = btree_node_root(c, b);
@@ -1434,7 +1433,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
 btree_split_insert_keys(as, trans, path, n1, keys);
 if (bset_u64s(&n1->set[0]) > BTREE_SPLIT_THRESHOLD(c)) {
-trace_btree_split(c, b);
+trace_and_count(c, btree_node_split, c, b);
 n2 = __btree_split_node(as, n1);
@@ -1468,7 +1467,7 @@ static void btree_split(struct btree_update *as, struct btree_trans *trans,
 bch2_btree_node_write(c, n3, SIX_LOCK_intent, 0);
 }
 } else {
-trace_btree_compact(c, b);
+trace_and_count(c, btree_node_compact, c, b);
 bch2_btree_build_aux_trees(n1);
 six_unlock_write(&n1->c.lock);
@@ -1737,7 +1736,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 if (ret)
 goto err;
-trace_btree_merge(c, b);
+trace_and_count(c, btree_node_merge, c, b);
 bch2_btree_interior_update_will_free_node(as, b);
 bch2_btree_interior_update_will_free_node(as, m);
@@ -1829,7 +1828,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
 bch2_btree_build_aux_trees(n);
 six_unlock_write(&n->c.lock);
-trace_btree_rewrite(c, b);
+trace_and_count(c, btree_node_rewrite, c, b);
 bch2_btree_node_write(c, n, SIX_LOCK_intent, 0);
...
@@ -285,7 +285,7 @@ bch2_trans_journal_preres_get_cold(struct btree_trans *trans, unsigned u64s,
 ret = bch2_trans_relock(trans);
 if (ret) {
-trace_trans_restart_journal_preres_get(trans, trace_ip, 0);
+trace_and_count(c, trans_restart_journal_preres_get, trans, trace_ip, 0);
 return ret;
 }
@@ -375,7 +375,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 * Keys returned by peek() are no longer valid pointers, so we need a
 * transaction restart:
 */
-trace_trans_restart_key_cache_key_realloced(trans, _RET_IP_, path, old_u64s, new_u64s);
+trace_and_count(c, trans_restart_key_cache_key_realloced, trans, _RET_IP_, path, old_u64s, new_u64s);
 return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_key_cache_realloced);
 }
@@ -567,7 +567,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 int ret;
 if (race_fault()) {
-trace_trans_restart_fault_inject(trans, trace_ip);
+trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
 return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
 }
@@ -842,7 +842,7 @@ static inline int trans_lock_write(struct btree_trans *trans)
 bch2_btree_node_unlock_write_inlined(trans, i->path, insert_l(i)->b);
 }
-trace_trans_restart_would_deadlock_write(trans);
+trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
 return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
 }
@@ -975,7 +975,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 case BTREE_INSERT_BTREE_NODE_FULL:
 ret = bch2_btree_split_leaf(trans, i->path, trans->flags);
 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-trace_trans_restart_btree_node_split(trans, trace_ip, i->path);
+trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
 break;
 case BTREE_INSERT_NEED_MARK_REPLICAS:
 bch2_trans_unlock(trans);
@@ -986,7 +986,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 ret = bch2_trans_relock(trans);
 if (ret)
-trace_trans_restart_mark_replicas(trans, trace_ip);
+trace_and_count(c, trans_restart_mark_replicas, trans, trace_ip);
 break;
 case BTREE_INSERT_NEED_JOURNAL_RES:
 bch2_trans_unlock(trans);
@@ -1003,12 +1003,12 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 ret = bch2_trans_relock(trans);
 if (ret)
-trace_trans_restart_journal_res_get(trans, trace_ip);
+trace_and_count(c, trans_restart_journal_res_get, trans, trace_ip);
 break;
 case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
 bch2_trans_unlock(trans);
-trace_trans_blocked_journal_reclaim(trans, trace_ip);
+trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
 wait_event_freezable(c->journal.reclaim_wait,
 (ret = journal_reclaim_wait_done(c)));
@@ -1017,7 +1017,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 ret = bch2_trans_relock(trans);
 if (ret)
-trace_trans_restart_journal_reclaim(trans, trace_ip);
+trace_and_count(c, trans_restart_journal_reclaim, trans, trace_ip);
 break;
 default:
 BUG_ON(ret >= 0);
@@ -1120,7 +1120,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 BUG_ON(!i->path->should_be_locked);
 if (unlikely(!bch2_btree_path_upgrade(trans, i->path, i->level + 1))) {
-trace_trans_restart_upgrade(trans, _RET_IP_, i->path);
+trace_and_count(c, trans_restart_upgrade, trans, _RET_IP_, i->path);
 ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 goto out;
 }
@@ -1166,7 +1166,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 if (ret)
 goto err;
-trace_transaction_commit(trans, _RET_IP_);
+trace_and_count(c, transaction_commit, trans, _RET_IP_);
 out:
 bch2_journal_preres_put(&c->journal, &trans->journal_preres);
@@ -1642,7 +1642,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 ck = (void *) iter->key_cache_path->l[0].b;
 if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
-trace_trans_restart_key_cache_raced(trans, _RET_IP_);
+trace_and_count(trans->c, trans_restart_key_cache_raced, trans, _RET_IP_);
 return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 }
...
@@ -231,9 +231,12 @@ int bch2_data_update_index_update(struct bch_write_op *op)
 m->data_opts.btree_insert_flags);
 if (!ret) {
 bch2_btree_iter_set_pos(&iter, next_pos);
-atomic_long_inc(&c->extent_migrate_done);
 if (ec_ob)
 bch2_ob_add_backpointer(c, ec_ob, &insert->k);
+this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
+trace_move_extent_finish(&new->k);
 }
 err:
 if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -248,16 +251,16 @@ int bch2_data_update_index_update(struct bch_write_op *op)
 }
 continue;
 nomatch:
-trace_data_update_fail(&old.k->p);
 if (m->ctxt) {
 BUG_ON(k.k->p.offset <= iter.pos.offset);
 atomic64_inc(&m->ctxt->stats->keys_raced);
 atomic64_add(k.k->p.offset - iter.pos.offset,
 &m->ctxt->stats->sectors_raced);
 }
-atomic_long_inc(&c->extent_migrate_raced);
-trace_move_race(&new->k);
+this_cpu_add(c->counters[BCH_COUNTER_move_extent_fail], new->k.size);
+trace_move_extent_fail(&new->k);
 bch2_btree_iter_advance(&iter);
 goto next;
 }
...
@@ -1496,7 +1496,7 @@ static void promote_start(struct promote_op *op, struct bch_read_bio *rbio)
 {
 struct bio *bio = &op->write.op.wbio.bio;
-trace_promote(&rbio->bio);
+trace_and_count(op->write.op.c, read_promote, &rbio->bio);
 /* we now own pages: */
 BUG_ON(!rbio->bounce);
@@ -1761,7 +1761,7 @@ static void bch2_rbio_retry(struct work_struct *work)
 };
 struct bch_io_failures failed = { .nr = 0 };
-trace_read_retry(&rbio->bio);
+trace_and_count(c, read_retry, &rbio->bio);
 if (rbio->retry == READ_RETRY_AVOID)
 bch2_mark_io_failure(&failed, &rbio->pick);
@@ -2017,7 +2017,7 @@ static void bch2_read_endio(struct bio *bio)
 if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
 ptr_stale(ca, &rbio->pick.ptr)) {
-atomic_long_inc(&c->read_realloc_races);
+trace_and_count(c, read_reuse_race, &rbio->bio);
 if (rbio->flags & BCH_READ_RETRY_IF_STALE)
 bch2_rbio_error(rbio, READ_RETRY, BLK_STS_AGAIN);
@@ -2305,7 +2305,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 rbio->bio.bi_end_io = bch2_read_endio;
 if (rbio->bounce)
-trace_read_bounce(&rbio->bio);
+trace_and_count(c, read_bounce, &rbio->bio);
 this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));
 bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);
@@ -2320,7 +2320,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
 if (!(flags & (BCH_READ_IN_RETRY|BCH_READ_LAST_FRAGMENT))) {
 bio_inc_remaining(&orig->bio);
-trace_read_split(&orig->bio);
+trace_and_count(c, read_split, &orig->bio);
 }
 if (!rbio->pick.idx) {
...
@@ -390,12 +390,12 @@ static int __journal_res_get(struct journal *j, struct journal_res *res,
 ret = journal_entry_open(j);
 if (ret == JOURNAL_ERR_max_in_flight)
-trace_journal_entry_full(c);
+trace_and_count(c, journal_entry_full, c);
 unlock:
 if ((ret && ret != JOURNAL_ERR_insufficient_devices) &&
 !j->res_get_blocked_start) {
 j->res_get_blocked_start = local_clock() ?: 1;
-trace_journal_full(c);
+trace_and_count(c, journal_full, c);
 }
 can_discard = j->can_discard;
...
@@ -1552,7 +1552,7 @@ static void do_journal_write(struct closure *cl)
 bch2_bio_map(bio, w->data, sectors << 9);
-trace_journal_write(bio);
+trace_and_count(c, journal_write, bio);
 closure_bio_submit(bio, cl);
 ca->journal.bucket_seq[ca->journal.cur_idx] =
...
@@ -642,7 +642,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
-trace_journal_reclaim_start(c, direct, kicked,
+trace_and_count(c, journal_reclaim_start, c,
+direct, kicked,
 min_nr, min_key_cache,
 j->prereserved.reserved,
 j->prereserved.remaining,
@@ -658,7 +659,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 j->nr_direct_reclaim += nr_flushed;
 else
 j->nr_background_reclaim += nr_flushed;
-trace_journal_reclaim_finish(c, nr_flushed);
+trace_and_count(c, journal_reclaim_finish, c, nr_flushed);
 if (nr_flushed)
 wake_up(&j->reclaim_wait);
...
@@ -245,8 +245,8 @@ static int bch2_move_extent(struct btree_trans *trans,
 atomic64_inc(&ctxt->stats->keys_moved);
 atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
 this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
-trace_move_extent(k.k);
+this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
+trace_move_extent_read(k.k);
 atomic_add(io->read_sectors, &ctxt->read_sectors);
 list_add_tail(&io->list, &ctxt->reads);
@@ -268,7 +268,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 kfree(io);
 err:
 percpu_ref_put(&c->writes);
-trace_move_alloc_mem_fail(k.k);
+trace_and_count(c, move_extent_alloc_mem_fail, k.k);
 return ret;
 }
...
@@ -339,7 +339,7 @@ static int bch2_copygc(struct bch_fs *c)
 atomic64_read(&move_stats.keys_raced),
 atomic64_read(&move_stats.sectors_raced));
-trace_copygc(c,
+trace_and_count(c, copygc, c,
 atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
 buckets_to_move, buckets_not_moved);
 return 0;
@@ -397,7 +397,7 @@ static int bch2_copygc_thread(void *arg)
 wait = bch2_copygc_wait_amount(c);
 if (wait > clock->max_slop) {
-trace_copygc_wait(c, wait, last + wait);
+trace_and_count(c, copygc_wait, c, wait, last + wait);
 c->copygc_wait = last + wait;
 bch2_kthread_io_clock_wait(clock, last + wait,
 MAX_SCHEDULE_TIMEOUT);
...
@@ -801,7 +801,7 @@ int bch2_write_super(struct bch_fs *c)
 unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
 int ret = 0;
-trace_write_super(c, _RET_IP_);
+trace_and_count(c, write_super, c, _RET_IP_);
 if (c->opts.very_degraded)
 degraded_flags |= BCH_FORCE_IF_LOST;
...
@@ -190,11 +190,6 @@ read_attribute(internal_uuid);
 read_attribute(has_data);
 read_attribute(alloc_debug);
-read_attribute(read_realloc_races);
-read_attribute(extent_migrate_done);
-read_attribute(extent_migrate_raced);
-read_attribute(bucket_alloc_fail);
 #define x(t, n, ...) read_attribute(t);
 BCH_PERSISTENT_COUNTERS()
 #undef x
@@ -378,15 +373,6 @@ SHOW(bch2_fs)
 sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
 sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));
-sysfs_print(read_realloc_races,
-atomic_long_read(&c->read_realloc_races));
-sysfs_print(extent_migrate_done,
-atomic_long_read(&c->extent_migrate_done));
-sysfs_print(extent_migrate_raced,
-atomic_long_read(&c->extent_migrate_raced));
-sysfs_print(bucket_alloc_fail,
-atomic_long_read(&c->bucket_alloc_fail));
 sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);
 if (attr == &sysfs_gc_gens_pos)
@@ -629,11 +615,6 @@ struct attribute *bch2_fs_internal_files[] = {
 &sysfs_trigger_invalidates,
 &sysfs_prune_cache,
-&sysfs_read_realloc_races,
-&sysfs_extent_migrate_done,
-&sysfs_extent_migrate_raced,
-&sysfs_bucket_alloc_fail,
 &sysfs_gc_gens_pos,
 &sysfs_copy_gc_enabled,
...
@@ -52,6 +52,31 @@ DECLARE_EVENT_CLASS(bkey,
 __entry->offset, __entry->size)
 );
+DECLARE_EVENT_CLASS(btree_node,
+TP_PROTO(struct bch_fs *c, struct btree *b),
+TP_ARGS(c, b),
+
+TP_STRUCT__entry(
+__field(dev_t, dev )
+__field(u8, level )
+__field(u8, btree_id )
+TRACE_BPOS_entries(pos)
+),
+
+TP_fast_assign(
+__entry->dev = c->dev;
+__entry->level = b->c.level;
+__entry->btree_id = b->c.btree_id;
+TRACE_BPOS_assign(pos, b->key.k.p);
+),
+
+TP_printk("%d,%d %u %s %llu:%llu:%u",
+MAJOR(__entry->dev), MINOR(__entry->dev),
+__entry->level,
+bch2_btree_ids[__entry->btree_id],
+__entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
+);
+
 DECLARE_EVENT_CLASS(bch_fs,
 TP_PROTO(struct bch_fs *c),
 TP_ARGS(c),
@@ -112,7 +137,7 @@ TRACE_EVENT(write_super,
 /* io.c: */
-DEFINE_EVENT(bio, read_split,
+DEFINE_EVENT(bio, read_promote,
 TP_PROTO(struct bio *bio),
 TP_ARGS(bio)
 );
@@ -122,12 +147,17 @@ DEFINE_EVENT(bio, read_bounce,
 TP_ARGS(bio)
 );
+DEFINE_EVENT(bio, read_split,
+TP_PROTO(struct bio *bio),
+TP_ARGS(bio)
+);
+
 DEFINE_EVENT(bio, read_retry,
 TP_PROTO(struct bio *bio),
 TP_ARGS(bio)
 );
-DEFINE_EVENT(bio, promote,
+DEFINE_EVENT(bio, read_reuse_race,
 TP_PROTO(struct bio *bio),
 TP_ARGS(bio)
 );
@@ -220,8 +250,6 @@ TRACE_EVENT(journal_reclaim_finish,
 __entry->nr_flushed)
 );
-/* allocator: */
 /* bset.c: */
 DEFINE_EVENT(bpos, bkey_pack_pos_fail,
@@ -229,39 +257,61 @@ DEFINE_EVENT(bpos, bkey_pack_pos_fail,
 TP_ARGS(p)
 );
-/* Btree */
-
-DECLARE_EVENT_CLASS(btree_node,
-TP_PROTO(struct bch_fs *c, struct btree *b),
-TP_ARGS(c, b),
-
-TP_STRUCT__entry(
-__field(dev_t, dev )
-__field(u8, level )
-__field(u8, btree_id )
-TRACE_BPOS_entries(pos)
-),
-
-TP_fast_assign(
-__entry->dev = c->dev;
-__entry->level = b->c.level;
-__entry->btree_id = b->c.btree_id;
-TRACE_BPOS_assign(pos, b->key.k.p);
-),
-
-TP_printk("%d,%d %u %s %llu:%llu:%u",
-MAJOR(__entry->dev), MINOR(__entry->dev),
-__entry->level,
-bch2_btree_ids[__entry->btree_id],
-__entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
-);
-
-DEFINE_EVENT(btree_node, btree_read,
+/* Btree cache: */
+
+TRACE_EVENT(btree_cache_scan,
+TP_PROTO(long nr_to_scan, long can_free, long ret),
+TP_ARGS(nr_to_scan, can_free, ret),
+
+TP_STRUCT__entry(
+__field(long, nr_to_scan )
+__field(long, can_free )
+__field(long, ret )
+),
+
+TP_fast_assign(
+__entry->nr_to_scan = nr_to_scan;
+__entry->can_free = can_free;
+__entry->ret = ret;
+),
+
+TP_printk("scanned for %li nodes, can free %li, ret %li",
+__entry->nr_to_scan, __entry->can_free, __entry->ret)
+);
+
+DEFINE_EVENT(btree_node, btree_cache_reap,
+TP_PROTO(struct bch_fs *c, struct btree *b),
+TP_ARGS(c, b)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock_fail,
+TP_PROTO(struct bch_fs *c),
+TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_lock,
+TP_PROTO(struct bch_fs *c),
+TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize,
+TP_PROTO(struct bch_fs *c),
+TP_ARGS(c)
+);
+
+DEFINE_EVENT(bch_fs, btree_cache_cannibalize_unlock,
+TP_PROTO(struct bch_fs *c),
+TP_ARGS(c)
+);
+
+/* Btree */
+
+DEFINE_EVENT(btree_node, btree_node_read,
 TP_PROTO(struct bch_fs *c, struct btree *b),
 TP_ARGS(c, b)
 );
-TRACE_EVENT(btree_write,
+TRACE_EVENT(btree_node_write,
 TP_PROTO(struct btree *b, unsigned bytes, unsigned sectors),
 TP_ARGS(b, bytes, sectors),
@@ -291,31 +341,6 @@ DEFINE_EVENT(btree_node, btree_node_free,
 TP_ARGS(c, b)
 );
-DEFINE_EVENT(btree_node, btree_node_reap,
-TP_PROTO(struct bch_fs *c, struct btree *b),
-TP_ARGS(c, b)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock_fail,
-TP_PROTO(struct bch_fs *c),
-TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_lock,
-TP_PROTO(struct bch_fs *c),
-TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize,
-TP_PROTO(struct bch_fs *c),
-TP_ARGS(c)
-);
-
-DEFINE_EVENT(bch_fs, btree_node_cannibalize_unlock,
-TP_PROTO(struct bch_fs *c),
-TP_ARGS(c)
-);
-
 TRACE_EVENT(btree_reserve_get_fail,
 TP_PROTO(const char *trans_fn,
 unsigned long caller_ip,
@@ -340,52 +365,32 @@ TRACE_EVENT(btree_reserve_get_fail,
 __entry->required)
 );
-DEFINE_EVENT(btree_node, btree_split,
+DEFINE_EVENT(btree_node, btree_node_compact,
 TP_PROTO(struct bch_fs *c, struct btree *b),
 TP_ARGS(c, b)
 );
-DEFINE_EVENT(btree_node, btree_compact,
+DEFINE_EVENT(btree_node, btree_node_merge,
 TP_PROTO(struct bch_fs *c, struct btree *b),
 TP_ARGS(c, b)
 );
-DEFINE_EVENT(btree_node, btree_merge,
+DEFINE_EVENT(btree_node, btree_node_split,
 TP_PROTO(struct bch_fs *c, struct btree *b),
 TP_ARGS(c, b)
 );
-DEFINE_EVENT(btree_node, btree_rewrite,
+DEFINE_EVENT(btree_node, btree_node_rewrite,
 TP_PROTO(struct bch_fs *c, struct btree *b),
 TP_ARGS(c, b)
 );
-DEFINE_EVENT(btree_node, btree_set_root,
+DEFINE_EVENT(btree_node, btree_node_set_root,
 TP_PROTO(struct bch_fs *c, struct btree *b),
 TP_ARGS(c, b)
 );
-TRACE_EVENT(btree_cache_scan,
-TP_PROTO(long nr_to_scan, long can_free, long ret),
-TP_ARGS(nr_to_scan, can_free, ret),
-
-TP_STRUCT__entry(
-__field(long, nr_to_scan )
-__field(long, can_free )
-__field(long, ret )
-),
-
-TP_fast_assign(
-__entry->nr_to_scan = nr_to_scan;
-__entry->can_free = can_free;
-__entry->ret = ret;
-),
-
-TP_printk("scanned for %li nodes, can free %li, ret %li",
-__entry->nr_to_scan, __entry->can_free, __entry->ret)
-);
-
-TRACE_EVENT(btree_node_relock_fail,
+TRACE_EVENT(btree_path_relock_fail,
 TP_PROTO(struct btree_trans *trans,
 unsigned long caller_ip,
 struct btree_path *path,
@@ -429,7 +434,7 @@ TRACE_EVENT(btree_node_relock_fail,
 __entry->node_lock_seq)
 );
-TRACE_EVENT(btree_node_upgrade_fail,
+TRACE_EVENT(btree_path_upgrade_fail,
 TP_PROTO(struct btree_trans *trans,
 unsigned long caller_ip,
 struct btree_path *path,
@@ -617,7 +622,7 @@ TRACE_EVENT(discard_buckets,
 __entry->err)
 );
-TRACE_EVENT(invalidate_bucket,
+TRACE_EVENT(bucket_invalidate,
 TP_PROTO(struct bch_fs *c, unsigned dev, u64 bucket, u32 sectors),
 TP_ARGS(c, dev, bucket, sectors),
@@ -643,17 +648,27 @@ TRACE_EVENT(invalidate_bucket,
 /* Moving IO */
-DEFINE_EVENT(bkey, move_extent,
+DEFINE_EVENT(bkey, move_extent_read,
+TP_PROTO(const struct bkey *k),
+TP_ARGS(k)
+);
+
+DEFINE_EVENT(bkey, move_extent_write,
 TP_PROTO(const struct bkey *k),
 TP_ARGS(k)
 );
-DEFINE_EVENT(bkey, move_alloc_mem_fail,
+DEFINE_EVENT(bkey, move_extent_finish,
 TP_PROTO(const struct bkey *k),
 TP_ARGS(k)
 );
-DEFINE_EVENT(bkey, move_race,
+DEFINE_EVENT(bkey, move_extent_fail,
+TP_PROTO(const struct bkey *k),
+TP_ARGS(k)
+);
+
+DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
 TP_PROTO(const struct bkey *k),
 TP_ARGS(k)
 );
@@ -732,11 +747,6 @@ TRACE_EVENT(copygc_wait,
 __entry->wait_amount, __entry->until)
 );
-DEFINE_EVENT(bpos, data_update_fail,
-TP_PROTO(const struct bpos *p),
-TP_ARGS(p)
-);
-
 /* btree transactions: */
 DECLARE_EVENT_CLASS(transaction_event,
@@ -763,7 +773,7 @@ DEFINE_EVENT(transaction_event, transaction_commit,
 TP_ARGS(trans, caller_ip)
 );
-DEFINE_EVENT(transaction_event, transaction_restart_injected,
+DEFINE_EVENT(transaction_event, trans_restart_injected,
 TP_PROTO(struct btree_trans *trans,
 unsigned long caller_ip),
 TP_ARGS(trans, caller_ip)
@@ -926,7 +936,7 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_after_fill,
 TP_ARGS(trans, caller_ip, path)
 );
-DEFINE_EVENT(transaction_event, transaction_restart_key_cache_upgrade,
+DEFINE_EVENT(transaction_event, trans_restart_key_cache_upgrade,
 TP_PROTO(struct btree_trans *trans,
 unsigned long caller_ip),
 TP_ARGS(trans, caller_ip)
...