Commit 96a363a7 authored by Kent Overstreet

bcachefs: move: move_stats refactoring

data_progress_list is gone - it was redundant with moving_context_list

The upcoming rebalance rewrite will use two different move_stats objects
with the same moving_context, depending on whether it's scanning or using
the rebalance_work btree - this patch plumbs stats around a bit differently
so that will work.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent d5eade93
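
In other words, a bch_move_stats now has an explicit lifecycle of its own: a caller initializes it, lends it to one or more moving_contexts, and calls the new bch2_move_stats_exit() (which emits the move_data tracepoint) once the operation is done, as the bch2_data_job and copygc hunks below show. Separating that teardown from bch2_moving_ctxt_exit() is what lets one context later feed several stats objects. Below is a minimal userspace sketch of the ownership pattern, using hypothetical stand-in types rather than the real bch_move_stats/moving_context:

/*
 * Sketch only, NOT bcachefs code: "move_stats" / "moving_ctxt" are
 * hypothetical stand-ins for bch_move_stats / moving_context.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct move_stats {
	char		name[32];
	atomic_ullong	keys_moved;
	atomic_ullong	sectors_moved;
};

struct moving_ctxt {
	struct move_stats *stats;	/* borrowed, not owned */
};

static void move_stats_init(struct move_stats *stats, const char *name)
{
	snprintf(stats->name, sizeof(stats->name), "%s", name);
	atomic_init(&stats->keys_moved, 0);
	atomic_init(&stats->sectors_moved, 0);
}

/* Teardown (and reporting) belongs to the stats object, not the context... */
static void move_stats_exit(struct move_stats *stats)
{
	printf("%s: keys %llu sectors %llu\n", stats->name,
	       (unsigned long long) atomic_load(&stats->keys_moved),
	       (unsigned long long) atomic_load(&stats->sectors_moved));
}

int main(void)
{
	struct move_stats stats;
	struct moving_ctxt ctxt;

	move_stats_init(&stats, "example");

	/* ...so the same stats can back more than one short-lived context. */
	ctxt.stats = &stats;
	atomic_fetch_add(&ctxt.stats->keys_moved, 1);
	atomic_fetch_add(&ctxt.stats->sectors_moved, 8);

	move_stats_exit(&stats);
	return 0;
}
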
...@@ -939,9 +939,6 @@ struct bch_fs { ...@@ -939,9 +939,6 @@ struct bch_fs {
struct list_head moving_context_list; struct list_head moving_context_list;
struct mutex moving_context_lock; struct mutex moving_context_lock;
struct list_head data_progress_list;
struct mutex data_progress_lock;
/* REBALANCE */ /* REBALANCE */
struct bch_fs_rebalance rebalance; struct bch_fs_rebalance rebalance;
......
...@@ -440,7 +440,7 @@ int bch2_data_update_init(struct btree_trans *trans, ...@@ -440,7 +440,7 @@ int bch2_data_update_init(struct btree_trans *trans,
m->btree_id = btree_id; m->btree_id = btree_id;
m->data_opts = data_opts; m->data_opts = data_opts;
m->ctxt = ctxt; m->ctxt = ctxt;
m->stats = ctxt->stats; m->stats = ctxt ? ctxt->stats : NULL;
bch2_write_op_init(&m->op, c, io_opts); bch2_write_op_init(&m->op, c, io_opts);
m->op.pos = bkey_start_pos(k.k); m->op.pos = bkey_start_pos(k.k);
......
...@@ -60,20 +60,6 @@ static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c ...@@ -60,20 +60,6 @@ static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c
} }
} }
static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
{
mutex_lock(&c->data_progress_lock);
list_add(&stats->list, &c->data_progress_list);
mutex_unlock(&c->data_progress_lock);
}
static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
{
mutex_lock(&c->data_progress_lock);
list_del(&stats->list);
mutex_unlock(&c->data_progress_lock);
}
struct moving_io { struct moving_io {
struct list_head read_list; struct list_head read_list;
struct list_head io_list; struct list_head io_list;
...@@ -190,13 +176,6 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt) ...@@ -190,13 +176,6 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt)
EBUG_ON(atomic_read(&ctxt->read_sectors)); EBUG_ON(atomic_read(&ctxt->read_sectors));
EBUG_ON(atomic_read(&ctxt->read_ios)); EBUG_ON(atomic_read(&ctxt->read_ios));
if (ctxt->stats) {
progress_list_del(c, ctxt->stats);
trace_move_data(c,
atomic64_read(&ctxt->stats->sectors_moved),
atomic64_read(&ctxt->stats->keys_moved));
}
mutex_lock(&c->moving_context_lock); mutex_lock(&c->moving_context_lock);
list_del(&ctxt->list); list_del(&ctxt->list);
mutex_unlock(&c->moving_context_lock); mutex_unlock(&c->moving_context_lock);
...@@ -231,16 +210,17 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt, ...@@ -231,16 +210,17 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
mutex_lock(&c->moving_context_lock); mutex_lock(&c->moving_context_lock);
list_add(&ctxt->list, &c->moving_context_list); list_add(&ctxt->list, &c->moving_context_list);
mutex_unlock(&c->moving_context_lock); mutex_unlock(&c->moving_context_lock);
}
if (stats) { void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
progress_list_add(c, stats); {
stats->data_type = BCH_DATA_user; trace_move_data(c, stats);
}
} }
void bch2_move_stats_init(struct bch_move_stats *stats, char *name) void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
{ {
memset(stats, 0, sizeof(*stats)); memset(stats, 0, sizeof(*stats));
stats->data_type = BCH_DATA_user;
scnprintf(stats->name, sizeof(stats->name), "%s", name); scnprintf(stats->name, sizeof(stats->name), "%s", name);
} }
...@@ -303,6 +283,8 @@ int bch2_move_extent(struct moving_context *ctxt, ...@@ -303,6 +283,8 @@ int bch2_move_extent(struct moving_context *ctxt,
unsigned sectors = k.k->size, pages; unsigned sectors = k.k->size, pages;
int ret = -ENOMEM; int ret = -ENOMEM;
if (ctxt->stats)
ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
trace_move_extent2(c, k); trace_move_extent2(c, k);
bch2_data_update_opts_normalize(k, &data_opts); bch2_data_update_opts_normalize(k, &data_opts);
...@@ -878,14 +860,18 @@ static int bch2_move_btree(struct bch_fs *c, ...@@ -878,14 +860,18 @@ static int bch2_move_btree(struct bch_fs *c,
{ {
bool kthread = (current->flags & PF_KTHREAD) != 0; bool kthread = (current->flags & PF_KTHREAD) != 0;
struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts); struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
struct btree_trans *trans = bch2_trans_get(c); struct moving_context ctxt;
struct btree_trans *trans;
struct btree_iter iter; struct btree_iter iter;
struct btree *b; struct btree *b;
enum btree_id id; enum btree_id id;
struct data_update_opts data_opts; struct data_update_opts data_opts;
int ret = 0; int ret = 0;
progress_list_add(c, stats); bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
writepoint_ptr(&c->btree_write_point),
true);
trans = ctxt.trans;
stats->data_type = BCH_DATA_btree; stats->data_type = BCH_DATA_btree;
...@@ -933,14 +919,10 @@ static int bch2_move_btree(struct bch_fs *c, ...@@ -933,14 +919,10 @@ static int bch2_move_btree(struct bch_fs *c,
break; break;
} }
bch2_trans_put(trans);
if (ret)
bch_err_fn(c, ret); bch_err_fn(c, ret);
bch2_moving_ctxt_exit(&ctxt);
bch2_btree_interior_updates_flush(c); bch2_btree_interior_updates_flush(c);
progress_list_del(c, stats);
return ret; return ret;
} }
...@@ -1061,7 +1043,6 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats) ...@@ -1061,7 +1043,6 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
mutex_unlock(&c->sb_lock); mutex_unlock(&c->sb_lock);
} }
if (ret)
bch_err_fn(c, ret); bch_err_fn(c, ret);
return ret; return ret;
} }
...@@ -1093,6 +1074,8 @@ int bch2_data_job(struct bch_fs *c, ...@@ -1093,6 +1074,8 @@ int bch2_data_job(struct bch_fs *c,
true, true,
rereplicate_pred, c) ?: ret; rereplicate_pred, c) ?: ret;
ret = bch2_replicas_gc2(c) ?: ret; ret = bch2_replicas_gc2(c) ?: ret;
bch2_move_stats_exit(stats, c);
break; break;
case BCH_DATA_OP_MIGRATE: case BCH_DATA_OP_MIGRATE:
if (op.migrate.dev >= c->sb.nr_devices) if (op.migrate.dev >= c->sb.nr_devices)
...@@ -1117,10 +1100,13 @@ int bch2_data_job(struct bch_fs *c, ...@@ -1117,10 +1100,13 @@ int bch2_data_job(struct bch_fs *c,
true, true,
migrate_pred, &op) ?: ret; migrate_pred, &op) ?: ret;
ret = bch2_replicas_gc2(c) ?: ret; ret = bch2_replicas_gc2(c) ?: ret;
bch2_move_stats_exit(stats, c);
break; break;
case BCH_DATA_OP_REWRITE_OLD_NODES: case BCH_DATA_OP_REWRITE_OLD_NODES:
bch2_move_stats_init(stats, "rewrite_old_nodes"); bch2_move_stats_init(stats, "rewrite_old_nodes");
ret = bch2_scan_old_btree_nodes(c, stats); ret = bch2_scan_old_btree_nodes(c, stats);
bch2_move_stats_exit(stats, c);
break; break;
default: default:
ret = -EINVAL; ret = -EINVAL;
...@@ -1129,18 +1115,43 @@ int bch2_data_job(struct bch_fs *c, ...@@ -1129,18 +1115,43 @@ int bch2_data_job(struct bch_fs *c,
return ret; return ret;
} }
static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt) void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
{ {
struct bch_move_stats *stats = ctxt->stats; prt_printf(out, "%s: data type=%s pos=",
struct moving_io *io; stats->name,
bch2_data_types[stats->data_type]);
bch2_bbpos_to_text(out, stats->pos);
prt_newline(out);
printbuf_indent_add(out, 2);
prt_printf(out, "%s (%ps):", stats->name, ctxt->fn); prt_str(out, "keys moved: ");
prt_u64(out, atomic64_read(&stats->keys_moved));
prt_newline(out); prt_newline(out);
prt_printf(out, " data type %s position: ", prt_str(out, "keys raced: ");
bch2_data_types[stats->data_type]); prt_u64(out, atomic64_read(&stats->keys_raced));
bch2_bbpos_to_text(out, stats->pos); prt_newline(out);
prt_str(out, "bytes seen: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
prt_newline(out);
prt_str(out, "bytes moved: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
prt_newline(out);
prt_str(out, "bytes raced: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
prt_newline(out); prt_newline(out);
printbuf_indent_sub(out, 2);
}
static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
{
struct moving_io *io;
bch2_move_stats_to_text(out, ctxt->stats);
printbuf_indent_add(out, 2); printbuf_indent_add(out, 2);
prt_printf(out, "reads: ios %u/%u sectors %u/%u", prt_printf(out, "reads: ios %u/%u sectors %u/%u",
...@@ -1181,7 +1192,4 @@ void bch2_fs_move_init(struct bch_fs *c) ...@@ -1181,7 +1192,4 @@ void bch2_fs_move_init(struct bch_fs *c)
{ {
INIT_LIST_HEAD(&c->moving_context_list); INIT_LIST_HEAD(&c->moving_context_list);
mutex_init(&c->moving_context_lock); mutex_init(&c->moving_context_lock);
INIT_LIST_HEAD(&c->data_progress_list);
mutex_init(&c->data_progress_lock);
} }
...@@ -127,7 +127,10 @@ int bch2_data_job(struct bch_fs *, ...@@ -127,7 +127,10 @@ int bch2_data_job(struct bch_fs *,
struct bch_move_stats *, struct bch_move_stats *,
struct bch_ioctl_data); struct bch_ioctl_data);
void bch2_move_stats_init(struct bch_move_stats *stats, char *name); void bch2_move_stats_to_text(struct printbuf *, struct bch_move_stats *);
void bch2_move_stats_exit(struct bch_move_stats *, struct bch_fs *);
void bch2_move_stats_init(struct bch_move_stats *, char *);
void bch2_fs_moving_ctxts_to_text(struct printbuf *, struct bch_fs *); void bch2_fs_moving_ctxts_to_text(struct printbuf *, struct bch_fs *);
void bch2_fs_move_init(struct bch_fs *); void bch2_fs_move_init(struct bch_fs *);
......
...@@ -7,13 +7,12 @@ ...@@ -7,13 +7,12 @@
struct bch_move_stats { struct bch_move_stats {
enum bch_data_type data_type; enum bch_data_type data_type;
struct bbpos pos; struct bbpos pos;
struct list_head list;
char name[32]; char name[32];
atomic64_t keys_moved; atomic64_t keys_moved;
atomic64_t keys_raced; atomic64_t keys_raced;
atomic64_t sectors_moved;
atomic64_t sectors_seen; atomic64_t sectors_seen;
atomic64_t sectors_moved;
atomic64_t sectors_raced; atomic64_t sectors_raced;
}; };
......
...@@ -361,6 +361,7 @@ static int bch2_copygc_thread(void *arg) ...@@ -361,6 +361,7 @@ static int bch2_copygc_thread(void *arg)
move_buckets_wait(&ctxt, &buckets, true); move_buckets_wait(&ctxt, &buckets, true);
rhashtable_destroy(&buckets.table); rhashtable_destroy(&buckets.table);
bch2_moving_ctxt_exit(&ctxt); bch2_moving_ctxt_exit(&ctxt);
bch2_move_stats_exit(&move_stats, c);
return 0; return 0;
} }
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "btree_locking.h" #include "btree_locking.h"
#include "btree_update_interior.h" #include "btree_update_interior.h"
#include "keylist.h" #include "keylist.h"
#include "move_types.h"
#include "opts.h" #include "opts.h"
#include "six.h" #include "six.h"
......
...@@ -767,25 +767,36 @@ DEFINE_EVENT(bkey, move_extent_alloc_mem_fail, ...@@ -767,25 +767,36 @@ DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
); );
TRACE_EVENT(move_data, TRACE_EVENT(move_data,
TP_PROTO(struct bch_fs *c, u64 sectors_moved, TP_PROTO(struct bch_fs *c,
u64 keys_moved), struct bch_move_stats *stats),
TP_ARGS(c, sectors_moved, keys_moved), TP_ARGS(c, stats),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(dev_t, dev ) __field(dev_t, dev )
__field(u64, sectors_moved )
__field(u64, keys_moved ) __field(u64, keys_moved )
__field(u64, keys_raced )
__field(u64, sectors_seen )
__field(u64, sectors_moved )
__field(u64, sectors_raced )
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = c->dev; __entry->dev = c->dev;
__entry->sectors_moved = sectors_moved; __entry->keys_moved = atomic64_read(&stats->keys_moved);
__entry->keys_moved = keys_moved; __entry->keys_raced = atomic64_read(&stats->keys_raced);
__entry->sectors_seen = atomic64_read(&stats->sectors_seen);
__entry->sectors_moved = atomic64_read(&stats->sectors_moved);
__entry->sectors_raced = atomic64_read(&stats->sectors_raced);
), ),
TP_printk("%d,%d sectors_moved %llu keys_moved %llu", TP_printk("%d,%d keys moved %llu raced %llu"
"sectors seen %llu moved %llu raced %llu",
MAJOR(__entry->dev), MINOR(__entry->dev), MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->sectors_moved, __entry->keys_moved) __entry->keys_moved,
__entry->keys_raced,
__entry->sectors_seen,
__entry->sectors_moved,
__entry->sectors_raced)
); );
TRACE_EVENT(evacuate_bucket, TRACE_EVENT(evacuate_bucket,
......