Commit 8e7834a8 authored by Kent Overstreet

bcachefs: bch_fs_usage_base

Split out base filesystem usage into its own type; prep work for
breaking up bch2_trans_fs_usage_apply().
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 4f564f4f
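
The net effect of the patch is easiest to see in isolation: the six base counters move into a new struct bch_fs_usage_base, which struct bch_fs_usage then embeds as the member b, so an access such as fs_usage->hidden becomes fs_usage->b.hidden. The standalone sketch below mirrors the layout from the diff; the *_example names, BCH_REPLICAS_MAX_EXAMPLE value, and the totals helper are illustrative stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define BCH_REPLICAS_MAX_EXAMPLE 4	/* illustrative stand-in for BCH_REPLICAS_MAX */

struct bch_fs_usage_base {		/* the new type this commit introduces */
	u64 hidden;
	u64 btree;
	u64 data;
	u64 cached;
	u64 reserved;
	u64 nr_inodes;
};

struct bch_fs_usage_example {		/* mirrors bch_fs_usage after the split */
	/* all fields are in units of 512 byte sectors: */
	struct bch_fs_usage_base b;
	u64 persistent_reserved[BCH_REPLICAS_MAX_EXAMPLE];
	/* the flexible array member replicas[] is omitted in this sketch */
};

/* Illustrative only: sum a few base counters through the new .b member,
 * the same access pattern the diff applies in bch2_fs_sectors_used(). */
static u64 example_sectors_used(const struct bch_fs_usage_example *u)
{
	return u->b.hidden + u->b.btree + u->b.data;
}

int main(void)
{
	struct bch_fs_usage_example u = {
		.b = { .hidden = 128, .btree = 64, .data = 4096 },
	};

	printf("used: %llu sectors\n",
	       (unsigned long long) example_sectors_used(&u));
	return 0;
}

The diff hunks below apply exactly this renaming across the tree, and add a struct bch_fs_usage_base field to btree_trans as the stated prep work.
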
@@ -1253,19 +1253,19 @@ static int bch2_gc_done(struct bch_fs *c,
 	bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
 
 	copy_fs_field(fs_usage_hidden_wrong,
-		      hidden,		"hidden");
+		      b.hidden,		"hidden");
 	copy_fs_field(fs_usage_btree_wrong,
-		      btree,		"btree");
+		      b.btree,		"btree");
 
 	if (!metadata_only) {
 		copy_fs_field(fs_usage_data_wrong,
-			      data,	"data");
+			      b.data,	"data");
 		copy_fs_field(fs_usage_cached_wrong,
-			      cached,	"cached");
+			      b.cached,	"cached");
 		copy_fs_field(fs_usage_reserved_wrong,
-			      reserved,	"reserved");
+			      b.reserved,	"reserved");
 		copy_fs_field(fs_usage_nr_inodes_wrong,
-			      nr_inodes,"nr_inodes");
+			      b.nr_inodes,"nr_inodes");
 
 		for (i = 0; i < BCH_REPLICAS_MAX; i++)
 			copy_fs_field(fs_usage_persistent_reserved_wrong,
@@ -430,6 +430,9 @@ struct btree_trans {
 	struct journal_res	journal_res;
 	u64			*journal_seq;
 	struct disk_reservation *disk_res;
+
+	struct bch_fs_usage_base fs_usage_delta;
+
 	unsigned		journal_u64s;
 	unsigned		extra_disk_res; /* XXX kill */
 	struct replicas_delta_list *fs_usage_deltas;
@@ -25,7 +25,7 @@
 
 #include <linux/preempt.h>
 
-static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
+static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
 					      enum bch_data_type data_type,
 					      s64 sectors)
 {
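
The hunk above only changes this helper's signature to take the new base type; its body is not part of the hunk. As a hedged, standalone sketch (the enum and names are illustrative, not the verbatim kernel body), a data-type-to-base-counter dispatch over the split-out struct looks roughly like this:

#include <stdint.h>

typedef uint64_t u64;
typedef int64_t  s64;

enum example_data_type {		/* illustrative stand-in for enum bch_data_type */
	EXAMPLE_DATA_btree,
	EXAMPLE_DATA_user,
	EXAMPLE_DATA_cached,
};

struct bch_fs_usage_base {
	u64 hidden, btree, data, cached, reserved, nr_inodes;
};

/* Sketch of mapping a data type onto one of the base counters, now
 * operating on struct bch_fs_usage_base rather than struct bch_fs_usage. */
static inline void example_data_type_to_base(struct bch_fs_usage_base *fs_usage,
					     enum example_data_type data_type,
					     s64 sectors)
{
	switch (data_type) {
	case EXAMPLE_DATA_btree:
		fs_usage->btree += sectors;	/* btree node sectors */
		break;
	case EXAMPLE_DATA_user:
		fs_usage->data += sectors;	/* user data sectors */
		break;
	case EXAMPLE_DATA_cached:
		fs_usage->cached += sectors;	/* cached copies */
		break;
	}
}

The remaining hunks update every caller to pass &fs_usage->b (or &usage->b) instead of the containing struct.
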
@@ -54,20 +54,20 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
 		bch2_fs_usage_acc_to_base(c, i);
 
 	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
-		usage->reserved += usage->persistent_reserved[i];
+		usage->b.reserved += usage->persistent_reserved[i];
 
 	for (unsigned i = 0; i < c->replicas.nr; i++) {
 		struct bch_replicas_entry_v1 *e =
 			cpu_replicas_entry(&c->replicas, i);
 
-		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
+		fs_usage_data_type_to_base(&usage->b, e->data_type, usage->replicas[i]);
 	}
 
 	for_each_member_device(c, ca) {
 		struct bch_dev_usage dev = bch2_dev_usage_read(ca);
 
-		usage->hidden += (dev.d[BCH_DATA_sb].buckets +
+		usage->b.hidden += (dev.d[BCH_DATA_sb].buckets +
 				  dev.d[BCH_DATA_journal].buckets) *
 			ca->mi.bucket_size;
 	}
@@ -188,15 +188,15 @@ void bch2_fs_usage_to_text(struct printbuf *out,
 	prt_printf(out, "capacity:\t\t\t%llu\n", c->capacity);
 
 	prt_printf(out, "hidden:\t\t\t\t%llu\n",
-		   fs_usage->u.hidden);
+		   fs_usage->u.b.hidden);
 	prt_printf(out, "data:\t\t\t\t%llu\n",
-		   fs_usage->u.data);
+		   fs_usage->u.b.data);
 	prt_printf(out, "cached:\t\t\t\t%llu\n",
-		   fs_usage->u.cached);
+		   fs_usage->u.b.cached);
 	prt_printf(out, "reserved:\t\t\t%llu\n",
-		   fs_usage->u.reserved);
+		   fs_usage->u.b.reserved);
 	prt_printf(out, "nr_inodes:\t\t\t%llu\n",
-		   fs_usage->u.nr_inodes);
+		   fs_usage->u.b.nr_inodes);
 	prt_printf(out, "online reserved:\t\t%llu\n",
 		   fs_usage->online_reserved);
@@ -225,10 +225,10 @@ static u64 reserve_factor(u64 r)
 
 u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage_online *fs_usage)
 {
-	return min(fs_usage->u.hidden +
-		   fs_usage->u.btree +
-		   fs_usage->u.data +
-		   reserve_factor(fs_usage->u.reserved +
+	return min(fs_usage->u.b.hidden +
+		   fs_usage->u.b.btree +
+		   fs_usage->u.b.data +
+		   reserve_factor(fs_usage->u.b.reserved +
 				  fs_usage->online_reserved),
 		   c->capacity);
 }
@@ -240,17 +240,17 @@ __bch2_fs_usage_read_short(struct bch_fs *c)
 	u64 data, reserved;
 
 	ret.capacity = c->capacity -
-		bch2_fs_usage_read_one(c, &c->usage_base->hidden);
+		bch2_fs_usage_read_one(c, &c->usage_base->b.hidden);
 
-	data		= bch2_fs_usage_read_one(c, &c->usage_base->data) +
-		bch2_fs_usage_read_one(c, &c->usage_base->btree);
+	data		= bch2_fs_usage_read_one(c, &c->usage_base->b.data) +
+		bch2_fs_usage_read_one(c, &c->usage_base->b.btree);
 
-	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->reserved) +
+	reserved	= bch2_fs_usage_read_one(c, &c->usage_base->b.reserved) +
 		percpu_u64_get(c->online_reserved);
 
 	ret.used	= min(ret.capacity, data + reserve_factor(reserved));
 	ret.free	= ret.capacity - ret.used;
 
-	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->nr_inodes);
+	ret.nr_inodes	= bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes);
 
 	return ret;
 }
@@ -308,9 +308,9 @@ void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 	fs_usage = fs_usage_ptr(c, journal_seq, gc);
 
 	if (data_type_is_hidden(old->data_type))
-		fs_usage->hidden -= ca->mi.bucket_size;
+		fs_usage->b.hidden -= ca->mi.bucket_size;
 	if (data_type_is_hidden(new->data_type))
-		fs_usage->hidden += ca->mi.bucket_size;
+		fs_usage->b.hidden += ca->mi.bucket_size;
 
 	u = dev_usage_ptr(ca, journal_seq, gc);
@@ -359,7 +359,7 @@ static inline int __update_replicas(struct bch_fs *c,
 	if (idx < 0)
 		return -1;
 
-	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+	fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
 	fs_usage->replicas[idx] += sectors;
 	return 0;
 }
@@ -394,7 +394,7 @@ int bch2_update_replicas(struct bch_fs *c, struct bkey_s_c k,
 
 	preempt_disable();
 	fs_usage = fs_usage_ptr(c, journal_seq, gc);
-	fs_usage_data_type_to_base(fs_usage, r->data_type, sectors);
+	fs_usage_data_type_to_base(&fs_usage->b, r->data_type, sectors);
 	fs_usage->replicas[idx] += sectors;
 	preempt_enable();
 err:
@@ -677,11 +677,11 @@ void bch2_trans_fs_usage_revert(struct btree_trans *trans,
 		BUG_ON(__update_replicas(c, dst, &d->r, -d->delta));
 	}
 
-	dst->nr_inodes -= deltas->nr_inodes;
+	dst->b.nr_inodes -= deltas->nr_inodes;
 
 	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
 		added -= deltas->persistent_reserved[i];
-		dst->reserved -= deltas->persistent_reserved[i];
+		dst->b.reserved -= deltas->persistent_reserved[i];
 		dst->persistent_reserved[i] -= deltas->persistent_reserved[i];
 	}
@@ -723,11 +723,11 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
 			goto need_mark;
 	}
 
-	dst->nr_inodes += deltas->nr_inodes;
+	dst->b.nr_inodes += deltas->nr_inodes;
 
 	for (i = 0; i < BCH_REPLICAS_MAX; i++) {
 		added += deltas->persistent_reserved[i];
-		dst->reserved += deltas->persistent_reserved[i];
+		dst->b.reserved += deltas->persistent_reserved[i];
 		dst->persistent_reserved[i] += deltas->persistent_reserved[i];
 	}
@@ -1084,7 +1084,7 @@ static int __trigger_reservation(struct btree_trans *trans,
 		struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage_gc);
 
 		replicas = min(replicas, ARRAY_SIZE(fs_usage->persistent_reserved));
-		fs_usage->reserved += sectors;
+		fs_usage->b.reserved += sectors;
 		fs_usage->persistent_reserved[replicas - 1] += sectors;
 		preempt_enable();
@@ -45,23 +45,18 @@ struct bch_dev_usage {
 	}			d[BCH_DATA_NR];
 };
 
-struct bch_fs_usage {
-	/* all fields are in units of 512 byte sectors: */
+struct bch_fs_usage_base {
 	u64			hidden;
 	u64			btree;
 	u64			data;
 	u64			cached;
 	u64			reserved;
 	u64			nr_inodes;
+};
 
-	/* XXX: add stats for compression ratio */
-#if 0
-	u64			uncompressed;
-	u64			compressed;
-#endif
+struct bch_fs_usage {
+	/* all fields are in units of 512 byte sectors: */
+	struct bch_fs_usage_base b;
 
-	/* broken out: */
 	u64			persistent_reserved[BCH_REPLICAS_MAX];
 	u64			replicas[];
 };
@@ -597,7 +597,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
 		struct bch_fs *c = trans->c;
 
 		percpu_down_read(&c->mark_lock);
-		this_cpu_add(c->usage_gc->nr_inodes, nr);
+		this_cpu_add(c->usage_gc->b.nr_inodes, nr);
 		percpu_up_read(&c->mark_lock);
 	}
@@ -280,7 +280,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
 			le64_to_cpu(u->v);
 		break;
 	case BCH_FS_USAGE_inodes:
-		c->usage_base->nr_inodes = le64_to_cpu(u->v);
+		c->usage_base->b.nr_inodes = le64_to_cpu(u->v);
 		break;
 	case BCH_FS_USAGE_key_version:
 		atomic64_set(&c->key_version,
@@ -207,7 +207,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
 
 		u->entry.type	= BCH_JSET_ENTRY_usage;
 		u->entry.btree_id = BCH_FS_USAGE_inodes;
-		u->v		= cpu_to_le64(c->usage_base->nr_inodes);
+		u->v		= cpu_to_le64(c->usage_base->b.nr_inodes);
 	}
 
 	{