Commit 41b84fb4 authored by Kent Overstreet

bcachefs: for_each_member_device_rcu() now declares loop iter

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 9fea2274
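
For illustration, a caller-side before/after sketch (taken from bch2_reset_alloc_cursors() in the diff below): the macro now declares both the loop's struct bch_dev pointer and its index internally, so callers drop their local declarations and pass only the filesystem, the loop variable name, and the device mask.

	/* before: caller declares the device pointer and iterator */
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		ca->alloc_cursor = 0;
	rcu_read_unlock();

	/* after: the macro declares ca itself; use ca->dev_idx where the old index was needed */
	rcu_read_lock();
	for_each_member_device_rcu(c, ca, NULL)
		ca->alloc_cursor = 0;
	rcu_read_unlock();
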
@@ -891,7 +891,6 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
 static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
 {
 	struct bch_dev *ca;
-	unsigned iter;
 
 	if (bch2_dev_bucket_exists(c, *bucket))
 		return true;
@@ -909,8 +908,7 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
 	}
 
 	rcu_read_lock();
-	iter = bucket->inode;
-	ca = __bch2_next_dev(c, &iter, NULL);
+	ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
 	if (ca)
 		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
 	rcu_read_unlock();
@@ -69,11 +69,8 @@ const char * const bch2_watermarks[] = {
 void bch2_reset_alloc_cursors(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL)
+	for_each_member_device_rcu(c, ca, NULL)
 		ca->alloc_cursor = 0;
 	rcu_read_unlock();
 }
@@ -154,8 +154,7 @@ struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
 {
-	struct bch_dev *ca;
-	unsigned i, u64s = fs_usage_u64s(c);
+	unsigned u64s = fs_usage_u64s(c);
 
 	BUG_ON(idx >= ARRAY_SIZE(c->usage));
@@ -167,7 +166,7 @@ void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
 	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
 
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL) {
+	for_each_member_device_rcu(c, ca, NULL) {
 		u64s = dev_usage_u64s();
 
 		acc_u64s_percpu((u64 *) ca->usage_base,
@@ -89,19 +89,14 @@ static int bch2_sb_disk_groups_validate(struct bch_sb *sb,
 void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
 {
-	struct bch_disk_groups_cpu *g;
-	struct bch_dev *ca;
-	int i;
-	unsigned iter;
-
 	out->atomic++;
 	rcu_read_lock();
 
-	g = rcu_dereference(c->disk_groups);
+	struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
 	if (!g)
 		goto out;
 
-	for (i = 0; i < g->nr; i++) {
+	for (unsigned i = 0; i < g->nr; i++) {
 		if (i)
 			prt_printf(out, " ");
@@ -111,7 +106,7 @@ void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
 		}
 
 		prt_printf(out, "[parent %d devs", g->entries[i].parent);
-		for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs)
+		for_each_member_device_rcu(c, ca, &g->entries[i].devs)
 			prt_printf(out, " %s", ca->name);
 		prt_printf(out, "]");
 	}
@@ -1243,18 +1243,17 @@ static int unsigned_cmp(const void *_l, const void *_r)
 static unsigned pick_blocksize(struct bch_fs *c,
 			       struct bch_devs_mask *devs)
 {
-	struct bch_dev *ca;
-	unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
+	unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
 	struct {
 		unsigned nr, size;
 	} cur = { 0, 0 }, best = { 0, 0 };
 
-	for_each_member_device_rcu(ca, c, i, devs)
+	for_each_member_device_rcu(c, ca, devs)
 		sizes[nr++] = ca->mi.bucket_size;
 
 	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
 
-	for (i = 0; i < nr; i++) {
+	for (unsigned i = 0; i < nr; i++) {
 		if (sizes[i] != cur.size) {
 			if (cur.nr > best.nr)
 				best = cur;
@@ -1337,8 +1336,6 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 			enum bch_watermark watermark)
 {
 	struct ec_stripe_head *h;
-	struct bch_dev *ca;
-	unsigned i;
 
 	h = kzalloc(sizeof(*h), GFP_KERNEL);
 	if (!h)
@@ -1355,13 +1352,13 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 	rcu_read_lock();
 	h->devs = target_rw_devs(c, BCH_DATA_user, target);
 
-	for_each_member_device_rcu(ca, c, i, &h->devs)
+	for_each_member_device_rcu(c, ca, &h->devs)
 		if (!ca->mi.durability)
-			__clear_bit(i, h->devs.d);
+			__clear_bit(ca->dev_idx, h->devs.d);
 
 	h->blocksize = pick_blocksize(c, &h->devs);
 
-	for_each_member_device_rcu(ca, c, i, &h->devs)
+	for_each_member_device_rcu(c, ca, &h->devs)
 		if (ca->mi.bucket_size == h->blocksize)
 			h->nr_active_devs++;
@@ -1294,11 +1294,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
 	union journal_res_state s;
-	struct bch_dev *ca;
 	unsigned long now = jiffies;
 	u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
-	u64 seq;
-	unsigned i;
 
 	if (!out->nr_tabstops)
 		printbuf_tabstop_push(out, 24);
@@ -1343,10 +1340,10 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
 	prt_newline(out);
 
-	for (seq = journal_cur_seq(j);
+	for (u64 seq = journal_cur_seq(j);
 	     seq >= journal_last_unwritten_seq(j);
 	     --seq) {
-		i = seq & JOURNAL_BUF_MASK;
+		unsigned i = seq & JOURNAL_BUF_MASK;
 
 		prt_printf(out, "unwritten entry:");
 		prt_tab(out);
@@ -1390,8 +1387,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
 		   j->space[journal_space_total].next_entry,
 		   j->space[journal_space_total].total);
 
-	for_each_member_device_rcu(ca, c, i,
-				   &c->rw_devs[BCH_DATA_journal]) {
+	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;
 
 		if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
@@ -1400,7 +1396,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
 		if (!ja->nr)
 			continue;
 
-		prt_printf(out, "dev %u:\n", i);
+		prt_printf(out, "dev %u:\n", ca->dev_idx);
 		prt_printf(out, "\tnr\t\t%u\n", ja->nr);
 		prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
 		prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
@@ -136,15 +136,13 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
 			    enum journal_space_from from)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
-	struct bch_dev *ca;
-	unsigned i, pos, nr_devs = 0;
+	unsigned pos, nr_devs = 0;
 	struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];
 
 	BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));
 
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i,
-				   &c->rw_devs[BCH_DATA_journal]) {
+	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
 		if (!ca->journal.nr)
 			continue;
@@ -173,19 +171,17 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
 void bch2_journal_space_available(struct journal *j)
 {
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
-	struct bch_dev *ca;
 	unsigned clean, clean_ondisk, total;
 	unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
 				      j->buf[1].buf_size >> 9);
-	unsigned i, nr_online = 0, nr_devs_want;
+	unsigned nr_online = 0, nr_devs_want;
 	bool can_discard = false;
 	int ret = 0;
 
 	lockdep_assert_held(&j->lock);
 
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i,
-				   &c->rw_devs[BCH_DATA_journal]) {
+	for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
 		struct journal_device *ja = &ca->journal;
 
 		if (!ja->nr)
@@ -216,7 +212,7 @@ void bch2_journal_space_available(struct journal *j)
 	nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);
 
-	for (i = 0; i < journal_space_nr; i++)
+	for (unsigned i = 0; i < journal_space_nr; i++)
 		j->space[i] = __journal_space_available(j, nr_devs_want, i);
 
 	clean_ondisk = j->space[journal_space_clean_ondisk].total;
@@ -358,14 +358,12 @@ const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
 void bch2_sb_members_from_cpu(struct bch_fs *c)
 {
 	struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
-	struct bch_dev *ca;
-	unsigned i, e;
 
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL) {
-		struct bch_member *m = __bch2_members_v2_get_mut(mi, i);
+	for_each_member_device_rcu(c, ca, NULL) {
+		struct bch_member *m = __bch2_members_v2_get_mut(mi, ca->dev_idx);
 
-		for (e = 0; e < BCH_MEMBER_ERROR_NR; e++)
+		for (unsigned e = 0; e < BCH_MEMBER_ERROR_NR; e++)
 			m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
 	}
 	rcu_read_unlock();
@@ -79,33 +79,38 @@ static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
 	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
 }
 
-static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
-					      const struct bch_devs_mask *mask)
+static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
+						  const struct bch_devs_mask *mask)
 {
 	struct bch_dev *ca = NULL;
 
-	while ((*iter = mask
-		? find_next_bit(mask->d, c->sb.nr_devices, *iter)
-		: *iter) < c->sb.nr_devices &&
-	       !(ca = rcu_dereference_check(c->devs[*iter],
+	while ((idx = mask
+		? find_next_bit(mask->d, c->sb.nr_devices, idx)
+		: idx) < c->sb.nr_devices &&
+	       !(ca = rcu_dereference_check(c->devs[idx],
 					    lockdep_is_held(&c->state_lock))))
-		(*iter)++;
+		idx++;
 
 	return ca;
 }
 
-#define for_each_member_device_rcu(ca, c, iter, mask)			\
-	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)
+static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
+					      const struct bch_devs_mask *mask)
+{
+	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
+}
+
+#define for_each_member_device_rcu(_c, _ca, _mask)			\
+	for (struct bch_dev *_ca = NULL;				\
+	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)
 
 static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
 {
-	unsigned idx = ca ? ca->dev_idx + 1 : 0;
-
 	if (ca)
 		percpu_ref_put(&ca->ref);
 
 	rcu_read_lock();
-	if ((ca = __bch2_next_dev(c, &idx, NULL)))
+	if ((ca = __bch2_next_dev(c, ca, NULL)))
 		percpu_ref_get(&ca->ref);
 	rcu_read_unlock();
@@ -126,16 +131,14 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
 						       struct bch_dev *ca,
 						       unsigned state_mask)
 {
-	unsigned idx = ca ? ca->dev_idx + 1 : 0;
-
 	if (ca)
 		percpu_ref_put(&ca->io_ref);
 
 	rcu_read_lock();
-	while ((ca = __bch2_next_dev(c, &idx, NULL)) &&
+	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
 	       (!((1 << ca->mi.state) & state_mask) ||
 		!percpu_ref_tryget(&ca->io_ref)))
-		idx++;
+		;
 	rcu_read_unlock();
 
 	return ca;
@@ -167,14 +167,12 @@ static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
 struct bch_fs *bch2_dev_to_fs(dev_t dev)
 {
 	struct bch_fs *c;
-	struct bch_dev *ca;
-	unsigned i;
 
 	mutex_lock(&bch_fs_list_lock);
 	rcu_read_lock();
 
 	list_for_each_entry(c, &bch_fs_list, list)
-		for_each_member_device_rcu(ca, c, i, NULL)
+		for_each_member_device_rcu(c, ca, NULL)
 			if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
 				closure_get(&c->cl);
 				goto found;
@@ -215,14 +213,13 @@ struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
 static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i, nr = 0, u64s =
+	unsigned nr = 0, u64s =
 		((sizeof(struct jset_entry_dev_usage) +
 		  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
 		sizeof(u64);
 
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL)
+	for_each_member_device_rcu(c, ca, NULL)
 		nr++;
 	rcu_read_unlock();
@@ -1906,18 +1903,14 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 /* return with ref on ca->ref: */
 struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL)
-		if (!strcmp(name, ca->name))
-			goto found;
-	ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
-found:
+	for_each_member_device_rcu(c, ca, NULL)
+		if (!strcmp(name, ca->name)) {
+			rcu_read_unlock();
+			return ca;
+		}
 	rcu_read_unlock();
-	return ca;
+	return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
 }
 
 /* Filesystem open: */