Commit 9f9a5347 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'bcachefs-2024-09-28' of git://evilpiepirate.org/bcachefs

Pull more bcachefs updates from Kent Overstreet:
 "Assorted minor syzbot fixes, and for bigger stuff:

  Fix two disk accounting rewrite bugs:

   - Disk accounting keys use the version field of bkey so that journal
     replay can tell which updates have been applied to the btree.

     This is set in the transaction commit path, after we've gotten our
     journal reservation (and our time ordering), but the
     BCH_TRANS_COMMIT_skip_accounting_apply flag that journal replay
     uses was incorrectly skipping this for new updates generated prior
     to journal replay.

     This fixes the underlying cause of an assertion pop in
     disk_accounting_read.

   - A couple of fixes for disk accounting + device removal.

     Checking if accounting replicas entries were marked in the
     superblock was being done at the wrong point, when deltas in the
     journal could still zero them out, and then additionally we'd try
     to add a missing replicas entry to the superblock without checking
     if it referred to an invalid (removed) device.

  A whole slew of repair fixes:

   - fix infinite loop in propagate_key_to_snapshot_leaves(), this fixes
     an infinite loop when repairing a filesystem with many snapshots

   - fix incorrect transaction restart handling leading to occasional
     "fsck counted ..." warnings

   - fix warning in __bch2_fsck_err() for bkey fsck errors

   - check_inode() in fsck now correctly checks if the filesystem was
     clean

   - there shouldn't be pending logged ops if the fs was clean, we now
     check for this

   - remove_backpointer() doesn't remove a dirent that doesn't actually
     point to the inode

   - many more fsck errors are AUTOFIX"

* tag 'bcachefs-2024-09-28' of git://evilpiepirate.org/bcachefs: (35 commits)
  bcachefs: check_subvol_path() now prints subvol root inode
  bcachefs: remove_backpointer() now checks if dirent points to inode
  bcachefs: dirent_points_to_inode() now warns on mismatch
  bcachefs: Fix lost wake up
  bcachefs: Check for logged ops when clean
  bcachefs: BCH_FS_clean_recovery
  bcachefs: Convert disk accounting BUG_ON() to WARN_ON()
  bcachefs: Fix BCH_TRANS_COMMIT_skip_accounting_apply
  bcachefs: Check for accounting keys with bversion=0
  bcachefs: rename version -> bversion
  bcachefs: Don't delete unlinked inodes before logged op resume
  bcachefs: Fix BCH_SB_ERRS() so we can reorder
  bcachefs: Fix fsck warnings from bkey validation
  bcachefs: Move transaction commit path validation to as late as possible
  bcachefs: Fix disk accounting attempting to mark invalid replicas entry
  bcachefs: Fix unlocked access to c->disk_sb.sb in bch2_replicas_entry_validate()
  bcachefs: Fix accounting read + device removal
  bcachefs: bch_accounting_mode
  bcachefs: fix transaction restart handling in check_extents(), check_dirents()
  bcachefs: kill inode_walker_entry.seen_this_pos
  ...
parents d37421e6 3a5895e3
...@@ -501,7 +501,7 @@ static int check_extent_checksum(struct btree_trans *trans, ...@@ -501,7 +501,7 @@ static int check_extent_checksum(struct btree_trans *trans,
prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree)); prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
bch2_bkey_val_to_text(&buf, c, extent2); bch2_bkey_val_to_text(&buf, c, extent2);
struct nonce nonce = extent_nonce(extent.k->version, p.crc); struct nonce nonce = extent_nonce(extent.k->bversion, p.crc);
struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes); struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum), if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
trans, dup_backpointer_to_bad_csum_extent, trans, dup_backpointer_to_bad_csum_extent,
......
...@@ -594,6 +594,7 @@ struct bch_dev { ...@@ -594,6 +594,7 @@ struct bch_dev {
#define BCH_FS_FLAGS() \ #define BCH_FS_FLAGS() \
x(new_fs) \ x(new_fs) \
x(started) \ x(started) \
x(clean_recovery) \
x(btree_running) \ x(btree_running) \
x(accounting_replay_done) \ x(accounting_replay_done) \
x(may_go_rw) \ x(may_go_rw) \
...@@ -776,7 +777,7 @@ struct bch_fs { ...@@ -776,7 +777,7 @@ struct bch_fs {
unsigned nsec_per_time_unit; unsigned nsec_per_time_unit;
u64 features; u64 features;
u64 compat; u64 compat;
unsigned long errors_silent[BITS_TO_LONGS(BCH_SB_ERR_MAX)]; unsigned long errors_silent[BITS_TO_LONGS(BCH_FSCK_ERR_MAX)];
u64 btrees_lost_data; u64 btrees_lost_data;
} sb; } sb;
......
...@@ -217,7 +217,7 @@ struct bkey { ...@@ -217,7 +217,7 @@ struct bkey {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u8 pad[1]; __u8 pad[1];
struct bversion version; struct bversion bversion;
__u32 size; /* extent size, in sectors */ __u32 size; /* extent size, in sectors */
struct bpos p; struct bpos p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
...@@ -328,8 +328,8 @@ enum bch_bkey_fields { ...@@ -328,8 +328,8 @@ enum bch_bkey_fields {
bkey_format_field(OFFSET, p.offset), \ bkey_format_field(OFFSET, p.offset), \
bkey_format_field(SNAPSHOT, p.snapshot), \ bkey_format_field(SNAPSHOT, p.snapshot), \
bkey_format_field(SIZE, size), \ bkey_format_field(SIZE, size), \
bkey_format_field(VERSION_HI, version.hi), \ bkey_format_field(VERSION_HI, bversion.hi), \
bkey_format_field(VERSION_LO, version.lo), \ bkey_format_field(VERSION_LO, bversion.lo), \
}, \ }, \
}) })
......
...@@ -214,9 +214,9 @@ static __always_inline int bversion_cmp(struct bversion l, struct bversion r) ...@@ -214,9 +214,9 @@ static __always_inline int bversion_cmp(struct bversion l, struct bversion r)
#define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 }) #define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })
#define MAX_VERSION ((struct bversion) { .hi = ~0, .lo = ~0ULL }) #define MAX_VERSION ((struct bversion) { .hi = ~0, .lo = ~0ULL })
static __always_inline int bversion_zero(struct bversion v) static __always_inline bool bversion_zero(struct bversion v)
{ {
return !bversion_cmp(v, ZERO_VERSION); return bversion_cmp(v, ZERO_VERSION) == 0;
} }
#ifdef CONFIG_BCACHEFS_DEBUG #ifdef CONFIG_BCACHEFS_DEBUG
...@@ -554,8 +554,8 @@ static inline void bch2_bkey_pack_test(void) {} ...@@ -554,8 +554,8 @@ static inline void bch2_bkey_pack_test(void) {}
x(BKEY_FIELD_OFFSET, p.offset) \ x(BKEY_FIELD_OFFSET, p.offset) \
x(BKEY_FIELD_SNAPSHOT, p.snapshot) \ x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
x(BKEY_FIELD_SIZE, size) \ x(BKEY_FIELD_SIZE, size) \
x(BKEY_FIELD_VERSION_HI, version.hi) \ x(BKEY_FIELD_VERSION_HI, bversion.hi) \
x(BKEY_FIELD_VERSION_LO, version.lo) x(BKEY_FIELD_VERSION_LO, bversion.lo)
struct bkey_format_state { struct bkey_format_state {
u64 field_min[BKEY_NR_FIELDS]; u64 field_min[BKEY_NR_FIELDS];
......
...@@ -289,7 +289,7 @@ void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k) ...@@ -289,7 +289,7 @@ void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
bch2_bpos_to_text(out, k->p); bch2_bpos_to_text(out, k->p);
prt_printf(out, " len %u ver %llu", k->size, k->version.lo); prt_printf(out, " len %u ver %llu", k->size, k->bversion.lo);
} else { } else {
prt_printf(out, "(null)"); prt_printf(out, "(null)");
} }
......
...@@ -70,7 +70,7 @@ bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s); ...@@ -70,7 +70,7 @@ bool bch2_bkey_normalize(struct bch_fs *, struct bkey_s);
static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r) static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct bkey *r)
{ {
return l->type == r->type && return l->type == r->type &&
!bversion_cmp(l->version, r->version) && !bversion_cmp(l->bversion, r->bversion) &&
bpos_eq(l->p, bkey_start_pos(r)); bpos_eq(l->p, bkey_start_pos(r));
} }
......
...@@ -513,6 +513,8 @@ int bch2_check_topology(struct bch_fs *c) ...@@ -513,6 +513,8 @@ int bch2_check_topology(struct bch_fs *c)
struct bpos pulled_from_scan = POS_MIN; struct bpos pulled_from_scan = POS_MIN;
int ret = 0; int ret = 0;
bch2_trans_srcu_unlock(trans);
for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) { for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
struct btree_root *r = bch2_btree_id_root(c, i); struct btree_root *r = bch2_btree_id_root(c, i);
bool reconstructed_root = false; bool reconstructed_root = false;
...@@ -599,15 +601,15 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id, ...@@ -599,15 +601,15 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
if (initial) { if (initial) {
BUG_ON(bch2_journal_seq_verify && BUG_ON(bch2_journal_seq_verify &&
k.k->version.lo > atomic64_read(&c->journal.seq)); k.k->bversion.lo > atomic64_read(&c->journal.seq));
if (fsck_err_on(btree_id != BTREE_ID_accounting && if (fsck_err_on(btree_id != BTREE_ID_accounting &&
k.k->version.lo > atomic64_read(&c->key_version), k.k->bversion.lo > atomic64_read(&c->key_version),
trans, bkey_version_in_future, trans, bkey_version_in_future,
"key version number higher than recorded %llu\n %s", "key version number higher than recorded %llu\n %s",
atomic64_read(&c->key_version), atomic64_read(&c->key_version),
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
atomic64_set(&c->key_version, k.k->version.lo); atomic64_set(&c->key_version, k.k->bversion.lo);
} }
if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k), if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
......
...@@ -1195,6 +1195,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, ...@@ -1195,6 +1195,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
set_btree_bset(b, b->set, &b->data->keys); set_btree_bset(b, b->set, &b->data->keys);
b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter); b->nr = bch2_key_sort_fix_overlapping(c, &sorted->keys, iter);
memset((uint8_t *)(sorted + 1) + b->nr.live_u64s * sizeof(u64), 0,
btree_buf_bytes(b) -
sizeof(struct btree_node) -
b->nr.live_u64s * sizeof(u64));
u64s = le16_to_cpu(sorted->keys.u64s); u64s = le16_to_cpu(sorted->keys.u64s);
*sorted = *b->data; *sorted = *b->data;
...@@ -1219,7 +1223,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca, ...@@ -1219,7 +1223,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
ret = bch2_bkey_val_validate(c, u.s_c, READ); ret = bch2_bkey_val_validate(c, u.s_c, READ);
if (ret == -BCH_ERR_fsck_delete_bkey || if (ret == -BCH_ERR_fsck_delete_bkey ||
(bch2_inject_invalid_keys && (bch2_inject_invalid_keys &&
!bversion_cmp(u.k->version, MAX_VERSION))) { !bversion_cmp(u.k->bversion, MAX_VERSION))) {
btree_keys_account_key_drop(&b->nr, 0, k); btree_keys_account_key_drop(&b->nr, 0, k);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s); i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
......
...@@ -275,7 +275,7 @@ static int read_btree_nodes(struct find_btree_nodes *f) ...@@ -275,7 +275,7 @@ static int read_btree_nodes(struct find_btree_nodes *f)
w->ca = ca; w->ca = ca;
t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name); t = kthread_run(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
ret = IS_ERR_OR_NULL(t); ret = PTR_ERR_OR_ZERO(t);
if (ret) { if (ret) {
percpu_ref_put(&ca->io_ref); percpu_ref_put(&ca->io_ref);
closure_put(&cl); closure_put(&cl);
......
...@@ -684,10 +684,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, ...@@ -684,10 +684,10 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
!(flags & BCH_TRANS_COMMIT_no_journal_res)) { !(flags & BCH_TRANS_COMMIT_no_journal_res)) {
if (bch2_journal_seq_verify) if (bch2_journal_seq_verify)
trans_for_each_update(trans, i) trans_for_each_update(trans, i)
i->k->k.version.lo = trans->journal_res.seq; i->k->k.bversion.lo = trans->journal_res.seq;
else if (bch2_inject_invalid_keys) else if (bch2_inject_invalid_keys)
trans_for_each_update(trans, i) trans_for_each_update(trans, i)
i->k->k.version = MAX_VERSION; i->k->k.bversion = MAX_VERSION;
} }
h = trans->hooks; h = trans->hooks;
...@@ -700,27 +700,31 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, ...@@ -700,27 +700,31 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
struct jset_entry *entry = trans->journal_entries; struct jset_entry *entry = trans->journal_entries;
if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
percpu_down_read(&c->mark_lock); percpu_down_read(&c->mark_lock);
for (entry = trans->journal_entries; for (entry = trans->journal_entries;
entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s); entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
entry = vstruct_next(entry)) entry = vstruct_next(entry))
if (jset_entry_is_key(entry) && entry->start->k.type == KEY_TYPE_accounting) { if (entry->type == BCH_JSET_ENTRY_write_buffer_keys &&
entry->start->k.type == KEY_TYPE_accounting) {
BUG_ON(!trans->journal_res.ref);
struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start); struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);
a->k.version = journal_pos_to_bversion(&trans->journal_res, a->k.bversion = journal_pos_to_bversion(&trans->journal_res,
(u64 *) entry - (u64 *) trans->journal_entries); (u64 *) entry - (u64 *) trans->journal_entries);
BUG_ON(bversion_zero(a->k.version)); BUG_ON(bversion_zero(a->k.bversion));
ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false, false);
if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), BCH_ACCOUNTING_normal);
if (ret) if (ret)
goto revert_fs_usage; goto revert_fs_usage;
} }
}
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
/* XXX: we only want to run this if deltas are nonzero */ /* XXX: we only want to run this if deltas are nonzero */
bch2_trans_account_disk_usage_change(trans); bch2_trans_account_disk_usage_change(trans);
}
trans_for_each_update(trans, i) trans_for_each_update(trans, i)
if (btree_node_type_has_atomic_triggers(i->bkey_type)) { if (btree_node_type_has_atomic_triggers(i->bkey_type)) {
...@@ -735,6 +739,40 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, ...@@ -735,6 +739,40 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
goto fatal_err; goto fatal_err;
} }
trans_for_each_update(trans, i) {
enum bch_validate_flags invalid_flags = 0;
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
i->bkey_type, invalid_flags);
if (unlikely(ret)){
bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
trans->fn, (void *) i->ip_allocated);
goto fatal_err;
}
btree_insert_entry_checks(trans, i);
}
for (struct jset_entry *i = trans->journal_entries;
i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
i = vstruct_next(i)) {
enum bch_validate_flags invalid_flags = 0;
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
ret = bch2_journal_entry_validate(c, NULL, i,
bcachefs_metadata_version_current,
CPU_BIG_ENDIAN, invalid_flags);
if (unlikely(ret)) {
bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
trans->fn);
goto fatal_err;
}
}
if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) { if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
struct journal *j = &c->journal; struct journal *j = &c->journal;
struct jset_entry *entry; struct jset_entry *entry;
...@@ -798,7 +836,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags, ...@@ -798,7 +836,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start); struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);
bch2_accounting_neg(a); bch2_accounting_neg(a);
bch2_accounting_mem_mod_locked(trans, a.c, false, false); bch2_accounting_mem_mod_locked(trans, a.c, BCH_ACCOUNTING_normal);
bch2_accounting_neg(a); bch2_accounting_neg(a);
} }
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
...@@ -1019,40 +1057,6 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags) ...@@ -1019,40 +1057,6 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (ret) if (ret)
goto out_reset; goto out_reset;
trans_for_each_update(trans, i) {
enum bch_validate_flags invalid_flags = 0;
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
i->bkey_type, invalid_flags);
if (unlikely(ret)){
bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
trans->fn, (void *) i->ip_allocated);
return ret;
}
btree_insert_entry_checks(trans, i);
}
for (struct jset_entry *i = trans->journal_entries;
i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
i = vstruct_next(i)) {
enum bch_validate_flags invalid_flags = 0;
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
ret = bch2_journal_entry_validate(c, NULL, i,
bcachefs_metadata_version_current,
CPU_BIG_ENDIAN, invalid_flags);
if (unlikely(ret)) {
bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
trans->fn);
return ret;
}
}
if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) { if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
ret = do_bch2_trans_commit_to_journal_replay(trans); ret = do_bch2_trans_commit_to_journal_replay(trans);
goto out_reset; goto out_reset;
......
...@@ -220,7 +220,8 @@ static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *t ...@@ -220,7 +220,8 @@ static inline struct bkey_i *__bch2_bkey_make_mut_noupdate(struct btree_trans *t
if (type && k.k->type != type) if (type && k.k->type != type)
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
mut = bch2_trans_kmalloc_nomemzero(trans, bytes); /* extra padding for varint_decode_fast... */
mut = bch2_trans_kmalloc_nomemzero(trans, bytes + 8);
if (!IS_ERR(mut)) { if (!IS_ERR(mut)) {
bkey_reassemble(mut, k); bkey_reassemble(mut, k);
......
...@@ -639,7 +639,7 @@ int bch2_data_update_init(struct btree_trans *trans, ...@@ -639,7 +639,7 @@ int bch2_data_update_init(struct btree_trans *trans,
bch2_write_op_init(&m->op, c, io_opts); bch2_write_op_init(&m->op, c, io_opts);
m->op.pos = bkey_start_pos(k.k); m->op.pos = bkey_start_pos(k.k);
m->op.version = k.k->version; m->op.version = k.k->bversion;
m->op.target = data_opts.target; m->op.target = data_opts.target;
m->op.write_point = wp; m->op.write_point = wp;
m->op.nr_replicas = 0; m->op.nr_replicas = 0;
......
...@@ -134,6 +134,10 @@ int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k, ...@@ -134,6 +134,10 @@ int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
void *end = &acc_k + 1; void *end = &acc_k + 1;
int ret = 0; int ret = 0;
bkey_fsck_err_on(bversion_zero(k.k->bversion),
c, accounting_key_version_0,
"accounting key with version=0");
switch (acc_k.type) { switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_nr_inodes: case BCH_DISK_ACCOUNTING_nr_inodes:
end = field_end(acc_k, nr_inodes); end = field_end(acc_k, nr_inodes);
...@@ -291,7 +295,7 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun ...@@ -291,7 +295,7 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun
struct accounting_mem_entry n = { struct accounting_mem_entry n = {
.pos = a.k->p, .pos = a.k->p,
.version = a.k->version, .bversion = a.k->bversion,
.nr_counters = bch2_accounting_counters(a.k), .nr_counters = bch2_accounting_counters(a.k),
.v[0] = __alloc_percpu_gfp(n.nr_counters * sizeof(u64), .v[0] = __alloc_percpu_gfp(n.nr_counters * sizeof(u64),
sizeof(u64), GFP_KERNEL), sizeof(u64), GFP_KERNEL),
...@@ -319,11 +323,13 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun ...@@ -319,11 +323,13 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun
return -BCH_ERR_ENOMEM_disk_accounting; return -BCH_ERR_ENOMEM_disk_accounting;
} }
int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc) int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a,
enum bch_accounting_mode mode)
{ {
struct bch_replicas_padded r; struct bch_replicas_padded r;
if (accounting_to_replicas(&r.e, a.k->p) && if (mode != BCH_ACCOUNTING_read &&
accounting_to_replicas(&r.e, a.k->p) &&
!bch2_replicas_marked_locked(c, &r.e)) !bch2_replicas_marked_locked(c, &r.e))
return -BCH_ERR_btree_insert_need_mark_replicas; return -BCH_ERR_btree_insert_need_mark_replicas;
...@@ -566,7 +572,9 @@ int bch2_gc_accounting_done(struct bch_fs *c) ...@@ -566,7 +572,9 @@ int bch2_gc_accounting_done(struct bch_fs *c)
struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i; struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
accounting_key_init(&k_i.k, &acc_k, src_v, nr); accounting_key_init(&k_i.k, &acc_k, src_v, nr);
bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false, false); bch2_accounting_mem_mod_locked(trans,
bkey_i_to_s_c_accounting(&k_i.k),
BCH_ACCOUNTING_normal);
preempt_disable(); preempt_disable();
struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage); struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
...@@ -589,30 +597,14 @@ int bch2_gc_accounting_done(struct bch_fs *c) ...@@ -589,30 +597,14 @@ int bch2_gc_accounting_done(struct bch_fs *c)
static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k) static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
if (k.k->type != KEY_TYPE_accounting) if (k.k->type != KEY_TYPE_accounting)
return 0; return 0;
percpu_down_read(&c->mark_lock); percpu_down_read(&c->mark_lock);
int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), false, true); int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k),
BCH_ACCOUNTING_read);
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
if (bch2_accounting_key_is_zero(bkey_s_c_to_accounting(k)) &&
ret == -BCH_ERR_btree_insert_need_mark_replicas)
ret = 0;
struct disk_accounting_pos acc;
bpos_to_disk_accounting_pos(&acc, k.k->p);
if (fsck_err_on(ret == -BCH_ERR_btree_insert_need_mark_replicas,
trans, accounting_replicas_not_marked,
"accounting not marked in superblock replicas\n %s",
(bch2_accounting_key_to_text(&buf, &acc),
buf.buf)))
ret = bch2_accounting_update_sb_one(c, k.k->p);
fsck_err:
printbuf_exit(&buf);
return ret; return ret;
} }
...@@ -624,6 +616,7 @@ int bch2_accounting_read(struct bch_fs *c) ...@@ -624,6 +616,7 @@ int bch2_accounting_read(struct bch_fs *c)
{ {
struct bch_accounting_mem *acc = &c->accounting; struct bch_accounting_mem *acc = &c->accounting;
struct btree_trans *trans = bch2_trans_get(c); struct btree_trans *trans = bch2_trans_get(c);
struct printbuf buf = PRINTBUF;
int ret = for_each_btree_key(trans, iter, int ret = for_each_btree_key(trans, iter,
BTREE_ID_accounting, POS_MIN, BTREE_ID_accounting, POS_MIN,
...@@ -647,7 +640,7 @@ int bch2_accounting_read(struct bch_fs *c) ...@@ -647,7 +640,7 @@ int bch2_accounting_read(struct bch_fs *c)
accounting_pos_cmp, &k.k->p); accounting_pos_cmp, &k.k->p);
bool applied = idx < acc->k.nr && bool applied = idx < acc->k.nr &&
bversion_cmp(acc->k.data[idx].version, k.k->version) >= 0; bversion_cmp(acc->k.data[idx].bversion, k.k->bversion) >= 0;
if (applied) if (applied)
continue; continue;
...@@ -655,7 +648,7 @@ int bch2_accounting_read(struct bch_fs *c) ...@@ -655,7 +648,7 @@ int bch2_accounting_read(struct bch_fs *c)
if (i + 1 < &darray_top(*keys) && if (i + 1 < &darray_top(*keys) &&
i[1].k->k.type == KEY_TYPE_accounting && i[1].k->k.type == KEY_TYPE_accounting &&
!journal_key_cmp(i, i + 1)) { !journal_key_cmp(i, i + 1)) {
BUG_ON(bversion_cmp(i[0].k->k.version, i[1].k->k.version) >= 0); WARN_ON(bversion_cmp(i[0].k->k.bversion, i[1].k->k.bversion) >= 0);
i[1].journal_seq = i[0].journal_seq; i[1].journal_seq = i[0].journal_seq;
...@@ -674,6 +667,45 @@ int bch2_accounting_read(struct bch_fs *c) ...@@ -674,6 +667,45 @@ int bch2_accounting_read(struct bch_fs *c)
keys->gap = keys->nr = dst - keys->data; keys->gap = keys->nr = dst - keys->data;
percpu_down_read(&c->mark_lock); percpu_down_read(&c->mark_lock);
for (unsigned i = 0; i < acc->k.nr; i++) {
u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
if (bch2_is_zero(v, sizeof(v[0]) * acc->k.data[i].nr_counters))
continue;
struct bch_replicas_padded r;
if (!accounting_to_replicas(&r.e, acc->k.data[i].pos))
continue;
/*
* If the replicas entry is invalid it'll get cleaned up by
* check_allocations:
*/
if (bch2_replicas_entry_validate(&r.e, c, &buf))
continue;
struct disk_accounting_pos k;
bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos);
if (fsck_err_on(!bch2_replicas_marked_locked(c, &r.e),
trans, accounting_replicas_not_marked,
"accounting not marked in superblock replicas\n %s",
(printbuf_reset(&buf),
bch2_accounting_key_to_text(&buf, &k),
buf.buf))) {
/*
* We're not RW yet and still single threaded, dropping
* and retaking lock is ok:
*/
percpu_up_read(&c->mark_lock);
ret = bch2_mark_replicas(c, &r.e);
if (ret)
goto fsck_err;
percpu_down_read(&c->mark_lock);
}
}
preempt_disable(); preempt_disable();
struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage); struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage);
...@@ -709,8 +741,10 @@ int bch2_accounting_read(struct bch_fs *c) ...@@ -709,8 +741,10 @@ int bch2_accounting_read(struct bch_fs *c)
} }
} }
preempt_enable(); preempt_enable();
fsck_err:
percpu_up_read(&c->mark_lock); percpu_up_read(&c->mark_lock);
err: err:
printbuf_exit(&buf);
bch2_trans_put(trans); bch2_trans_put(trans);
bch_err_fn(c, ret); bch_err_fn(c, ret);
return ret; return ret;
......
...@@ -36,8 +36,8 @@ static inline void bch2_accounting_accumulate(struct bkey_i_accounting *dst, ...@@ -36,8 +36,8 @@ static inline void bch2_accounting_accumulate(struct bkey_i_accounting *dst,
for (unsigned i = 0; i < bch2_accounting_counters(&dst->k); i++) for (unsigned i = 0; i < bch2_accounting_counters(&dst->k); i++)
dst->v.d[i] += src.v->d[i]; dst->v.d[i] += src.v->d[i];
if (bversion_cmp(dst->k.version, src.k->version) < 0) if (bversion_cmp(dst->k.bversion, src.k->bversion) < 0)
dst->k.version = src.k->version; dst->k.bversion = src.k->bversion;
} }
static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage, static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
...@@ -103,23 +103,35 @@ static inline int accounting_pos_cmp(const void *_l, const void *_r) ...@@ -103,23 +103,35 @@ static inline int accounting_pos_cmp(const void *_l, const void *_r)
return bpos_cmp(*l, *r); return bpos_cmp(*l, *r);
} }
int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, bool); enum bch_accounting_mode {
BCH_ACCOUNTING_normal,
BCH_ACCOUNTING_gc,
BCH_ACCOUNTING_read,
};
int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, enum bch_accounting_mode);
void bch2_accounting_mem_gc(struct bch_fs *); void bch2_accounting_mem_gc(struct bch_fs *);
/* /*
* Update in memory counters so they match the btree update we're doing; called * Update in memory counters so they match the btree update we're doing; called
* from transaction commit path * from transaction commit path
*/ */
static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc, bool read) static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans,
struct bkey_s_c_accounting a,
enum bch_accounting_mode mode)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct bch_accounting_mem *acc = &c->accounting;
struct disk_accounting_pos acc_k; struct disk_accounting_pos acc_k;
bpos_to_disk_accounting_pos(&acc_k, a.k->p); bpos_to_disk_accounting_pos(&acc_k, a.k->p);
bool gc = mode == BCH_ACCOUNTING_gc;
EBUG_ON(gc && !acc->gc_running);
if (acc_k.type == BCH_DISK_ACCOUNTING_inum) if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
return 0; return 0;
if (!gc && !read) { if (mode == BCH_ACCOUNTING_normal) {
switch (acc_k.type) { switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved: case BCH_DISK_ACCOUNTING_persistent_reserved:
trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0]; trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
...@@ -140,14 +152,11 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru ...@@ -140,14 +152,11 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru
} }
} }
struct bch_accounting_mem *acc = &c->accounting;
unsigned idx; unsigned idx;
EBUG_ON(gc && !acc->gc_running);
while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]), while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
accounting_pos_cmp, &a.k->p)) >= acc->k.nr) { accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
int ret = bch2_accounting_mem_insert(c, a, gc); int ret = bch2_accounting_mem_insert(c, a, mode);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -164,7 +173,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru ...@@ -164,7 +173,7 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru
static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc) static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
{ {
percpu_down_read(&trans->c->mark_lock); percpu_down_read(&trans->c->mark_lock);
int ret = bch2_accounting_mem_mod_locked(trans, a, gc, false); int ret = bch2_accounting_mem_mod_locked(trans, a, gc ? BCH_ACCOUNTING_gc : BCH_ACCOUNTING_normal);
percpu_up_read(&trans->c->mark_lock); percpu_up_read(&trans->c->mark_lock);
return ret; return ret;
} }
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
struct accounting_mem_entry { struct accounting_mem_entry {
struct bpos pos; struct bpos pos;
struct bversion version; struct bversion bversion;
unsigned nr_counters; unsigned nr_counters;
u64 __percpu *v[2]; u64 __percpu *v[2];
}; };
......
...@@ -239,7 +239,19 @@ int __bch2_fsck_err(struct bch_fs *c, ...@@ -239,7 +239,19 @@ int __bch2_fsck_err(struct bch_fs *c,
if (!c) if (!c)
c = trans->c; c = trans->c;
WARN_ON(!trans && bch2_current_has_btree_trans(c)); /*
* Ugly: if there's a transaction in the current task it has to be
* passed in to unlock if we prompt for user input.
*
* But, plumbing a transaction and transaction restarts into
* bkey_validate() is problematic.
*
* So:
* - make all bkey errors AUTOFIX, they're simple anyways (we just
* delete the key)
* - and we don't need to warn if we're not prompting
*/
WARN_ON(!(flags & FSCK_AUTOFIX) && !trans && bch2_current_has_btree_trans(c));
if ((flags & FSCK_CAN_FIX) && if ((flags & FSCK_CAN_FIX) &&
test_bit(err, c->sb.errors_silent)) test_bit(err, c->sb.errors_silent))
......
...@@ -184,7 +184,7 @@ do { \ ...@@ -184,7 +184,7 @@ do { \
ret = -BCH_ERR_fsck_delete_bkey; \ ret = -BCH_ERR_fsck_delete_bkey; \
goto fsck_err; \ goto fsck_err; \
} \ } \
int _ret = __bch2_bkey_fsck_err(c, k, FSCK_CAN_FIX, \ int _ret = __bch2_bkey_fsck_err(c, k, FSCK_CAN_FIX|FSCK_AUTOFIX,\
BCH_FSCK_ERR_##_err_type, \ BCH_FSCK_ERR_##_err_type, \
_err_msg, ##__VA_ARGS__); \ _err_msg, ##__VA_ARGS__); \
if (_ret != -BCH_ERR_fsck_fix && \ if (_ret != -BCH_ERR_fsck_fix && \
......
This diff is collapsed.
...@@ -320,9 +320,11 @@ static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k, ...@@ -320,9 +320,11 @@ static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k,
int bch2_inode_unpack(struct bkey_s_c k, int bch2_inode_unpack(struct bkey_s_c k,
struct bch_inode_unpacked *unpacked) struct bch_inode_unpacked *unpacked)
{ {
if (likely(k.k->type == KEY_TYPE_inode_v3)) unpacked->bi_snapshot = k.k->p.snapshot;
return bch2_inode_unpack_v3(k, unpacked);
return bch2_inode_unpack_slowpath(k, unpacked); return likely(k.k->type == KEY_TYPE_inode_v3)
? bch2_inode_unpack_v3(k, unpacked)
: bch2_inode_unpack_slowpath(k, unpacked);
} }
int bch2_inode_peek_nowarn(struct btree_trans *trans, int bch2_inode_peek_nowarn(struct btree_trans *trans,
...@@ -557,7 +559,7 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out, ...@@ -557,7 +559,7 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out,
void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode) void bch2_inode_unpacked_to_text(struct printbuf *out, struct bch_inode_unpacked *inode)
{ {
prt_printf(out, "inum: %llu ", inode->bi_inum); prt_printf(out, "inum: %llu:%u ", inode->bi_inum, inode->bi_snapshot);
__bch2_inode_unpacked_to_text(out, inode); __bch2_inode_unpacked_to_text(out, inode);
} }
...@@ -1111,7 +1113,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans, ...@@ -1111,7 +1113,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
pos.offset, pos.snapshot)) pos.offset, pos.snapshot))
goto delete; goto delete;
if (c->sb.clean && if (test_bit(BCH_FS_clean_recovery, &c->flags) &&
!fsck_err(trans, deleted_inode_but_clean, !fsck_err(trans, deleted_inode_but_clean,
"filesystem marked as clean but have deleted inode %llu:%u", "filesystem marked as clean but have deleted inode %llu:%u",
pos.offset, pos.snapshot)) { pos.offset, pos.snapshot)) {
......
...@@ -69,6 +69,7 @@ typedef u64 u96; ...@@ -69,6 +69,7 @@ typedef u64 u96;
struct bch_inode_unpacked { struct bch_inode_unpacked {
u64 bi_inum; u64 bi_inum;
u32 bi_snapshot;
u64 bi_journal_seq; u64 bi_journal_seq;
__le64 bi_hash_seed; __le64 bi_hash_seed;
u64 bi_size; u64 bi_size;
......
...@@ -517,7 +517,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans, ...@@ -517,7 +517,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
if ((ret = bkey_err(k))) if ((ret = bkey_err(k)))
goto out; goto out;
if (bversion_cmp(k.k->version, rbio->version) || if (bversion_cmp(k.k->bversion, rbio->version) ||
!bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset)) !bch2_bkey_matches_ptr(c, k, rbio->pick.ptr, data_offset))
goto out; goto out;
...@@ -1031,7 +1031,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig, ...@@ -1031,7 +1031,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
rbio->read_pos = read_pos; rbio->read_pos = read_pos;
rbio->data_btree = data_btree; rbio->data_btree = data_btree;
rbio->data_pos = data_pos; rbio->data_pos = data_pos;
rbio->version = k.k->version; rbio->version = k.k->bversion;
rbio->promote = promote; rbio->promote = promote;
INIT_WORK(&rbio->work, NULL); INIT_WORK(&rbio->work, NULL);
......
...@@ -697,7 +697,7 @@ static void init_append_extent(struct bch_write_op *op, ...@@ -697,7 +697,7 @@ static void init_append_extent(struct bch_write_op *op,
e = bkey_extent_init(op->insert_keys.top); e = bkey_extent_init(op->insert_keys.top);
e->k.p = op->pos; e->k.p = op->pos;
e->k.size = crc.uncompressed_size; e->k.size = crc.uncompressed_size;
e->k.version = version; e->k.bversion = version;
if (crc.csum_type || if (crc.csum_type ||
crc.compression_type || crc.compression_type ||
...@@ -1544,7 +1544,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len) ...@@ -1544,7 +1544,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
id = bkey_inline_data_init(op->insert_keys.top); id = bkey_inline_data_init(op->insert_keys.top);
id->k.p = op->pos; id->k.p = op->pos;
id->k.version = op->version; id->k.bversion = op->version;
id->k.size = sectors; id->k.size = sectors;
iter = bio->bi_iter; iter = bio->bi_iter;
......
...@@ -605,7 +605,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c, ...@@ -605,7 +605,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c,
goto out; goto out;
} }
if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c->disk_sb.sb, &err), if (journal_entry_err_on(bch2_replicas_entry_validate(&u->r, c, &err),
c, version, jset, entry, c, version, jset, entry,
journal_entry_data_usage_bad_size, journal_entry_data_usage_bad_size,
"invalid journal entry usage: %s", err.buf)) { "invalid journal entry usage: %s", err.buf)) {
......
...@@ -37,6 +37,14 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter, ...@@ -37,6 +37,14 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type); const struct bch_logged_op_fn *fn = logged_op_fn(k.k->type);
struct bkey_buf sk; struct bkey_buf sk;
u32 restart_count = trans->restart_count; u32 restart_count = trans->restart_count;
struct printbuf buf = PRINTBUF;
int ret = 0;
fsck_err_on(test_bit(BCH_FS_clean_recovery, &c->flags),
trans, logged_op_but_clean,
"filesystem marked as clean but have logged op\n%s",
(bch2_bkey_val_to_text(&buf, c, k),
buf.buf));
if (!fn) if (!fn)
return 0; return 0;
...@@ -47,8 +55,9 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter, ...@@ -47,8 +55,9 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,
fn->resume(trans, sk.k); fn->resume(trans, sk.k);
bch2_bkey_buf_exit(&sk, c); bch2_bkey_buf_exit(&sk, c);
fsck_err:
return trans_was_restarted(trans, restart_count); printbuf_exit(&buf);
return ret ?: trans_was_restarted(trans, restart_count);
} }
int bch2_resume_logged_ops(struct bch_fs *c) int bch2_resume_logged_ops(struct bch_fs *c)
......
...@@ -151,7 +151,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans, ...@@ -151,7 +151,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
struct bkey_s_c old = bch2_btree_path_peek_slot(btree_iter_path(trans, &iter), &u); struct bkey_s_c old = bch2_btree_path_peek_slot(btree_iter_path(trans, &iter), &u);
/* Has this delta already been applied to the btree? */ /* Has this delta already been applied to the btree? */
if (bversion_cmp(old.k->version, k->k->k.version) >= 0) { if (bversion_cmp(old.k->bversion, k->k->k.bversion) >= 0) {
ret = 0; ret = 0;
goto out; goto out;
} }
...@@ -717,6 +717,8 @@ int bch2_fs_recovery(struct bch_fs *c) ...@@ -717,6 +717,8 @@ int bch2_fs_recovery(struct bch_fs *c)
if (c->opts.fsck) if (c->opts.fsck)
set_bit(BCH_FS_fsck_running, &c->flags); set_bit(BCH_FS_fsck_running, &c->flags);
if (c->sb.clean)
set_bit(BCH_FS_clean_recovery, &c->flags);
ret = bch2_blacklist_table_initialize(c); ret = bch2_blacklist_table_initialize(c);
if (ret) { if (ret) {
...@@ -862,6 +864,9 @@ int bch2_fs_recovery(struct bch_fs *c) ...@@ -862,6 +864,9 @@ int bch2_fs_recovery(struct bch_fs *c)
clear_bit(BCH_FS_fsck_running, &c->flags); clear_bit(BCH_FS_fsck_running, &c->flags);
/* in case we don't run journal replay, i.e. norecovery mode */
set_bit(BCH_FS_accounting_replay_done, &c->flags);
/* fsync if we fixed errors */ /* fsync if we fixed errors */
if (test_bit(BCH_FS_errors_fixed, &c->flags) && if (test_bit(BCH_FS_errors_fixed, &c->flags) &&
bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) { bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) {
......
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \ x(check_directory_structure, 30, PASS_ONLINE|PASS_FSCK) \
x(check_nlinks, 31, PASS_FSCK) \ x(check_nlinks, 31, PASS_FSCK) \
x(resume_logged_ops, 23, PASS_ALWAYS) \ x(resume_logged_ops, 23, PASS_ALWAYS) \
x(delete_dead_inodes, 32, PASS_FSCK|PASS_UNCLEAN) \ x(delete_dead_inodes, 32, PASS_ALWAYS) \
x(fix_reflink_p, 33, 0) \ x(fix_reflink_p, 33, 0) \
x(set_fs_needs_rebalance, 34, 0) \ x(set_fs_needs_rebalance, 34, 0) \
......
...@@ -367,7 +367,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans, ...@@ -367,7 +367,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
r_v->k.type = bkey_type_to_indirect(&orig->k); r_v->k.type = bkey_type_to_indirect(&orig->k);
r_v->k.p = reflink_iter.pos; r_v->k.p = reflink_iter.pos;
bch2_key_resize(&r_v->k, orig->k.size); bch2_key_resize(&r_v->k, orig->k.size);
r_v->k.version = orig->k.version; r_v->k.bversion = orig->k.bversion;
set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k)); set_bkey_val_bytes(&r_v->k, sizeof(__le64) + bkey_val_bytes(&orig->k));
......
...@@ -66,7 +66,7 @@ void bch2_replicas_entry_to_text(struct printbuf *out, ...@@ -66,7 +66,7 @@ void bch2_replicas_entry_to_text(struct printbuf *out,
prt_printf(out, "]"); prt_printf(out, "]");
} }
int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r, static int bch2_replicas_entry_validate_locked(struct bch_replicas_entry_v1 *r,
struct bch_sb *sb, struct bch_sb *sb,
struct printbuf *err) struct printbuf *err)
{ {
...@@ -94,6 +94,16 @@ int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r, ...@@ -94,6 +94,16 @@ int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
return -BCH_ERR_invalid_replicas_entry; return -BCH_ERR_invalid_replicas_entry;
} }
int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
struct bch_fs *c,
struct printbuf *err)
{
mutex_lock(&c->sb_lock);
int ret = bch2_replicas_entry_validate_locked(r, c->disk_sb.sb, err);
mutex_unlock(&c->sb_lock);
return ret;
}
void bch2_cpu_replicas_to_text(struct printbuf *out, void bch2_cpu_replicas_to_text(struct printbuf *out,
struct bch_replicas_cpu *r) struct bch_replicas_cpu *r)
{ {
...@@ -676,7 +686,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r, ...@@ -676,7 +686,7 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
struct bch_replicas_entry_v1 *e = struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(cpu_r, i); cpu_replicas_entry(cpu_r, i);
int ret = bch2_replicas_entry_validate(e, sb, err); int ret = bch2_replicas_entry_validate_locked(e, sb, err);
if (ret) if (ret)
return ret; return ret;
......
...@@ -10,7 +10,7 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *); ...@@ -10,7 +10,7 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *);
void bch2_replicas_entry_to_text(struct printbuf *, void bch2_replicas_entry_to_text(struct printbuf *,
struct bch_replicas_entry_v1 *); struct bch_replicas_entry_v1 *);
int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *, int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *,
struct bch_sb *, struct printbuf *); struct bch_fs *, struct printbuf *);
void bch2_cpu_replicas_to_text(struct printbuf *, struct bch_replicas_cpu *); void bch2_cpu_replicas_to_text(struct printbuf *, struct bch_replicas_cpu *);
static inline struct bch_replicas_entry_v1 * static inline struct bch_replicas_entry_v1 *
......
...@@ -167,6 +167,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c) ...@@ -167,6 +167,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
ret = bch2_sb_clean_validate_late(c, clean, READ); ret = bch2_sb_clean_validate_late(c, clean, READ);
if (ret) { if (ret) {
kfree(clean);
mutex_unlock(&c->sb_lock); mutex_unlock(&c->sb_lock);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
......
...@@ -312,8 +312,7 @@ static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb, ...@@ -312,8 +312,7 @@ static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
if (!first) if (!first)
prt_char(out, ','); prt_char(out, ',');
first = false; first = false;
unsigned e = le16_to_cpu(i->errors[j]); bch2_sb_error_id_to_text(out, le16_to_cpu(i->errors[j]));
prt_str(out, e < BCH_SB_ERR_MAX ? bch2_sb_error_strs[e] : "(unknown)");
} }
prt_newline(out); prt_newline(out);
} }
...@@ -353,7 +352,9 @@ int bch2_sb_downgrade_update(struct bch_fs *c) ...@@ -353,7 +352,9 @@ int bch2_sb_downgrade_update(struct bch_fs *c)
for (unsigned i = 0; i < src->nr_errors; i++) for (unsigned i = 0; i < src->nr_errors; i++)
dst->errors[i] = cpu_to_le16(src->errors[i]); dst->errors[i] = cpu_to_le16(src->errors[i]);
downgrade_table_extra(c, &table); ret = downgrade_table_extra(c, &table);
if (ret)
goto out;
if (!dst->recovery_passes[0] && if (!dst->recovery_passes[0] &&
!dst->recovery_passes[1] && !dst->recovery_passes[1] &&
...@@ -399,7 +400,7 @@ void bch2_sb_set_downgrade(struct bch_fs *c, unsigned new_minor, unsigned old_mi ...@@ -399,7 +400,7 @@ void bch2_sb_set_downgrade(struct bch_fs *c, unsigned new_minor, unsigned old_mi
for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) { for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
unsigned e = le16_to_cpu(i->errors[j]); unsigned e = le16_to_cpu(i->errors[j]);
if (e < BCH_SB_ERR_MAX) if (e < BCH_FSCK_ERR_MAX)
__set_bit(e, c->sb.errors_silent); __set_bit(e, c->sb.errors_silent);
if (e < sizeof(ext->errors_silent) * 8) if (e < sizeof(ext->errors_silent) * 8)
__set_bit_le64(e, ext->errors_silent); __set_bit_le64(e, ext->errors_silent);
......
...@@ -7,12 +7,12 @@ ...@@ -7,12 +7,12 @@
const char * const bch2_sb_error_strs[] = { const char * const bch2_sb_error_strs[] = {
#define x(t, n, ...) [n] = #t, #define x(t, n, ...) [n] = #t,
BCH_SB_ERRS() BCH_SB_ERRS()
NULL #undef x
}; };
static void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id) void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
{ {
if (id < BCH_SB_ERR_MAX) if (id < BCH_FSCK_ERR_MAX)
prt_str(out, bch2_sb_error_strs[id]); prt_str(out, bch2_sb_error_strs[id]);
else else
prt_printf(out, "(unknown error %u)", id); prt_printf(out, "(unknown error %u)", id);
......
...@@ -6,6 +6,8 @@ ...@@ -6,6 +6,8 @@
extern const char * const bch2_sb_error_strs[]; extern const char * const bch2_sb_error_strs[];
void bch2_sb_error_id_to_text(struct printbuf *, enum bch_sb_error_id);
extern const struct bch_sb_field_ops bch_sb_field_ops_errors; extern const struct bch_sb_field_ops bch_sb_field_ops_errors;
void bch2_sb_error_count(struct bch_fs *, enum bch_sb_error_id); void bch2_sb_error_count(struct bch_fs *, enum bch_sb_error_id);
......
...@@ -210,22 +210,23 @@ enum bch_fsck_flags { ...@@ -210,22 +210,23 @@ enum bch_fsck_flags {
x(inode_snapshot_mismatch, 196, 0) \ x(inode_snapshot_mismatch, 196, 0) \
x(inode_unlinked_but_clean, 197, 0) \ x(inode_unlinked_but_clean, 197, 0) \
x(inode_unlinked_but_nlink_nonzero, 198, 0) \ x(inode_unlinked_but_nlink_nonzero, 198, 0) \
x(inode_unlinked_and_not_open, 281, 0) \
x(inode_checksum_type_invalid, 199, 0) \ x(inode_checksum_type_invalid, 199, 0) \
x(inode_compression_type_invalid, 200, 0) \ x(inode_compression_type_invalid, 200, 0) \
x(inode_subvol_root_but_not_dir, 201, 0) \ x(inode_subvol_root_but_not_dir, 201, 0) \
x(inode_i_size_dirty_but_clean, 202, 0) \ x(inode_i_size_dirty_but_clean, 202, FSCK_AUTOFIX) \
x(inode_i_sectors_dirty_but_clean, 203, 0) \ x(inode_i_sectors_dirty_but_clean, 203, FSCK_AUTOFIX) \
x(inode_i_sectors_wrong, 204, 0) \ x(inode_i_sectors_wrong, 204, FSCK_AUTOFIX) \
x(inode_dir_wrong_nlink, 205, 0) \ x(inode_dir_wrong_nlink, 205, FSCK_AUTOFIX) \
x(inode_dir_multiple_links, 206, 0) \ x(inode_dir_multiple_links, 206, FSCK_AUTOFIX) \
x(inode_multiple_links_but_nlink_0, 207, 0) \ x(inode_multiple_links_but_nlink_0, 207, FSCK_AUTOFIX) \
x(inode_wrong_backpointer, 208, 0) \ x(inode_wrong_backpointer, 208, FSCK_AUTOFIX) \
x(inode_wrong_nlink, 209, 0) \ x(inode_wrong_nlink, 209, FSCK_AUTOFIX) \
x(inode_unreachable, 210, 0) \ x(inode_unreachable, 210, FSCK_AUTOFIX) \
x(deleted_inode_but_clean, 211, 0) \ x(deleted_inode_but_clean, 211, FSCK_AUTOFIX) \
x(deleted_inode_missing, 212, 0) \ x(deleted_inode_missing, 212, FSCK_AUTOFIX) \
x(deleted_inode_is_dir, 213, 0) \ x(deleted_inode_is_dir, 213, FSCK_AUTOFIX) \
x(deleted_inode_not_unlinked, 214, 0) \ x(deleted_inode_not_unlinked, 214, FSCK_AUTOFIX) \
x(extent_overlapping, 215, 0) \ x(extent_overlapping, 215, 0) \
x(key_in_missing_inode, 216, 0) \ x(key_in_missing_inode, 216, 0) \
x(key_in_wrong_inode_type, 217, 0) \ x(key_in_wrong_inode_type, 217, 0) \
...@@ -255,7 +256,7 @@ enum bch_fsck_flags { ...@@ -255,7 +256,7 @@ enum bch_fsck_flags {
x(dir_loop, 241, 0) \ x(dir_loop, 241, 0) \
x(hash_table_key_duplicate, 242, 0) \ x(hash_table_key_duplicate, 242, 0) \
x(hash_table_key_wrong_offset, 243, 0) \ x(hash_table_key_wrong_offset, 243, 0) \
x(unlinked_inode_not_on_deleted_list, 244, 0) \ x(unlinked_inode_not_on_deleted_list, 244, FSCK_AUTOFIX) \
x(reflink_p_front_pad_bad, 245, 0) \ x(reflink_p_front_pad_bad, 245, 0) \
x(journal_entry_dup_same_device, 246, 0) \ x(journal_entry_dup_same_device, 246, 0) \
x(inode_bi_subvol_missing, 247, 0) \ x(inode_bi_subvol_missing, 247, 0) \
...@@ -270,7 +271,7 @@ enum bch_fsck_flags { ...@@ -270,7 +271,7 @@ enum bch_fsck_flags {
x(subvol_children_not_set, 256, 0) \ x(subvol_children_not_set, 256, 0) \
x(subvol_children_bad, 257, 0) \ x(subvol_children_bad, 257, 0) \
x(subvol_loop, 258, 0) \ x(subvol_loop, 258, 0) \
x(subvol_unreachable, 259, 0) \ x(subvol_unreachable, 259, FSCK_AUTOFIX) \
x(btree_node_bkey_bad_u64s, 260, 0) \ x(btree_node_bkey_bad_u64s, 260, 0) \
x(btree_node_topology_empty_interior_node, 261, 0) \ x(btree_node_topology_empty_interior_node, 261, 0) \
x(btree_ptr_v2_min_key_bad, 262, 0) \ x(btree_ptr_v2_min_key_bad, 262, 0) \
...@@ -282,8 +283,8 @@ enum bch_fsck_flags { ...@@ -282,8 +283,8 @@ enum bch_fsck_flags {
x(btree_ptr_v2_written_0, 268, 0) \ x(btree_ptr_v2_written_0, 268, 0) \
x(subvol_snapshot_bad, 269, 0) \ x(subvol_snapshot_bad, 269, 0) \
x(subvol_inode_bad, 270, 0) \ x(subvol_inode_bad, 270, 0) \
x(alloc_key_stripe_sectors_wrong, 271, 0) \ x(alloc_key_stripe_sectors_wrong, 271, FSCK_AUTOFIX) \
x(accounting_mismatch, 272, 0) \ x(accounting_mismatch, 272, FSCK_AUTOFIX) \
x(accounting_replicas_not_marked, 273, 0) \ x(accounting_replicas_not_marked, 273, 0) \
x(invalid_btree_id, 274, 0) \ x(invalid_btree_id, 274, 0) \
x(alloc_key_io_time_bad, 275, 0) \ x(alloc_key_io_time_bad, 275, 0) \
...@@ -292,12 +293,14 @@ enum bch_fsck_flags { ...@@ -292,12 +293,14 @@ enum bch_fsck_flags {
x(accounting_key_replicas_nr_devs_0, 278, FSCK_AUTOFIX) \ x(accounting_key_replicas_nr_devs_0, 278, FSCK_AUTOFIX) \
x(accounting_key_replicas_nr_required_bad, 279, FSCK_AUTOFIX) \ x(accounting_key_replicas_nr_required_bad, 279, FSCK_AUTOFIX) \
x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \ x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \
x(accounting_key_version_0, 282, FSCK_AUTOFIX) \
x(logged_op_but_clean, 283, FSCK_AUTOFIX) \
x(MAX, 284, 0)
enum bch_sb_error_id { enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n, #define x(t, n, ...) BCH_FSCK_ERR_##t = n,
BCH_SB_ERRS() BCH_SB_ERRS()
#undef x #undef x
BCH_SB_ERR_MAX
}; };
struct bch_sb_field_errors { struct bch_sb_field_errors {
......
...@@ -169,11 +169,17 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type, ...@@ -169,11 +169,17 @@ static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
ret = -1 - SIX_LOCK_write; ret = -1 - SIX_LOCK_write;
} }
} else if (type == SIX_LOCK_write && lock->readers) { } else if (type == SIX_LOCK_write && lock->readers) {
if (try) { if (try)
atomic_add(SIX_LOCK_HELD_write, &lock->state); atomic_add(SIX_LOCK_HELD_write, &lock->state);
smp_mb__after_atomic();
}
/*
* Make sure atomic_add happens before pcpu_read_count and
* six_set_bitmask in slow path happens before pcpu_read_count.
*
* Paired with the smp_mb() in read lock fast path (per-cpu mode)
* and the one before atomic_read in read unlock path.
*/
smp_mb();
ret = !pcpu_read_count(lock); ret = !pcpu_read_count(lock);
if (try && !ret) { if (try && !ret) {
......
...@@ -469,6 +469,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root) ...@@ -469,6 +469,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
u32 id = snapshot_root; u32 id = snapshot_root;
u32 subvol = 0, s; u32 subvol = 0, s;
rcu_read_lock();
while (id) { while (id) {
s = snapshot_t(c, id)->subvol; s = snapshot_t(c, id)->subvol;
...@@ -477,6 +478,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root) ...@@ -477,6 +478,7 @@ static u32 bch2_snapshot_tree_oldest_subvol(struct bch_fs *c, u32 snapshot_root)
id = bch2_snapshot_tree_next(c, id); id = bch2_snapshot_tree_next(c, id);
} }
rcu_read_unlock();
return subvol; return subvol;
} }
...@@ -1782,6 +1784,7 @@ static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans, ...@@ -1782,6 +1784,7 @@ static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,
new->k.p.snapshot = leaf_id; new->k.p.snapshot = leaf_id;
ret = bch2_trans_update(trans, &iter, new, 0); ret = bch2_trans_update(trans, &iter, new, 0);
out: out:
bch2_set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter); bch2_trans_iter_exit(trans, &iter);
return ret; return ret;
} }
......
...@@ -92,35 +92,33 @@ static int check_subvol(struct btree_trans *trans, ...@@ -92,35 +92,33 @@ static int check_subvol(struct btree_trans *trans,
} }
struct bch_inode_unpacked inode; struct bch_inode_unpacked inode;
struct btree_iter inode_iter = {}; ret = bch2_inode_find_by_inum_nowarn_trans(trans,
ret = bch2_inode_peek_nowarn(trans, &inode_iter, &inode,
(subvol_inum) { k.k->p.offset, le64_to_cpu(subvol.v->inode) }, (subvol_inum) { k.k->p.offset, le64_to_cpu(subvol.v->inode) },
0); &inode);
bch2_trans_iter_exit(trans, &inode_iter); if (!ret) {
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
if (fsck_err_on(ret,
trans, subvol_to_missing_root,
"subvolume %llu points to missing subvolume root %llu:%u",
k.k->p.offset, le64_to_cpu(subvol.v->inode),
le32_to_cpu(subvol.v->snapshot))) {
ret = bch2_subvolume_delete(trans, iter->pos.offset);
bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
return ret ?: -BCH_ERR_transaction_restart_nested;
}
if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset, if (fsck_err_on(inode.bi_subvol != subvol.k->p.offset,
trans, subvol_root_wrong_bi_subvol, trans, subvol_root_wrong_bi_subvol,
"subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu", "subvol root %llu:%u has wrong bi_subvol field: got %u, should be %llu",
inode.bi_inum, inode_iter.k.p.snapshot, inode.bi_inum, inode.bi_snapshot,
inode.bi_subvol, subvol.k->p.offset)) { inode.bi_subvol, subvol.k->p.offset)) {
inode.bi_subvol = subvol.k->p.offset; inode.bi_subvol = subvol.k->p.offset;
ret = __bch2_fsck_write_inode(trans, &inode, le32_to_cpu(subvol.v->snapshot)); ret = __bch2_fsck_write_inode(trans, &inode, le32_to_cpu(subvol.v->snapshot));
if (ret) if (ret)
goto err; goto err;
} }
} else if (bch2_err_matches(ret, ENOENT)) {
if (fsck_err(trans, subvol_to_missing_root,
"subvolume %llu points to missing subvolume root %llu:%u",
k.k->p.offset, le64_to_cpu(subvol.v->inode),
le32_to_cpu(subvol.v->snapshot))) {
ret = bch2_subvolume_delete(trans, iter->pos.offset);
bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
ret = ret ?: -BCH_ERR_transaction_restart_nested;
goto err;
}
} else {
goto err;
}
if (!BCH_SUBVOLUME_SNAP(subvol.v)) { if (!BCH_SUBVOLUME_SNAP(subvol.v)) {
u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot)); u32 snapshot_root = bch2_snapshot_root(c, le32_to_cpu(subvol.v->snapshot));
...@@ -137,7 +135,7 @@ static int check_subvol(struct btree_trans *trans, ...@@ -137,7 +135,7 @@ static int check_subvol(struct btree_trans *trans,
"%s: snapshot tree %u not found", __func__, snapshot_tree); "%s: snapshot tree %u not found", __func__, snapshot_tree);
if (ret) if (ret)
return ret; goto err;
if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset,
trans, subvol_not_master_and_not_snapshot, trans, subvol_not_master_and_not_snapshot,
...@@ -147,7 +145,7 @@ static int check_subvol(struct btree_trans *trans, ...@@ -147,7 +145,7 @@ static int check_subvol(struct btree_trans *trans,
bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume); bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
ret = PTR_ERR_OR_ZERO(s); ret = PTR_ERR_OR_ZERO(s);
if (ret) if (ret)
return ret; goto err;
SET_BCH_SUBVOLUME_SNAP(&s->v, true); SET_BCH_SUBVOLUME_SNAP(&s->v, true);
} }
......
...@@ -799,8 +799,10 @@ static int __bch2_read_super(const char *path, struct bch_opts *opts, ...@@ -799,8 +799,10 @@ static int __bch2_read_super(const char *path, struct bch_opts *opts,
i < layout.sb_offset + layout.nr_superblocks; i++) { i < layout.sb_offset + layout.nr_superblocks; i++) {
offset = le64_to_cpu(*i); offset = le64_to_cpu(*i);
if (offset == opt_get(*opts, sb)) if (offset == opt_get(*opts, sb)) {
ret = -BCH_ERR_invalid;
continue; continue;
}
ret = read_one_super(sb, offset, &err); ret = read_one_super(sb, offset, &err);
if (!ret) if (!ret)
...@@ -1188,7 +1190,8 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb, ...@@ -1188,7 +1190,8 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8); le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8);
prt_printf(out, "Errors to silently fix:\t"); prt_printf(out, "Errors to silently fix:\t");
prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent, sizeof(e->errors_silent) * 8); prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent,
min(BCH_FSCK_ERR_MAX, sizeof(e->errors_silent) * 8));
prt_newline(out); prt_newline(out);
kfree(errors_silent); kfree(errors_silent);
......
...@@ -394,7 +394,7 @@ static int insert_test_extent(struct bch_fs *c, ...@@ -394,7 +394,7 @@ static int insert_test_extent(struct bch_fs *c,
k.k_i.k.p.offset = end; k.k_i.k.p.offset = end;
k.k_i.k.p.snapshot = U32_MAX; k.k_i.k.p.snapshot = U32_MAX;
k.k_i.k.size = end - start; k.k_i.k.size = end - start;
k.k_i.k.version.lo = test_version++; k.k_i.k.bversion.lo = test_version++;
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0); ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0, 0);
bch_err_fn(c, ret); bch_err_fn(c, ret);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment