Commit 42b72e0b authored by Kent Overstreet's avatar Kent Overstreet Committed by Kent Overstreet

bcachefs: journal_replay_early()

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 3ccc5c50
...@@ -1249,19 +1249,3 @@ int bch2_gc_thread_start(struct bch_fs *c) ...@@ -1249,19 +1249,3 @@ int bch2_gc_thread_start(struct bch_fs *c)
wake_up_process(p); wake_up_process(p);
return 0; return 0;
} }
/* Initial GC computes bucket marks during startup */
/*
 * Thin wrapper around bch2_gc() for the startup path: runs a full mark and
 * sweep over @journal with the initial flag set.
 *
 * Returns the bch2_gc() result (0 on success, negative error code on failure);
 * the key_version bump below is applied even if gc failed.
 */
int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
{
int ret = bch2_gc(c, journal, true);
/*
* Skip past versions that might have possibly been used (as nonces),
* but hadn't had their pointers written:
*/
/* 1 << 16 = one full nonce range per possibly-dirty version */
if (c->sb.encryption_type)
atomic64_add(1 << 16, &c->key_version);
return ret;
}
...@@ -8,7 +8,6 @@ void bch2_coalesce(struct bch_fs *); ...@@ -8,7 +8,6 @@ void bch2_coalesce(struct bch_fs *);
int bch2_gc(struct bch_fs *, struct list_head *, bool); int bch2_gc(struct bch_fs *, struct list_head *, bool);
void bch2_gc_thread_stop(struct bch_fs *); void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *); int bch2_gc_thread_start(struct bch_fs *);
int bch2_initial_gc(struct bch_fs *, struct list_head *);
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned); void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);
/* /*
......
...@@ -475,6 +475,7 @@ struct btree_root { ...@@ -475,6 +475,7 @@ struct btree_root {
__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX); __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
u8 level; u8 level;
u8 alive; u8 alive;
s8 error;
}; };
/* /*
......
...@@ -2122,7 +2122,6 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b) ...@@ -2122,7 +2122,6 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
BUG_ON(btree_node_root(c, b)); BUG_ON(btree_node_root(c, b));
__bch2_btree_set_root_inmem(c, b); __bch2_btree_set_root_inmem(c, b);
bch2_btree_set_root_ondisk(c, b, READ);
} }
void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id) void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
......
...@@ -13,16 +13,17 @@ ...@@ -13,16 +13,17 @@
#include "journal_io.h" #include "journal_io.h"
#include "quota.h" #include "quota.h"
#include "recovery.h" #include "recovery.h"
#include "replicas.h"
#include "super-io.h" #include "super-io.h"
#include <linux/stat.h> #include <linux/stat.h>
#define QSTR(n) { { { .len = strlen(n) } }, .name = n } #define QSTR(n) { { { .len = strlen(n) } }, .name = n }
struct bkey_i *btree_root_find(struct bch_fs *c, static struct bkey_i *btree_root_find(struct bch_fs *c,
struct bch_sb_field_clean *clean, struct bch_sb_field_clean *clean,
struct jset *j, struct jset *j,
enum btree_id id, unsigned *level) enum btree_id id, unsigned *level)
{ {
struct bkey_i *k; struct bkey_i *k;
struct jset_entry *entry, *start, *end; struct jset_entry *entry, *start, *end;
...@@ -50,6 +51,51 @@ struct bkey_i *btree_root_find(struct bch_fs *c, ...@@ -50,6 +51,51 @@ struct bkey_i *btree_root_find(struct bch_fs *c,
return k; return k;
} }
static int journal_replay_entry_early(struct bch_fs *c,
struct jset_entry *entry)
{
int ret = 0;
switch (entry->type) {
case BCH_JSET_ENTRY_btree_root: {
struct btree_root *r = &c->btree_roots[entry->btree_id];
if (entry->u64s) {
r->level = entry->level;
bkey_copy(&r->key, &entry->start[0]);
r->error = 0;
} else {
r->error = -EIO;
}
r->alive = true;
break;
}
case BCH_JSET_ENTRY_usage: {
struct jset_entry_usage *u =
container_of(entry, struct jset_entry_usage, entry);
switch (u->type) {
case FS_USAGE_REPLICAS:
ret = bch2_replicas_set_usage(c, &u->r,
le64_to_cpu(u->sectors));
break;
case FS_USAGE_INODES:
percpu_u64_set(&c->usage[0]->s.nr_inodes,
le64_to_cpu(u->sectors));
break;
case FS_USAGE_KEY_VERSION:
atomic64_set(&c->key_version,
le64_to_cpu(u->sectors));
break;
}
break;
}
}
return ret;
}
static int verify_superblock_clean(struct bch_fs *c, static int verify_superblock_clean(struct bch_fs *c,
struct bch_sb_field_clean *clean, struct bch_sb_field_clean *clean,
struct jset *j) struct jset *j)
...@@ -126,6 +172,7 @@ int bch2_fs_recovery(struct bch_fs *c) ...@@ -126,6 +172,7 @@ int bch2_fs_recovery(struct bch_fs *c)
{ {
const char *err = "cannot allocate memory"; const char *err = "cannot allocate memory";
struct bch_sb_field_clean *clean = NULL, *sb_clean = NULL; struct bch_sb_field_clean *clean = NULL, *sb_clean = NULL;
struct jset_entry *entry;
LIST_HEAD(journal); LIST_HEAD(journal);
struct jset *j = NULL; struct jset *j = NULL;
unsigned i; unsigned i;
...@@ -178,28 +225,44 @@ int bch2_fs_recovery(struct bch_fs *c) ...@@ -178,28 +225,44 @@ int bch2_fs_recovery(struct bch_fs *c)
fsck_err_on(clean && !journal_empty(&journal), c, fsck_err_on(clean && !journal_empty(&journal), c,
"filesystem marked clean but journal not empty"); "filesystem marked clean but journal not empty");
err = "insufficient memory";
if (clean) { if (clean) {
c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock); c->bucket_clock[READ].hand = le16_to_cpu(clean->read_clock);
c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock); c->bucket_clock[WRITE].hand = le16_to_cpu(clean->write_clock);
for (entry = clean->start;
entry != vstruct_end(&clean->field);
entry = vstruct_next(entry)) {
ret = journal_replay_entry_early(c, entry);
if (ret)
goto err;
}
} else { } else {
struct journal_replay *i;
c->bucket_clock[READ].hand = le16_to_cpu(j->read_clock); c->bucket_clock[READ].hand = le16_to_cpu(j->read_clock);
c->bucket_clock[WRITE].hand = le16_to_cpu(j->write_clock); c->bucket_clock[WRITE].hand = le16_to_cpu(j->write_clock);
list_for_each_entry(i, &journal, list)
vstruct_for_each(&i->j, entry) {
ret = journal_replay_entry_early(c, entry);
if (ret)
goto err;
}
} }
for (i = 0; i < BTREE_ID_NR; i++) { for (i = 0; i < BTREE_ID_NR; i++) {
unsigned level; struct btree_root *r = &c->btree_roots[i];
struct bkey_i *k;
k = btree_root_find(c, clean, j, i, &level); if (!r->alive)
if (!k)
continue; continue;
err = "invalid btree root pointer"; err = "invalid btree root pointer";
if (IS_ERR(k)) if (r->error)
goto err; goto err;
err = "error reading btree root"; err = "error reading btree root";
if (bch2_btree_root_read(c, i, k, level)) { if (bch2_btree_root_read(c, i, &r->key, r->level)) {
if (i != BTREE_ID_ALLOC) if (i != BTREE_ID_ALLOC)
goto err; goto err;
...@@ -226,13 +289,20 @@ int bch2_fs_recovery(struct bch_fs *c) ...@@ -226,13 +289,20 @@ int bch2_fs_recovery(struct bch_fs *c)
bch_verbose(c, "starting mark and sweep:"); bch_verbose(c, "starting mark and sweep:");
err = "error in recovery"; err = "error in recovery";
ret = bch2_initial_gc(c, &journal); ret = bch2_gc(c, &journal, true);
if (ret) if (ret)
goto err; goto err;
bch_verbose(c, "mark and sweep done"); bch_verbose(c, "mark and sweep done");
clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags); clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
/*
* Skip past versions that might have possibly been used (as nonces),
* but hadn't had their pointers written:
*/
if (c->sb.encryption_type && !c->sb.clean)
atomic64_add(1 << 16, &c->key_version);
if (c->opts.noreplay) if (c->opts.noreplay)
goto out; goto out;
...@@ -319,7 +389,7 @@ int bch2_fs_initialize(struct bch_fs *c) ...@@ -319,7 +389,7 @@ int bch2_fs_initialize(struct bch_fs *c)
for (i = 0; i < BTREE_ID_NR; i++) for (i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc(c, i); bch2_btree_root_alloc(c, i);
ret = bch2_initial_gc(c, &journal); ret = bch2_gc(c, &journal, true);
if (ret) if (ret)
goto err; goto err;
......
...@@ -530,6 +530,34 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask) ...@@ -530,6 +530,34 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
return 0; return 0;
} }
int bch2_replicas_set_usage(struct bch_fs *c,
struct bch_replicas_entry *r,
u64 sectors)
{
int ret, idx = bch2_replicas_entry_idx(c, r);
if (idx < 0) {
struct bch_replicas_cpu n;
n = cpu_replicas_add_entry(&c->replicas, r);
if (!n.entries)
return -ENOMEM;
ret = replicas_table_update(c, &n);
if (ret)
return ret;
kfree(n.entries);
idx = bch2_replicas_entry_idx(c, r);
BUG_ON(ret < 0);
}
percpu_u64_set(&c->usage[0]->data[idx], sectors);
return 0;
}
/* Replicas tracking - superblock: */ /* Replicas tracking - superblock: */
static int static int
......
...@@ -57,6 +57,10 @@ unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *); ...@@ -57,6 +57,10 @@ unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
int bch2_replicas_gc_end(struct bch_fs *, int); int bch2_replicas_gc_end(struct bch_fs *, int);
int bch2_replicas_gc_start(struct bch_fs *, unsigned); int bch2_replicas_gc_start(struct bch_fs *, unsigned);
int bch2_replicas_set_usage(struct bch_fs *,
struct bch_replicas_entry *,
u64);
#define for_each_cpu_replicas_entry(_r, _i) \ #define for_each_cpu_replicas_entry(_r, _i) \
for (_i = (_r)->entries; \ for (_i = (_r)->entries; \
(void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\ (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size;\
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment