Commit c18536a7 authored by Kent Overstreet

bcache: Prune struct btree_op

Eventual goal is for struct btree_op to contain only what is necessary
for traversing the btree.
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent cc231966
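
For orientation, the sketch below pieces together what this patch leaves in struct btree_op and where the removed fields end up, reconstructed from the struct btree_op and struct search hunks that follow. Fields the diff does not touch may be missing, so treat it as an approximation rather than the literal header:

/* Approximate shape of struct btree_op after this patch (reconstructed
 * from the hunks below; untouched fields may be omitted). */
struct btree_op {
        struct closure cl;

        /* Btree level at which we start taking write locks */
        short lock;

        enum {
                BTREE_INSERT,
                BTREE_REPLACE
        } type:8;

        unsigned insert_collision:1;

        BKEY_PADDED(replace);
};

/*
 * Removed from btree_op by this patch:
 *   - c, cache_bio, inode, write_prio, csum, bypass, flush_journal,
 *     insert_data_done: moved into struct search (see the struct search hunk)
 *   - journal: now passed explicitly as an atomic_t *journal_ref argument
 *     to bch_btree_insert() and bch_btree_leaf_dirty()
 */
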
@@ -1197,7 +1197,6 @@ int bch_bset_print_stats(struct cache_set *c, char *buf)
 
 	memset(&t, 0, sizeof(struct bset_stats));
 	bch_btree_op_init_stack(&t.op);
-	t.op.c = c;
 
 	ret = bch_btree_map_nodes(&t.op, c, &ZERO_KEY, btree_bset_stats);
 	if (ret < 0)
...
@@ -503,7 +503,7 @@ static void btree_node_write_work(struct work_struct *w)
 	rw_unlock(true, b);
 }
 
-static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
+static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
 {
 	struct bset *i = b->sets[b->nsets].data;
 	struct btree_write *w = btree_current_write(b);
@@ -516,15 +516,15 @@ static void bch_btree_leaf_dirty(struct btree *b, struct btree_op *op)
 
 	set_btree_node_dirty(b);
 
-	if (op->journal) {
+	if (journal_ref) {
 		if (w->journal &&
-		    journal_pin_cmp(b->c, w, op)) {
+		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
 			atomic_dec_bug(w->journal);
 			w->journal = NULL;
 		}
 
 		if (!w->journal) {
-			w->journal = op->journal;
+			w->journal = journal_ref;
 			atomic_inc(w->journal);
 		}
 	}
@@ -1663,13 +1663,16 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
 	return 0;
 }
 
-int bch_btree_check(struct cache_set *c, struct btree_op *op)
+int bch_btree_check(struct cache_set *c)
 {
 	int ret = -ENOMEM;
 	unsigned i;
 	unsigned long *seen[MAX_CACHES_PER_SET];
+	struct btree_op op;
 
 	memset(seen, 0, sizeof(seen));
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	for (i = 0; c->cache[i]; i++) {
 		size_t n = DIV_ROUND_UP(c->cache[i]->sb.nbuckets, 8);
@@ -1681,7 +1684,7 @@ int bch_btree_check(struct cache_set *c, struct btree_op *op)
 		memset(seen[i], 0xFF, n);
 	}
 
-	ret = btree_root(check_recurse, c, op, seen);
+	ret = btree_root(check_recurse, c, &op, seen);
 err:
 	for (i = 0; i < MAX_CACHES_PER_SET; i++)
 		kfree(seen[i]);
@@ -2091,7 +2094,8 @@ static int btree_split(struct btree *b, struct btree_op *op,
 }
 
 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
-				 struct keylist *insert_keys)
+				 struct keylist *insert_keys,
+				 atomic_t *journal_ref)
 {
 	int ret = 0;
 	struct keylist split_keys;
@@ -2123,7 +2127,7 @@ static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
 
 		if (bch_btree_insert_keys(b, op, insert_keys)) {
 			if (!b->level)
-				bch_btree_leaf_dirty(b, op);
+				bch_btree_leaf_dirty(b, journal_ref);
 			else
 				bch_btree_node_write(b, &op->cl);
 		}
@@ -2162,7 +2166,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 
 	BUG_ON(op->type != BTREE_INSERT);
 
-	ret = bch_btree_insert_node(b, op, &insert);
+	ret = bch_btree_insert_node(b, op, &insert, NULL);
 
 	BUG_ON(!ret && !bch_keylist_empty(&insert));
 out:
@@ -2172,7 +2176,7 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
 }
 
 static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
-				    struct keylist *keys)
+				    struct keylist *keys, atomic_t *journal_ref)
 {
 	if (bch_keylist_empty(keys))
 		return 0;
@@ -2189,14 +2193,14 @@ static int bch_btree_insert_recurse(struct btree *b, struct btree_op *op,
 			return -EIO;
 		}
 
-		return btree(insert_recurse, k, b, op, keys);
+		return btree(insert_recurse, k, b, op, keys, journal_ref);
 	} else {
-		return bch_btree_insert_node(b, op, keys);
+		return bch_btree_insert_node(b, op, keys, journal_ref);
 	}
 }
 
 int bch_btree_insert(struct btree_op *op, struct cache_set *c,
-		     struct keylist *keys)
+		     struct keylist *keys, atomic_t *journal_ref)
 {
 	int ret = 0;
@@ -2210,7 +2214,7 @@ int bch_btree_insert(struct btree_op *op, struct cache_set *c,
 
 	while (!bch_keylist_empty(keys)) {
 		op->lock = 0;
-		ret = btree_root(insert_recurse, c, op, keys);
+		ret = btree_root(insert_recurse, c, op, keys, journal_ref);
 
 		if (ret == -EAGAIN) {
 			ret = 0;
...
@@ -238,17 +238,6 @@ void __bkey_put(struct cache_set *c, struct bkey *k);
 
 struct btree_op {
 	struct closure cl;
-	struct cache_set *c;
-
-	/* Journal entry we have a refcount on */
-	atomic_t *journal;
-
-	/* Bio to be inserted into the cache */
-	struct bio *cache_bio;
-
-	unsigned inode;
-
-	uint16_t write_prio;
 
 	/* Btree level at which we start taking write locks */
 	short lock;
@@ -259,11 +248,6 @@ struct btree_op {
 		BTREE_REPLACE
 	} type:8;
 
-	unsigned csum:1;
-	unsigned bypass:1;
-	unsigned flush_journal:1;
-	unsigned insert_data_done:1;
-
 	unsigned insert_collision:1;
 
 	BKEY_PADDED(replace);
@@ -303,12 +287,13 @@ struct btree *bch_btree_node_get(struct cache_set *, struct bkey *, int, bool);
 
 int bch_btree_insert_check_key(struct btree *, struct btree_op *,
 			       struct bkey *);
-int bch_btree_insert(struct btree_op *, struct cache_set *, struct keylist *);
+int bch_btree_insert(struct btree_op *, struct cache_set *,
+		     struct keylist *, atomic_t *);
 
 int bch_gc_thread_start(struct cache_set *);
 size_t bch_btree_gc_finish(struct cache_set *);
 void bch_moving_gc(struct cache_set *);
-int bch_btree_check(struct cache_set *, struct btree_op *);
+int bch_btree_check(struct cache_set *);
 uint8_t __bch_btree_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
...
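
With struct cache_set * and the journal pin gone from btree_op, callers now build the op on the stack and pass both explicitly. Below is a minimal sketch of the new bch_btree_insert() calling convention, modeled on the journal-replay and write_dirty_finish() hunks that follow; insert_keys_example is a made-up name and error handling is trimmed:

/* Sketch only, modeled on the callers changed by this patch. */
static int insert_keys_example(struct cache_set *c, struct keylist *keys,
			       atomic_t *journal_ref)
{
        struct btree_op op;
        int ret;

        bch_btree_op_init_stack(&op);

        /* journal_ref may be NULL when no journal pin is held,
         * as in write_dirty_finish(). */
        ret = bch_btree_insert(&op, c, keys, journal_ref);
        closure_sync(&op.cl);

        return ret;
}
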
@@ -30,17 +30,20 @@ static void journal_read_endio(struct bio *bio, int error)
 }
 
 static int journal_read_bucket(struct cache *ca, struct list_head *list,
-			       struct btree_op *op, unsigned bucket_index)
+			       unsigned bucket_index)
 {
 	struct journal_device *ja = &ca->journal;
 	struct bio *bio = &ja->bio;
 
 	struct journal_replay *i;
 	struct jset *j, *data = ca->set->journal.w[0].data;
+	struct closure cl;
 	unsigned len, left, offset = 0;
 	int ret = 0;
 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
 
+	closure_init_stack(&cl);
+
 	pr_debug("reading %llu", (uint64_t) bucket);
 
 	while (offset < ca->sb.bucket_size) {
@@ -54,11 +57,11 @@ reread: left = ca->sb.bucket_size - offset;
 		bio->bi_size	= len << 9;
 		bio->bi_end_io	= journal_read_endio;
-		bio->bi_private = &op->cl;
+		bio->bi_private = &cl;
 		bch_bio_map(bio, data);
 
-		closure_bio_submit(bio, &op->cl, ca);
-		closure_sync(&op->cl);
+		closure_bio_submit(bio, &cl, ca);
+		closure_sync(&cl);
 
 		/* This function could be simpler now since we no longer write
 		 * journal entries that overlap bucket boundaries; this means
@@ -128,12 +131,11 @@ reread: left = ca->sb.bucket_size - offset;
 	return ret;
 }
 
-int bch_journal_read(struct cache_set *c, struct list_head *list,
-		     struct btree_op *op)
+int bch_journal_read(struct cache_set *c, struct list_head *list)
 {
 #define read_bucket(b)						\
 	({							\
-		int ret = journal_read_bucket(ca, list, op, b);	\
+		int ret = journal_read_bucket(ca, list, b);	\
 		__set_bit(b, bitmap);				\
 		if (ret < 0)					\
 			return ret;				\
@@ -291,8 +293,7 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
 	}
 }
 
-int bch_journal_replay(struct cache_set *s, struct list_head *list,
-		       struct btree_op *op)
+int bch_journal_replay(struct cache_set *s, struct list_head *list)
 {
 	int ret = 0, keys = 0, entries = 0;
 	struct bkey *k;
@@ -301,8 +302,11 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
 	struct keylist keylist;
+	struct btree_op op;
 
 	bch_keylist_init(&keylist);
+	bch_btree_op_init_stack(&op);
+	op.lock = SHRT_MAX;
 
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
@@ -319,9 +323,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 			bkey_copy(keylist.top, k);
 			bch_keylist_push(&keylist);
 
-			op->journal = i->pin;
-
-			ret = bch_btree_insert(op, s, &keylist);
+			ret = bch_btree_insert(&op, s, &keylist, i->pin);
 			if (ret)
 				goto err;
@@ -346,7 +348,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list,
 		kfree(i);
 	}
 err:
-	closure_sync(&op->cl);
+	closure_sync(&op.cl);
 	return ret;
 }
@@ -368,8 +370,8 @@ static void btree_flush_write(struct cache_set *c)
 		if (!best)
 			best = b;
 		else if (journal_pin_cmp(c,
-					 btree_current_write(best),
-					 btree_current_write(b))) {
+					 btree_current_write(best)->journal,
+					 btree_current_write(b)->journal)) {
 			best = b;
 		}
 	}
...
@@ -189,8 +189,7 @@ struct journal_device {
 };
 
 #define journal_pin_cmp(c, l, r)					\
-	(fifo_idx(&(c)->journal.pin, (l)->journal) >			\
-	 fifo_idx(&(c)->journal.pin, (r)->journal))
+	(fifo_idx(&(c)->journal.pin, (l)) > fifo_idx(&(c)->journal.pin, (r)))
 
 #define JOURNAL_PIN	20000
@@ -206,10 +205,8 @@ atomic_t *bch_journal(struct cache_set *, struct keylist *, struct closure *);
 void bch_journal_next(struct journal *);
 void bch_journal_mark(struct cache_set *, struct list_head *);
 void bch_journal_meta(struct cache_set *, struct closure *);
-int bch_journal_read(struct cache_set *, struct list_head *,
-		     struct btree_op *);
-int bch_journal_replay(struct cache_set *, struct list_head *,
-		       struct btree_op *);
+int bch_journal_read(struct cache_set *, struct list_head *);
+int bch_journal_replay(struct cache_set *, struct list_head *);
 
 void bch_journal_free(struct cache_set *);
 int bch_journal_alloc(struct cache_set *);
...
@@ -55,9 +55,9 @@ static void write_moving_finish(struct closure *cl)
 	if (io->s.op.insert_collision)
 		trace_bcache_gc_copy_collision(&io->w->key);
 
-	bch_keybuf_del(&io->s.op.c->moving_gc_keys, io->w);
+	bch_keybuf_del(&io->s.c->moving_gc_keys, io->w);
 
-	up(&io->s.op.c->moving_in_flight);
+	up(&io->s.c->moving_in_flight);
 
 	closure_return_with_destructor(cl, moving_io_destructor);
 }
@@ -70,7 +70,7 @@ static void read_moving_endio(struct bio *bio, int error)
 	if (error)
 		io->s.error = error;
 
-	bch_bbio_endio(io->s.op.c, bio, error, "reading data to move");
+	bch_bbio_endio(io->s.c, bio, error, "reading data to move");
 }
 
 static void moving_init(struct moving_io *io)
@@ -99,11 +99,11 @@ static void write_moving(struct closure *cl)
 		io->bio.bio.bi_sector = KEY_START(&io->w->key);
 
 		s->op.lock = -1;
-		s->op.write_prio = 1;
-		s->op.cache_bio = &io->bio.bio;
+		s->write_prio = 1;
+		s->cache_bio = &io->bio.bio;
 
 		s->writeback = KEY_DIRTY(&io->w->key);
-		s->op.csum = KEY_CSUM(&io->w->key);
+		s->csum = KEY_CSUM(&io->w->key);
 
 		s->op.type = BTREE_REPLACE;
 		bkey_copy(&s->op.replace, &io->w->key);
@@ -121,7 +121,7 @@ static void read_moving_submit(struct closure *cl)
 	struct moving_io *io = container_of(s, struct moving_io, s);
 	struct bio *bio = &io->bio.bio;
 
-	bch_submit_bbio(bio, s->op.c, &io->w->key, 0);
+	bch_submit_bbio(bio, s->c, &io->w->key, 0);
 
 	continue_at(cl, write_moving, system_wq);
 }
@@ -151,8 +151,8 @@ static void read_moving(struct cache_set *c)
 		w->private = io;
 		io->w = w;
-		io->s.op.inode = KEY_INODE(&w->key);
-		io->s.op.c = c;
+		io->s.inode = KEY_INODE(&w->key);
+		io->s.c = c;
 
 		moving_init(io);
 		bio = &io->bio.bio;
...
This diff is collapsed.
@@ -8,19 +8,33 @@ struct search {
 	struct closure cl;
 
 	struct bcache_device *d;
+	struct cache_set *c;
 	struct task_struct *task;
 
 	struct bbio bio;
 	struct bio *orig_bio;
 	struct bio *cache_miss;
+
+	/* Bio to be inserted into the cache */
+	struct bio *cache_bio;
+
 	unsigned cache_bio_sectors;
 
+	unsigned inode;
+
 	unsigned recoverable:1;
 	unsigned unaligned_bvec:1;
 	unsigned write:1;
 	unsigned writeback:1;
+
+	unsigned csum:1;
+	unsigned bypass:1;
+	unsigned flush_journal:1;
+	unsigned insert_data_done:1;
+
+	uint16_t write_prio;
 
 	/* IO error returned to s->bio */
 	short error;
 
 	unsigned long start_time;
...
@@ -200,7 +200,7 @@ void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	mark_cache_stats(&dc->accounting.collector, hit, bypass);
-	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
+	mark_cache_stats(&s->c->accounting.collector, hit, bypass);
 #ifdef CONFIG_CGROUP_BCACHE
 	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
 #endif
@@ -210,21 +210,21 @@ void bch_mark_cache_readahead(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_readaheads);
-	atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
+	atomic_inc(&s->c->accounting.collector.cache_readaheads);
 }
 
 void bch_mark_cache_miss_collision(struct search *s)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_inc(&dc->accounting.collector.cache_miss_collisions);
-	atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
+	atomic_inc(&s->c->accounting.collector.cache_miss_collisions);
 }
 
 void bch_mark_sectors_bypassed(struct search *s, int sectors)
 {
 	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
 	atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
-	atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
+	atomic_add(sectors, &s->c->accounting.collector.sectors_bypassed);
 }
 
 void bch_cache_accounting_init(struct cache_accounting *acc,
...
@@ -1493,11 +1493,10 @@ static void run_cache_set(struct cache_set *c)
 	const char *err = "cannot allocate memory";
 	struct cached_dev *dc, *t;
 	struct cache *ca;
+	struct closure cl;
 	unsigned i;
-	struct btree_op op;
 
-	bch_btree_op_init_stack(&op);
-	op.lock = SHRT_MAX;
+	closure_init_stack(&cl);
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
@@ -1508,7 +1507,7 @@ static void run_cache_set(struct cache_set *c)
 		struct jset *j;
 
 		err = "cannot allocate memory for journal";
-		if (bch_journal_read(c, &journal, &op))
+		if (bch_journal_read(c, &journal))
 			goto err;
 
 		pr_debug("btree_journal_read() done");
@@ -1543,12 +1542,12 @@ static void run_cache_set(struct cache_set *c)
 		list_del_init(&c->root->list);
 		rw_unlock(true, c->root);
 
-		err = uuid_read(c, j, &op.cl);
+		err = uuid_read(c, j, &cl);
 		if (err)
 			goto err;
 
 		err = "error in recovery";
-		if (bch_btree_check(c, &op))
+		if (bch_btree_check(c))
 			goto err;
 
 		bch_journal_mark(c, &journal);
@@ -1580,7 +1579,7 @@ static void run_cache_set(struct cache_set *c)
 		if (j->version < BCACHE_JSET_VERSION_UUID)
 			__uuid_write(c);
 
-		bch_journal_replay(c, &journal, &op);
+		bch_journal_replay(c, &journal);
 	} else {
 		pr_notice("invalidating existing data");
@@ -1616,7 +1615,7 @@ static void run_cache_set(struct cache_set *c)
 			goto err;
 
 		bkey_copy_key(&c->root->key, &MAX_KEY);
-		bch_btree_node_write(c->root, &op.cl);
+		bch_btree_node_write(c->root, &cl);
 
 		bch_btree_set_root(c->root);
 		rw_unlock(true, c->root);
@@ -1629,14 +1628,14 @@ static void run_cache_set(struct cache_set *c)
 		SET_CACHE_SYNC(&c->sb, true);
 
 		bch_journal_next(&c->journal);
-		bch_journal_meta(c, &op.cl);
+		bch_journal_meta(c, &cl);
 	}
 
 	err = "error starting gc thread";
 	if (bch_gc_thread_start(c))
 		goto err;
 
-	closure_sync(&op.cl);
+	closure_sync(&cl);
 	c->sb.last_mount = get_seconds();
 	bcache_write_super(c);
@@ -1647,7 +1646,7 @@ static void run_cache_set(struct cache_set *c)
 
 	return;
 err:
-	closure_sync(&op.cl);
+	closure_sync(&cl);
 	/* XXX: test this, it's broken */
 	bch_cache_set_error(c, err);
 }
...
@@ -155,7 +155,7 @@ static void write_dirty_finish(struct closure *cl)
 		for (i = 0; i < KEY_PTRS(&w->key); i++)
 			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);
 
-		bch_btree_insert(&op, dc->disk.c, &keys);
+		bch_btree_insert(&op, dc->disk.c, &keys, NULL);
 		closure_sync(&op.cl);
 
 		if (op.insert_collision)
@@ -433,9 +433,16 @@ static int bch_writeback_thread(void *arg)
 
 /* Init */
 
-static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
+struct sectors_dirty_init {
+	struct btree_op op;
+	unsigned inode;
+};
+
+static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
 				 struct bkey *k)
 {
+	struct sectors_dirty_init *op = container_of(_op,
+					struct sectors_dirty_init, op);
+
 	if (KEY_INODE(k) > op->inode)
 		return MAP_DONE;
@@ -448,12 +455,12 @@ static int sectors_dirty_init_fn(struct btree_op *op, struct btree *b,
 
 void bch_sectors_dirty_init(struct cached_dev *dc)
 {
-	struct btree_op op;
+	struct sectors_dirty_init op;
 
-	bch_btree_op_init_stack(&op);
+	bch_btree_op_init_stack(&op.op);
 	op.inode = dc->disk.id;
 
-	bch_btree_map_keys(&op, dc->disk.c, &KEY(op.inode, 0, 0),
+	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
 			   sectors_dirty_init_fn, 0);
 }
...
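
The sectors_dirty_init change above shows the pattern this series moves toward: per-operation state lives in a wrapper struct that embeds btree_op, and the map callback recovers it with container_of(). A sketch of that pattern follows; struct key_counter, count_keys_fn, and count_keys are made-up illustrative names, and MAP_CONTINUE is assumed to be the "keep iterating" counterpart of the MAP_DONE return value seen in the hunk above:

/* Illustrative only: count the keys in a cache set by embedding a
 * btree_op in a wrapper struct, as sectors_dirty_init now does. */
struct key_counter {
        struct btree_op op;     /* embedded, so container_of() can recover us */
        uint64_t count;
};

static int count_keys_fn(struct btree_op *_op, struct btree *b, struct bkey *k)
{
        struct key_counter *counter = container_of(_op, struct key_counter, op);

        counter->count++;
        return MAP_CONTINUE;    /* assumption: continue the traversal */
}

static uint64_t count_keys(struct cache_set *c)
{
        struct key_counter counter;

        bch_btree_op_init_stack(&counter.op);
        counter.count = 0;

        bch_btree_map_keys(&counter.op, c, &ZERO_KEY, count_keys_fn, 0);
        return counter.count;
}
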