Commit 0242130f authored by Kent Overstreet

bcachefs: Refactor bkey_cached_alloc() path

Clean up the arguments passed and make them more consistent.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent da4474f2
...@@ -151,33 +151,34 @@ static void bkey_cached_free_fast(struct btree_key_cache *bc, ...@@ -151,33 +151,34 @@ static void bkey_cached_free_fast(struct btree_key_cache *bc,
} }
static struct bkey_cached * static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, bkey_cached_alloc(struct btree_trans *trans)
struct btree_key_cache *c)
{ {
struct bch_fs *c = trans->c;
struct btree_key_cache *bc = &c->btree_key_cache;
struct bkey_cached *ck = NULL; struct bkey_cached *ck = NULL;
struct btree_key_cache_freelist *f; struct btree_key_cache_freelist *f;
preempt_disable(); preempt_disable();
f = this_cpu_ptr(c->pcpu_freed); f = this_cpu_ptr(bc->pcpu_freed);
if (f->nr) if (f->nr)
ck = f->objs[--f->nr]; ck = f->objs[--f->nr];
preempt_enable(); preempt_enable();
if (!ck) { if (!ck) {
mutex_lock(&c->lock); mutex_lock(&bc->lock);
preempt_disable(); preempt_disable();
f = this_cpu_ptr(c->pcpu_freed); f = this_cpu_ptr(bc->pcpu_freed);
while (!list_empty(&c->freed) && while (!list_empty(&bc->freed) &&
f->nr < ARRAY_SIZE(f->objs) / 2) { f->nr < ARRAY_SIZE(f->objs) / 2) {
ck = list_last_entry(&c->freed, struct bkey_cached, list); ck = list_last_entry(&bc->freed, struct bkey_cached, list);
list_del_init(&ck->list); list_del_init(&ck->list);
f->objs[f->nr++] = ck; f->objs[f->nr++] = ck;
} }
ck = f->nr ? f->objs[--f->nr] : NULL; ck = f->nr ? f->objs[--f->nr] : NULL;
preempt_enable(); preempt_enable();
mutex_unlock(&c->lock); mutex_unlock(&bc->lock);
} }
if (ck) { if (ck) {
...@@ -185,14 +186,14 @@ bkey_cached_alloc(struct btree_trans *trans, ...@@ -185,14 +186,14 @@ bkey_cached_alloc(struct btree_trans *trans,
ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent); ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_intent);
if (unlikely(ret)) { if (unlikely(ret)) {
bkey_cached_move_to_freelist(c, ck); bkey_cached_move_to_freelist(bc, ck);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_write); ret = btree_node_lock_nopath(trans, &ck->c, SIX_LOCK_write);
if (unlikely(ret)) { if (unlikely(ret)) {
six_unlock_intent(&ck->c.lock); six_unlock_intent(&ck->c.lock);
bkey_cached_move_to_freelist(c, ck); bkey_cached_move_to_freelist(bc, ck);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
...@@ -239,15 +240,14 @@ bkey_cached_reuse(struct btree_key_cache *c) ...@@ -239,15 +240,14 @@ bkey_cached_reuse(struct btree_key_cache *c)
static struct bkey_cached * static struct bkey_cached *
btree_key_cache_create(struct btree_trans *trans, btree_key_cache_create(struct btree_trans *trans,
enum btree_id btree_id, struct btree_path *path)
struct bpos pos)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree_key_cache *bc = &c->btree_key_cache; struct btree_key_cache *bc = &c->btree_key_cache;
struct bkey_cached *ck; struct bkey_cached *ck;
bool was_new = true; bool was_new = true;
ck = bkey_cached_alloc(trans, bc); ck = bkey_cached_alloc(trans);
if (unlikely(IS_ERR(ck))) if (unlikely(IS_ERR(ck)))
return ck; return ck;
...@@ -255,20 +255,20 @@ btree_key_cache_create(struct btree_trans *trans, ...@@ -255,20 +255,20 @@ btree_key_cache_create(struct btree_trans *trans,
ck = bkey_cached_reuse(bc); ck = bkey_cached_reuse(bc);
if (unlikely(!ck)) { if (unlikely(!ck)) {
bch_err(c, "error allocating memory for key cache item, btree %s", bch_err(c, "error allocating memory for key cache item, btree %s",
bch2_btree_ids[btree_id]); bch2_btree_ids[path->btree_id]);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
was_new = false; was_new = false;
} else { } else {
if (btree_id == BTREE_ID_subvolumes) if (path->btree_id == BTREE_ID_subvolumes)
six_lock_pcpu_alloc(&ck->c.lock); six_lock_pcpu_alloc(&ck->c.lock);
} }
ck->c.level = 0; ck->c.level = 0;
ck->c.btree_id = btree_id; ck->c.btree_id = path->btree_id;
ck->key.btree_id = btree_id; ck->key.btree_id = path->btree_id;
ck->key.pos = pos; ck->key.pos = path->pos;
ck->valid = false; ck->valid = false;
ck->flags = 1U << BKEY_CACHED_ACCESSED; ck->flags = 1U << BKEY_CACHED_ACCESSED;
...@@ -396,7 +396,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path ...@@ -396,7 +396,7 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
retry: retry:
ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos); ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
if (!ck) { if (!ck) {
ck = btree_key_cache_create(trans, path->btree_id, path->pos); ck = btree_key_cache_create(trans, path);
ret = PTR_ERR_OR_ZERO(ck); ret = PTR_ERR_OR_ZERO(ck);
if (ret) if (ret)
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment