Commit f33c58fc authored by Kent Overstreet

bcachefs: Kill BTREE_INSERT_USE_RESERVE

Now that we have journal watermarks and alloc watermarks unified,
BTREE_INSERT_USE_RESERVE is redundant and can be deleted.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 65db6049
...@@ -1719,7 +1719,8 @@ static int bch2_discard_one_bucket(struct btree_trans *trans, ...@@ -1719,7 +1719,8 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
write: write:
ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?: ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL, bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL); BCH_WATERMARK_btree|
BTREE_INSERT_NOFAIL);
if (ret) if (ret)
goto out; goto out;
...@@ -1827,7 +1828,8 @@ static int invalidate_one_bucket(struct btree_trans *trans, ...@@ -1827,7 +1828,8 @@ static int invalidate_one_bucket(struct btree_trans *trans,
ret = bch2_trans_update(trans, &alloc_iter, &a->k_i, ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
BTREE_TRIGGER_BUCKET_INVALIDATE) ?: BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
bch2_trans_commit(trans, NULL, NULL, bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_USE_RESERVE|BTREE_INSERT_NOFAIL); BCH_WATERMARK_btree|
BTREE_INSERT_NOFAIL);
if (ret) if (ret)
goto out; goto out;
......
...@@ -1766,6 +1766,10 @@ static void btree_node_write_work(struct work_struct *work) ...@@ -1766,6 +1766,10 @@ static void btree_node_write_work(struct work_struct *work)
} else { } else {
ret = bch2_trans_do(c, NULL, NULL, 0, ret = bch2_trans_do(c, NULL, NULL, 0,
bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key, bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
BCH_WATERMARK_reclaim|
BTREE_INSERT_JOURNAL_RECLAIM|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_NOCHECK_RW,
!wbio->wbio.failed.nr)); !wbio->wbio.failed.nr));
if (ret) if (ret)
goto err; goto err;
......
...@@ -650,7 +650,6 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans, ...@@ -650,7 +650,6 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
bch2_trans_commit(trans, NULL, NULL, bch2_trans_commit(trans, NULL, NULL,
BTREE_INSERT_NOCHECK_RW| BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_NOFAIL| BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
(ck->journal.seq == journal_last_seq(j) (ck->journal.seq == journal_last_seq(j)
? BCH_WATERMARK_reclaim ? BCH_WATERMARK_reclaim
: 0)| : 0)|
......
...@@ -27,7 +27,6 @@ enum btree_insert_flags { ...@@ -27,7 +27,6 @@ enum btree_insert_flags {
__BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS, __BTREE_INSERT_NOFAIL = BCH_WATERMARK_BITS,
__BTREE_INSERT_NOCHECK_RW, __BTREE_INSERT_NOCHECK_RW,
__BTREE_INSERT_LAZY_RW, __BTREE_INSERT_LAZY_RW,
__BTREE_INSERT_USE_RESERVE,
__BTREE_INSERT_JOURNAL_REPLAY, __BTREE_INSERT_JOURNAL_REPLAY,
__BTREE_INSERT_JOURNAL_RECLAIM, __BTREE_INSERT_JOURNAL_RECLAIM,
__BTREE_INSERT_NOWAIT, __BTREE_INSERT_NOWAIT,
...@@ -37,26 +36,23 @@ enum btree_insert_flags { ...@@ -37,26 +36,23 @@ enum btree_insert_flags {
}; };
/* Don't check for -ENOSPC: */ /* Don't check for -ENOSPC: */
#define BTREE_INSERT_NOFAIL (1 << __BTREE_INSERT_NOFAIL) #define BTREE_INSERT_NOFAIL BIT(__BTREE_INSERT_NOFAIL)
#define BTREE_INSERT_NOCHECK_RW (1 << __BTREE_INSERT_NOCHECK_RW) #define BTREE_INSERT_NOCHECK_RW BIT(__BTREE_INSERT_NOCHECK_RW)
#define BTREE_INSERT_LAZY_RW (1 << __BTREE_INSERT_LAZY_RW) #define BTREE_INSERT_LAZY_RW BIT(__BTREE_INSERT_LAZY_RW)
/* for copygc, or when merging btree nodes */
#define BTREE_INSERT_USE_RESERVE (1 << __BTREE_INSERT_USE_RESERVE)
/* Insert is for journal replay - don't get journal reservations: */ /* Insert is for journal replay - don't get journal reservations: */
#define BTREE_INSERT_JOURNAL_REPLAY (1 << __BTREE_INSERT_JOURNAL_REPLAY) #define BTREE_INSERT_JOURNAL_REPLAY BIT(__BTREE_INSERT_JOURNAL_REPLAY)
/* Insert is being called from journal reclaim path: */ /* Insert is being called from journal reclaim path: */
#define BTREE_INSERT_JOURNAL_RECLAIM (1 << __BTREE_INSERT_JOURNAL_RECLAIM) #define BTREE_INSERT_JOURNAL_RECLAIM BIT(__BTREE_INSERT_JOURNAL_RECLAIM)
/* Don't block on allocation failure (for new btree nodes: */ /* Don't block on allocation failure (for new btree nodes: */
#define BTREE_INSERT_NOWAIT (1 << __BTREE_INSERT_NOWAIT) #define BTREE_INSERT_NOWAIT BIT(__BTREE_INSERT_NOWAIT)
#define BTREE_INSERT_GC_LOCK_HELD (1 << __BTREE_INSERT_GC_LOCK_HELD) #define BTREE_INSERT_GC_LOCK_HELD BIT(__BTREE_INSERT_GC_LOCK_HELD)
#define BCH_HASH_SET_MUST_CREATE (1 << __BCH_HASH_SET_MUST_CREATE) #define BCH_HASH_SET_MUST_CREATE BIT(__BCH_HASH_SET_MUST_CREATE)
#define BCH_HASH_SET_MUST_REPLACE (1 << __BCH_HASH_SET_MUST_REPLACE) #define BCH_HASH_SET_MUST_REPLACE BIT(__BCH_HASH_SET_MUST_REPLACE)
int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *, int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
unsigned, unsigned); unsigned, unsigned);
...@@ -80,9 +76,10 @@ int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *, ...@@ -80,9 +76,10 @@ int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
struct btree *, unsigned); struct btree *, unsigned);
void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *); void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *, int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
struct btree *, struct bkey_i *, bool); struct btree *, struct bkey_i *,
int bch2_btree_node_update_key_get_iter(struct btree_trans *, unsigned, bool);
struct btree *, struct bkey_i *, bool); int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
struct bkey_i *, unsigned, bool);
int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id, int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
struct bpos, struct bpos); struct bpos, struct bpos);
......
...@@ -246,18 +246,12 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, ...@@ -246,18 +246,12 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp; BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
struct open_buckets ob = { .nr = 0 }; struct open_buckets ob = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 }; struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
unsigned nr_reserve; enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
enum bch_watermark alloc_reserve; unsigned nr_reserve = watermark > BCH_WATERMARK_reclaim
? BTREE_NODE_RESERVE
: 0;
int ret; int ret;
if (flags & BTREE_INSERT_USE_RESERVE) {
nr_reserve = 0;
alloc_reserve = BCH_WATERMARK_btree_copygc;
} else {
nr_reserve = BTREE_NODE_RESERVE;
alloc_reserve = BCH_WATERMARK_btree;
}
mutex_lock(&c->btree_reserve_cache_lock); mutex_lock(&c->btree_reserve_cache_lock);
if (c->btree_reserve_cache_nr > nr_reserve) { if (c->btree_reserve_cache_nr > nr_reserve) {
struct btree_alloc *a = struct btree_alloc *a =
...@@ -279,7 +273,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans, ...@@ -279,7 +273,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
&devs_have, &devs_have,
res->nr_replicas, res->nr_replicas,
c->opts.metadata_replicas_required, c->opts.metadata_replicas_required,
alloc_reserve, 0, cl, &wp); watermark, 0, cl, &wp);
if (unlikely(ret)) if (unlikely(ret))
return ERR_PTR(ret); return ERR_PTR(ret);
...@@ -647,11 +641,10 @@ static void btree_update_nodes_written(struct btree_update *as) ...@@ -647,11 +641,10 @@ static void btree_update_nodes_written(struct btree_update *as)
* which may require allocations as well. * which may require allocations as well.
*/ */
ret = commit_do(&trans, &as->disk_res, &journal_seq, ret = commit_do(&trans, &as->disk_res, &journal_seq,
BCH_WATERMARK_reclaim|
BTREE_INSERT_NOFAIL| BTREE_INSERT_NOFAIL|
BTREE_INSERT_NOCHECK_RW| BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_USE_RESERVE| BTREE_INSERT_JOURNAL_RECLAIM,
BTREE_INSERT_JOURNAL_RECLAIM|
BCH_WATERMARK_reclaim,
btree_update_nodes_written_trans(&trans, as)); btree_update_nodes_written_trans(&trans, as));
bch2_trans_unlock(&trans); bch2_trans_unlock(&trans);
...@@ -1049,14 +1042,24 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path, ...@@ -1049,14 +1042,24 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
? BCH_DISK_RESERVATION_NOFAIL : 0; ? BCH_DISK_RESERVATION_NOFAIL : 0;
unsigned nr_nodes[2] = { 0, 0 }; unsigned nr_nodes[2] = { 0, 0 };
unsigned update_level = level; unsigned update_level = level;
int journal_flags = flags & BCH_WATERMARK_MASK; enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
unsigned journal_flags = 0;
int ret = 0; int ret = 0;
u32 restart_count = trans->restart_count; u32 restart_count = trans->restart_count;
BUG_ON(!path->should_be_locked); BUG_ON(!path->should_be_locked);
if (watermark == BCH_WATERMARK_copygc)
watermark = BCH_WATERMARK_btree_copygc;
if (watermark < BCH_WATERMARK_btree)
watermark = BCH_WATERMARK_btree;
flags &= ~BCH_WATERMARK_MASK;
flags |= watermark;
if (flags & BTREE_INSERT_JOURNAL_RECLAIM) if (flags & BTREE_INSERT_JOURNAL_RECLAIM)
journal_flags |= JOURNAL_RES_GET_NONBLOCK; journal_flags |= JOURNAL_RES_GET_NONBLOCK;
journal_flags |= watermark;
while (1) { while (1) {
nr_nodes[!!update_level] += 1 + split; nr_nodes[!!update_level] += 1 + split;
...@@ -1845,9 +1848,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans, ...@@ -1845,9 +1848,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
parent = btree_node_parent(path, b); parent = btree_node_parent(path, b);
as = bch2_btree_update_start(trans, path, level, false, as = bch2_btree_update_start(trans, path, level, false,
BTREE_INSERT_NOFAIL| BTREE_INSERT_NOFAIL|flags);
BTREE_INSERT_USE_RESERVE|
flags);
ret = PTR_ERR_OR_ZERO(as); ret = PTR_ERR_OR_ZERO(as);
if (ret) if (ret)
goto err; goto err;
...@@ -2127,6 +2128,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, ...@@ -2127,6 +2128,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
struct btree_iter *iter, struct btree_iter *iter,
struct btree *b, struct btree *new_hash, struct btree *b, struct btree *new_hash,
struct bkey_i *new_key, struct bkey_i *new_key,
unsigned commit_flags,
bool skip_triggers) bool skip_triggers)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
...@@ -2187,12 +2189,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, ...@@ -2187,12 +2189,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
trans->extra_journal_entries.nr += jset_u64s(new_key->k.u64s); trans->extra_journal_entries.nr += jset_u64s(new_key->k.u64s);
} }
ret = bch2_trans_commit(trans, NULL, NULL, ret = bch2_trans_commit(trans, NULL, NULL, commit_flags);
BTREE_INSERT_NOFAIL|
BTREE_INSERT_NOCHECK_RW|
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_JOURNAL_RECLAIM|
BCH_WATERMARK_reclaim);
if (ret) if (ret)
goto err; goto err;
...@@ -2226,7 +2223,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans, ...@@ -2226,7 +2223,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter, int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
struct btree *b, struct bkey_i *new_key, struct btree *b, struct bkey_i *new_key,
bool skip_triggers) unsigned commit_flags, bool skip_triggers)
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct btree *new_hash = NULL; struct btree *new_hash = NULL;
...@@ -2256,8 +2253,8 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite ...@@ -2256,8 +2253,8 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
} }
path->intent_ref++; path->intent_ref++;
ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, ret = __bch2_btree_node_update_key(trans, iter, b, new_hash, new_key,
new_key, skip_triggers); commit_flags, skip_triggers);
--path->intent_ref; --path->intent_ref;
if (new_hash) { if (new_hash) {
...@@ -2275,7 +2272,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite ...@@ -2275,7 +2272,7 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
struct btree *b, struct bkey_i *new_key, struct btree *b, struct bkey_i *new_key,
bool skip_triggers) unsigned commit_flags, bool skip_triggers)
{ {
struct btree_iter iter; struct btree_iter iter;
int ret; int ret;
...@@ -2296,7 +2293,8 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans, ...@@ -2296,7 +2293,8 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
BUG_ON(!btree_node_hashed(b)); BUG_ON(!btree_node_hashed(b));
ret = bch2_btree_node_update_key(trans, &iter, b, new_key, skip_triggers); ret = bch2_btree_node_update_key(trans, &iter, b, new_key,
commit_flags, skip_triggers);
out: out:
bch2_trans_iter_exit(trans, &iter); bch2_trans_iter_exit(trans, &iter);
return ret; return ret;
......
...@@ -213,6 +213,9 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f ...@@ -213,6 +213,9 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
btree_write_buffered_journal_cmp, btree_write_buffered_journal_cmp,
NULL); NULL);
commit_flags &= ~BCH_WATERMARK_MASK;
commit_flags |= BCH_WATERMARK_reclaim;
for (i = keys; i < keys + nr; i++) { for (i = keys; i < keys + nr; i++) {
if (!i->journal_seq) if (!i->journal_seq)
continue; continue;
...@@ -231,8 +234,7 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f ...@@ -231,8 +234,7 @@ int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_f
ret = commit_do(trans, NULL, NULL, ret = commit_do(trans, NULL, NULL,
commit_flags| commit_flags|
BTREE_INSERT_NOFAIL| BTREE_INSERT_NOFAIL|
BTREE_INSERT_JOURNAL_RECLAIM| BTREE_INSERT_JOURNAL_RECLAIM,
BCH_WATERMARK_reclaim,
__bch2_btree_insert(trans, i->btree, &i->k, 0)); __bch2_btree_insert(trans, i->btree, &i->k, 0));
if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret))) if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
break; break;
......
...@@ -458,8 +458,7 @@ int bch2_data_update_init(struct btree_trans *trans, ...@@ -458,8 +458,7 @@ int bch2_data_update_init(struct btree_trans *trans,
m->op.compression_type = m->op.compression_type =
bch2_compression_opt_to_type[io_opts.background_compression ?: bch2_compression_opt_to_type[io_opts.background_compression ?:
io_opts.compression]; io_opts.compression];
if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;
m->op.watermark = BCH_WATERMARK_copygc;
bkey_for_each_ptr(ptrs, ptr) bkey_for_each_ptr(ptrs, ptr)
percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref); percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
......
...@@ -141,7 +141,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags) ...@@ -141,7 +141,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
break; break;
} }
ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, false); ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, 0, false);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) { if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
ret = 0; ret = 0;
continue; continue;
......
...@@ -202,7 +202,7 @@ static int bch2_copygc(struct btree_trans *trans, ...@@ -202,7 +202,7 @@ static int bch2_copygc(struct btree_trans *trans,
{ {
struct bch_fs *c = trans->c; struct bch_fs *c = trans->c;
struct data_update_opts data_opts = { struct data_update_opts data_opts = {
.btree_insert_flags = BTREE_INSERT_USE_RESERVE|BCH_WATERMARK_copygc, .btree_insert_flags = BCH_WATERMARK_copygc,
}; };
move_buckets buckets = { 0 }; move_buckets buckets = { 0 };
struct move_bucket_in_flight *f; struct move_bucket_in_flight *f;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment