Commit c0fc30da authored and committed by Kent Overstreet

bcachefs: __bch2_btree_node_iter_fix() improvements

Being more rigorous about noting when the key the iterator currently
points to has changed - which should also give us a nice performance
improvement due to not having to check if we have to skip other bsets
backwards as much.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 36e9d698
...@@ -526,6 +526,10 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter, ...@@ -526,6 +526,10 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
unsigned offset = __btree_node_key_to_offset(b, where); unsigned offset = __btree_node_key_to_offset(b, where);
int shift = new_u64s - clobber_u64s; int shift = new_u64s - clobber_u64s;
unsigned old_end = t->end_offset - shift; unsigned old_end = t->end_offset - shift;
unsigned orig_iter_pos = node_iter->data[0].k;
bool iter_current_key_modified =
orig_iter_pos >= offset &&
orig_iter_pos <= offset + clobber_u64s;
btree_node_iter_for_each(node_iter, set) btree_node_iter_for_each(node_iter, set)
if (set->end == old_end) if (set->end == old_end)
...@@ -534,24 +538,18 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter, ...@@ -534,24 +538,18 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
/* didn't find the bset in the iterator - might have to readd it: */ /* didn't find the bset in the iterator - might have to readd it: */
if (new_u64s && if (new_u64s &&
btree_iter_pos_cmp(iter, b, where) > 0) { btree_iter_pos_cmp(iter, b, where) > 0) {
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
bch2_btree_node_iter_push(node_iter, b, where, end); bch2_btree_node_iter_push(node_iter, b, where, end);
goto fixup_done;
if (!b->c.level && } else {
node_iter == &iter->l[0].iter) /* Iterator is after key that changed */
bkey_disassemble(b, goto out_verify;
bch2_btree_node_iter_peek_all(node_iter, b),
&iter->k);
} }
goto iter_current_key_not_modified;
found: found:
set->end = t->end_offset; set->end = t->end_offset;
/* Iterator hasn't gotten to the key that changed yet: */ /* Iterator hasn't gotten to the key that changed yet: */
if (set->k < offset) if (set->k < offset)
return; goto out_verify;
if (new_u64s && if (new_u64s &&
btree_iter_pos_cmp(iter, b, where) > 0) { btree_iter_pos_cmp(iter, b, where) > 0) {
...@@ -561,40 +559,25 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter, ...@@ -561,40 +559,25 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
if (set->k == set->end) if (set->k == set->end)
bch2_btree_node_iter_set_drop(node_iter, set); bch2_btree_node_iter_set_drop(node_iter, set);
} else { } else {
/* Iterator is after key that changed */
set->k = (int) set->k + shift; set->k = (int) set->k + shift;
goto iter_current_key_not_modified; goto out_verify;
} }
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
bch2_btree_node_iter_sort(node_iter, b); bch2_btree_node_iter_sort(node_iter, b);
if (!b->c.level && node_iter == &iter->l[0].iter) { fixup_done:
/* if (node_iter->data[0].k != orig_iter_pos)
* not legal to call bkey_debugcheck() here, because we're iter_current_key_modified = true;
* called midway through the update path after update has been
* marked but before deletes have actually happened:
*/
#if 0
__btree_iter_peek_all(iter, &iter->l[0], &iter->k);
#endif
struct btree_iter_level *l = &iter->l[0];
struct bkey_packed *k =
bch2_btree_node_iter_peek_all(&l->iter, l->b);
if (unlikely(!k))
iter->k.type = KEY_TYPE_deleted;
else
bkey_disassemble(l->b, k, &iter->k);
}
iter_current_key_not_modified:
/* /*
* When a new key is added, and the node iterator now points to that * When a new key is added, and the node iterator now points to that
* key, the iterator might have skipped past deleted keys that should * key, the iterator might have skipped past deleted keys that should
* come after the key the iterator now points to. We have to rewind to * come after the key the iterator now points to. We have to rewind to
* before those deleted keys - otherwise bch2_btree_node_iter_prev_all() * before those deleted keys - otherwise
* breaks: * bch2_btree_node_iter_prev_all() breaks:
*/ */
if (!bch2_btree_node_iter_end(node_iter) && if (!bch2_btree_node_iter_end(node_iter) &&
iter_current_key_modified &&
(b->c.level || (b->c.level ||
(iter->flags & BTREE_ITER_IS_EXTENTS))) { (iter->flags & BTREE_ITER_IS_EXTENTS))) {
struct bset_tree *t; struct bset_tree *t;
...@@ -622,6 +605,22 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter, ...@@ -622,6 +605,22 @@ static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
} }
} }
if (!b->c.level &&
node_iter == &iter->l[0].iter &&
iter_current_key_modified) {
struct bkey_packed *k =
bch2_btree_node_iter_peek_all(node_iter, b);
if (likely(k)) {
bkey_disassemble(b, k, &iter->k);
} else {
/* XXX: for extents, calculate size of hole? */
iter->k.type = KEY_TYPE_deleted;
}
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
out_verify:
bch2_btree_node_iter_verify(node_iter, b); bch2_btree_node_iter_verify(node_iter, b);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment