Commit 94932a08 authored by Alan Huang, committed by Kent Overstreet

bcachefs: Refactor bch2_bset_fix_lookup_table

bch2_bset_fix_lookup_table is too complicated to be easily understood,
and the comment "l now > where" in it is incorrect when where ==
t->end_offset. This patch therefore refactors the function; the idea is
that when where >= rw_aux_tree(b, t)[t->size - 1].offset, we don't need
to adjust the rw aux tree.
Signed-off-by: Alan Huang <mmpgouride@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent f1625637
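To make the idea in the commit message concrete, the following is a minimal toy sketch (hypothetical names such as toy_table and toy_fix_offsets; this is not the bcachefs implementation): the rw aux tree is modeled as a sorted array of offsets, and inserting keys at offset where only moves entries whose offset is at or past where, so an insertion past the last entry requires no adjustment of existing entries.

/*
 * Toy model of the lookup-table fast path (hypothetical names, not the
 * bcachefs code): entries are offsets sorted ascending; inserting keys
 * worth `shift` units of space at offset `where` only moves entries
 * whose offset is >= where, so when where is past the last entry the
 * table needs no adjustment.
 */
#include <stdio.h>

struct toy_table {
	unsigned	size;
	unsigned	offset[8];
};

static void toy_fix_offsets(struct toy_table *t, unsigned where, int shift)
{
	/* fast path corresponding to the new check in the patch */
	if (where > t->offset[t->size - 1])
		return;

	for (unsigned i = 0; i < t->size; i++)
		if (t->offset[i] >= where)
			t->offset[i] += shift;
}

int main(void)
{
	struct toy_table t = { .size = 3, .offset = { 0, 16, 32 } };

	toy_fix_offsets(&t, 40, 4);	/* past the last entry: no change */
	toy_fix_offsets(&t, 16, 4);	/* entries at 16 and 32 become 20 and 36 */

	for (unsigned i = 0; i < t.size; i++)
		printf("%u ", t.offset[i]);
	printf("\n");			/* prints: 0 20 36 */
	return 0;
}

In the actual patch the fast path additionally calls rw_aux_tree_insert_entry() so that a new entry can still be appended when the region past the last entry grows beyond a cache line; the toy omits that step.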
@@ -885,6 +885,38 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 
 /* Insert */
 
+static void rw_aux_tree_insert_entry(struct btree *b,
+				     struct bset_tree *t,
+				     unsigned idx)
+{
+	EBUG_ON(!idx || idx > t->size);
+	struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
+	struct bkey_packed *end = idx < t->size
+		? rw_aux_to_bkey(b, t, idx)
+		: btree_bkey_last(b, t);
+
+	if (t->size < bset_rw_tree_capacity(b, t) &&
+	    (void *) end - (void *) start > L1_CACHE_BYTES) {
+		struct bkey_packed *k = start;
+
+		while (1) {
+			k = bkey_p_next(k);
+			if (k == end)
+				break;
+
+			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
+				memmove(&rw_aux_tree(b, t)[idx + 1],
+					&rw_aux_tree(b, t)[idx],
+					(void *) &rw_aux_tree(b, t)[t->size] -
+					(void *) &rw_aux_tree(b, t)[idx]);
+				t->size++;
+				rw_aux_tree_set(b, t, idx, k);
+				break;
+			}
+		}
+	}
+}
+
 static void bch2_bset_fix_lookup_table(struct btree *b,
 					struct bset_tree *t,
 					struct bkey_packed *_where,
@@ -892,78 +924,54 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 					unsigned new_u64s)
 {
 	int shift = new_u64s - clobber_u64s;
-	unsigned l, j, where = __btree_node_key_to_offset(b, _where);
+	unsigned idx, j, where = __btree_node_key_to_offset(b, _where);
 
 	EBUG_ON(bset_has_ro_aux_tree(t));
 
 	if (!bset_has_rw_aux_tree(t))
 		return;
 
-	/* returns first entry >= where */
-	l = rw_aux_tree_bsearch(b, t, where);
-
-	if (!l) /* never delete first entry */
-		l++;
-	else if (l < t->size &&
-		 where < t->end_offset &&
-		 rw_aux_tree(b, t)[l].offset == where)
-		rw_aux_tree_set(b, t, l++, _where);
-
-	/* l now > where */
-
-	for (j = l;
-	     j < t->size &&
-	     rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
-	     j++)
-		;
-
-	if (j < t->size &&
-	    rw_aux_tree(b, t)[j].offset + shift ==
-	    rw_aux_tree(b, t)[l - 1].offset)
-		j++;
-
-	memmove(&rw_aux_tree(b, t)[l],
-		&rw_aux_tree(b, t)[j],
-		(void *) &rw_aux_tree(b, t)[t->size] -
-		(void *) &rw_aux_tree(b, t)[j]);
-	t->size -= j - l;
-
-	for (j = l; j < t->size; j++)
-		rw_aux_tree(b, t)[j].offset += shift;
-
-	EBUG_ON(l < t->size &&
-		rw_aux_tree(b, t)[l].offset ==
-		rw_aux_tree(b, t)[l - 1].offset);
-
-	if (t->size < bset_rw_tree_capacity(b, t) &&
-	    (l < t->size
-	     ? rw_aux_tree(b, t)[l].offset
-	     : t->end_offset) -
-	    rw_aux_tree(b, t)[l - 1].offset >
-	    L1_CACHE_BYTES / sizeof(u64)) {
-		struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
-		struct bkey_packed *end = l < t->size
-			? rw_aux_to_bkey(b, t, l)
-			: btree_bkey_last(b, t);
-		struct bkey_packed *k = start;
-
-		while (1) {
-			k = bkey_p_next(k);
-			if (k == end)
-				break;
-
-			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
-				memmove(&rw_aux_tree(b, t)[l + 1],
-					&rw_aux_tree(b, t)[l],
-					(void *) &rw_aux_tree(b, t)[t->size] -
-					(void *) &rw_aux_tree(b, t)[l]);
-				t->size++;
-				rw_aux_tree_set(b, t, l, k);
-				break;
-			}
-		}
-	}
+	if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
+		rw_aux_tree_insert_entry(b, t, t->size);
+		goto verify;
+	}
+
+	/* returns first entry >= where */
+	idx = rw_aux_tree_bsearch(b, t, where);
+
+	if (rw_aux_tree(b, t)[idx].offset == where) {
+		if (!idx) { /* never delete first entry */
+			idx++;
+		} else if (where < t->end_offset) {
+			rw_aux_tree_set(b, t, idx++, _where);
+		} else {
+			EBUG_ON(where != t->end_offset);
+			rw_aux_tree_insert_entry(b, t, --t->size);
+			goto verify;
+		}
+	}
+
+	EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
+	if (idx < t->size &&
+	    rw_aux_tree(b, t)[idx].offset + shift ==
+	    rw_aux_tree(b, t)[idx - 1].offset) {
+		memmove(&rw_aux_tree(b, t)[idx],
+			&rw_aux_tree(b, t)[idx + 1],
+			(void *) &rw_aux_tree(b, t)[t->size] -
+			(void *) &rw_aux_tree(b, t)[idx + 1]);
+		t->size -= 1;
+	}
+
+	for (j = idx; j < t->size; j++)
+		rw_aux_tree(b, t)[j].offset += shift;
+
+	EBUG_ON(idx < t->size &&
+		rw_aux_tree(b, t)[idx].offset ==
+		rw_aux_tree(b, t)[idx - 1].offset);
+
+	rw_aux_tree_insert_entry(b, t, idx);
 
+verify:
 	bch2_bset_verify_rw_aux_tree(b, t);
 	bset_aux_tree_verify(b);
 }