Commit ac2ccddc authored by Kent Overstreet

bcachefs: Drop some anonymous structs, unions

Rust bindgen doesn't cope well with anonymous structs and unions. This
patch drops the fancy anonymous structs & unions in bkey_i that let us
use the same helpers for bkey_i and bkey_packed; since bkey_packed is an
internal type that's never exposed to outside code, it's only a minor
inconvenience.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 45dd05b3
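
For context, a minimal sketch (not part of the patch; the names are illustrative) of why anonymous members are a problem for bindgen — with no field name to bind to, it has to emit synthetic ones, which makes the generated Rust awkward to use:

	/* C input with an anonymous union: */
	struct foo {
		union {
			__u32	a;
			__u64	b;
		};
	};

	/*
	 * Representative bindgen output (abridged): the anonymous union
	 * becomes a synthetic field with a generated type name, so Rust
	 * callers can't simply write foo.a:
	 *
	 *	pub struct foo {
	 *		pub __bindgen_anon_1: foo__bindgen_ty_1,
	 *	}
	 */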
@@ -294,16 +294,8 @@ enum bch_bkey_fields {
 struct bkey_i {
 	__u64			_data[0];
 
-	union {
-	struct {
-		/* Size of combined key and value, in u64s */
-		__u8		u64s;
-	};
-	struct {
-		struct bkey	k;
-		struct bch_val	v;
-	};
-	};
+	struct bkey	k;
+	struct bch_val	v;
 };
 
 #define KEY(_inode, _offset, _size)					\
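With the anonymous members gone, the size and key fields of a bkey_i must be reached through k explicitly; most of the remaining hunks are that mechanical rename. A hypothetical one-liner to show the before/after:

	/* Illustrative only: size of a bkey_i, in u64s */
	static inline unsigned bkey_i_u64s(const struct bkey_i *k)
	{
		return k->k.u64s;	/* previously spelled k->u64s */
	}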
@@ -42,7 +42,12 @@ struct bkey_s {
 	};
 };
 
-#define bkey_next(_k)		vstruct_next(_k)
+#define bkey_p_next(_k)		vstruct_next(_k)
+
+static inline struct bkey_i *bkey_next(struct bkey_i *k)
+{
+	return (struct bkey_i *) (k->_data + k->k.u64s);
+}
 
 #define bkey_val_u64s(_k)	((_k)->u64s - BKEY_U64s)
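This is the other half of the change: bkey_p_next() keeps the old vstruct-style advance for bkey_packed (an internal type), while bkey_next() becomes a typed helper for bkey_i that steps by the combined key+value size in k.u64s. A sketch of the intended use (the helper and loop bounds are illustrative):

	/* Illustrative: walk a contiguous run of struct bkey_i */
	static void walk_keys(struct bkey_i *k, struct bkey_i *end)
	{
		for (; k != end; k = bkey_next(k))
			pr_debug("key: %u u64s\n", k->k.u64s);
	}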
@@ -46,7 +46,7 @@ static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
 	BUG_ON(!iter->used);
 
-	i->k = bkey_next(i->k);
+	i->k = bkey_p_next(i->k);
 
 	BUG_ON(i->k > i->end);
@@ -108,7 +108,7 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
 		       !should_drop_next_key(iter)) {
 			bkey_copy(out, k);
 			btree_keys_account_key_add(&nr, 0, out);
-			out = bkey_next(out);
+			out = bkey_p_next(out);
 		}
 
 		sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
@@ -147,7 +147,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
 
 		out->needs_whiteout = false;
 		btree_keys_account_key_add(&nr, 0, out);
-		out = bkey_next(out);
+		out = bkey_p_next(out);
 	}
 
 	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
@@ -194,7 +194,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
 			bkey_copy(out, in);
 		}
 		out->needs_whiteout |= needs_whiteout;
-		out = bkey_next(out);
+		out = bkey_p_next(out);
 	}
 
 	return (u64 *) out - (u64 *) dst;
@@ -66,7 +66,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 	for (_k = i->start;
 	     _k < vstruct_last(i);
 	     _k = _n) {
-		_n = bkey_next(_k);
+		_n = bkey_p_next(_k);
 
 		k = bkey_disassemble(b, _k, &uk);
@@ -539,7 +539,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
 				rw_aux_tree(b, t)[j - 1].offset);
 		}
 
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 		BUG_ON(k >= btree_bkey_last(b, t));
 	}
 }
@@ -730,7 +730,7 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 	/* First we figure out where the first key in each cacheline is */
 	eytzinger1_for_each(j, t->size - 1) {
 		while (bkey_to_cacheline(b, t, k) < cacheline)
-			prev = k, k = bkey_next(k);
+			prev = k, k = bkey_p_next(k);
 
 		if (k >= btree_bkey_last(b, t)) {
 			/* XXX: this path sucks */
@@ -747,7 +747,7 @@ static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 	}
 
 	while (k != btree_bkey_last(b, t))
-		prev = k, k = bkey_next(k);
+		prev = k, k = bkey_p_next(k);
 
 	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
 		bkey_init(&min_key.k);
@@ -885,7 +885,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
 	while ((p = __bkey_prev(b, t, k)) && !ret) {
-		for (i = p; i != k; i = bkey_next(i))
+		for (i = p; i != k; i = bkey_p_next(i))
 			if (i->type >= min_key_type)
 				ret = i;
@@ -896,10 +896,10 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 		BUG_ON(ret >= orig_k);
 
 		for (i = ret
-			? bkey_next(ret)
+			? bkey_p_next(ret)
 			: btree_bkey_first(b, t);
 		     i != orig_k;
-		     i = bkey_next(i))
+		     i = bkey_p_next(i))
 			BUG_ON(i->type >= min_key_type);
 	}
@@ -971,7 +971,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 		struct bkey_packed *k = start;
 
 		while (1) {
-			k = bkey_next(k);
+			k = bkey_p_next(k);
 			if (k == end)
 				break;
@@ -1205,12 +1205,12 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_cmp_p_or_unp(b, m,
 					lossy_packed_search, search) < 0)
-			m = bkey_next(m);
+			m = bkey_p_next(m);
 
 	if (!packed_search)
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_pos_cmp(b, m, search) < 0)
-			m = bkey_next(m);
+			m = bkey_p_next(m);
 
 	if (bch2_expensive_debug_checks) {
 		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
@@ -211,7 +211,7 @@ static inline size_t btree_aux_data_u64s(const struct btree *b)
 #define bset_tree_for_each_key(_b, _t, _k)				\
 	for (_k = btree_bkey_first(_b, _t);				\
 	     _k != btree_bkey_last(_b, _t);				\
-	     _k = bkey_next(_k))
+	     _k = bkey_p_next(_k))
 
 static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
 {
@@ -77,9 +77,9 @@ static void verify_no_dups(struct btree *b,
 	if (start == end)
 		return;
 
-	for (p = start, k = bkey_next(start);
+	for (p = start, k = bkey_p_next(start);
 	     k != end;
-	     p = k, k = bkey_next(k)) {
+	     p = k, k = bkey_p_next(k)) {
 		struct bkey l = bkey_unpack_key(b, p);
 		struct bkey r = bkey_unpack_key(b, k);
@@ -92,7 +92,7 @@ static void set_needs_whiteout(struct bset *i, int v)
 {
 	struct bkey_packed *k;
 
-	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 		k->needs_whiteout = v;
 }
@@ -175,7 +175,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
 	for (k = unwritten_whiteouts_start(c, b);
 	     k != unwritten_whiteouts_end(c, b);
-	     k = bkey_next(k))
+	     k = bkey_p_next(k))
 		*--ptrs = k;
 
 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
@@ -184,7 +184,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
 	while (ptrs != ptrs_end) {
 		bkey_copy(k, *ptrs);
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 		ptrs++;
 	}
@@ -256,11 +256,11 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
 		out = i->start;
 
 		for (k = start; k != end; k = n) {
-			n = bkey_next(k);
+			n = bkey_p_next(k);
 
 			if (!bkey_deleted(k)) {
 				bkey_copy(out, k);
-				out = bkey_next(out);
+				out = bkey_p_next(out);
 			} else {
 				BUG_ON(k->needs_whiteout);
 			}
@@ -652,7 +652,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 		struct bset *i = bset(b, t);
 		struct bkey_packed *k;
 
-		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
 				break;
@@ -665,7 +665,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 			set_btree_bset_end(b, t);
 		}
 
-		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 			if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
 				break;
@@ -843,7 +843,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 		struct bkey_s u;
 		struct bkey tmp;
 
-		if (btree_err_on(bkey_next(k) > vstruct_last(i),
+		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
 				 BTREE_ERR_FIXABLE, c, NULL, b, i,
 				 "key extends past end of bset")) {
 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -854,7 +854,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 				 BTREE_ERR_FIXABLE, c, NULL, b, i,
 				 "invalid bkey format %u", k->format)) {
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			continue;
 		}
@@ -878,7 +878,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
 
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			continue;
 		}
@@ -901,14 +901,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 
 			if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
 				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-				memmove_u64s_down(k, bkey_next(k),
+				memmove_u64s_down(k, bkey_p_next(k),
 						  (u64 *) vstruct_end(i) - (u64 *) k);
 				continue;
 			}
 		}
 
 		prev = k;
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 	}
 fsck_err:
 	printbuf_exit(&buf);
@@ -1139,7 +1139,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			btree_keys_account_key_drop(&b->nr, 0, k);
 
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			set_btree_bset_end(b, b->set);
 			continue;
@@ -1151,7 +1151,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			bp.v->mem_ptr = 0;
 		}
 
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 	}
 
 	bch2_bset_build_aux_tree(b, b->set, false);
@@ -776,14 +776,14 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
 	struct bkey_cached *ck = (void *) path->l[0].b;
 	bool kick_reclaim = false;
 
-	BUG_ON(insert->u64s > ck->u64s);
+	BUG_ON(insert->k.u64s > ck->u64s);
 
 	if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
 		int difference;
 
-		BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);
+		BUG_ON(jset_u64s(insert->k.u64s) > trans->journal_preres.u64s);
 
-		difference = jset_u64s(insert->u64s) - ck->res.u64s;
+		difference = jset_u64s(insert->k.u64s) - ck->res.u64s;
 		if (difference > 0) {
 			trans->journal_preres.u64s -= difference;
 			ck->res.u64s += difference;
@@ -1413,7 +1413,7 @@ static void __btree_split_node(struct btree_update *as,
 
 		out[i]->needs_whiteout = false;
 		btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
-		out[i] = bkey_next(out[i]);
+		out[i] = bkey_p_next(out[i]);
 	}
 
 	for (i = 0; i < 2; i++) {
@@ -2445,7 +2445,7 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c,
 					  BCH_JSET_ENTRY_btree_root,
 					  i, c->btree_roots[i].level,
 					  &c->btree_roots[i].key,
-					  c->btree_roots[i].key.u64s);
+					  c->btree_roots[i].key.k.u64s);
 			end = vstruct_next(end);
 		}
@@ -153,7 +153,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 	BUG_ON(b->nsets != 1);
 
-	for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_next(k))
+	for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
 		if (k->type == KEY_TYPE_btree_ptr_v2) {
 			struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k);
 
 			v->mem_ptr = 0;
@@ -633,7 +633,7 @@ static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr
 		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
 		       &ptr,
 		       sizeof(ptr));
-		k->u64s++;
+		k->k.u64s++;
 		break;
 	default:
 		BUG();
@@ -733,7 +733,7 @@ static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
 		}
 
 		if (dst != src)
-			memmove_u64s_down(dst, src, src->u64s);
+			memmove_u64s_down(dst, src, src->k.u64s);
 		dst = bkey_next(dst);
 	}
@@ -356,7 +356,7 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
 	struct bkey_i *k;
 	bool first = true;
 
-	vstruct_for_each(entry, k) {
+	jset_entry_for_each_key(entry, k) {
 		if (!first) {
 			prt_newline(out);
 			prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
@@ -40,9 +40,14 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
 	     (entry = __jset_entry_type_next(jset, entry, type));	\
 	     entry = vstruct_next(entry))
 
-#define for_each_jset_key(k, _n, entry, jset)				\
-	for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)	\
-		vstruct_for_each_safe(entry, k, _n)
+#define jset_entry_for_each_key(_e, _k)					\
+	for (_k = (_e)->start;						\
+	     _k < vstruct_last(_e);					\
+	     _k = bkey_next(_k))
+
+#define for_each_jset_key(k, entry, jset)				\
+	for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
+		jset_entry_for_each_key(entry, k)
 
 int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
 				struct jset_entry *, unsigned, int, int);
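A sketch of how the reworked macros compose — the counting helper below is hypothetical, but the iteration pattern follows the new definitions (the _n safe-iteration cursor is gone from for_each_jset_key(), since bkey_next() reads the key's size before advancing):

	/* Illustrative only: count btree keys in a journal set */
	static unsigned count_jset_keys(struct jset *j)
	{
		struct jset_entry *entry;
		struct bkey_i *k;
		unsigned nr = 0;

		for_each_jset_key(k, entry, j)
			nr++;
		return nr;
	}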
@@ -481,7 +481,7 @@ static int journal_keys_sort(struct bch_fs *c)
 	struct genradix_iter iter;
 	struct journal_replay *i, **_i;
 	struct jset_entry *entry;
-	struct bkey_i *k, *_n;
+	struct bkey_i *k;
 	struct journal_keys *keys = &c->journal_keys;
 	struct journal_key *src, *dst;
 	size_t nr_keys = 0;
@@ -492,7 +492,7 @@ static int journal_keys_sort(struct bch_fs *c)
 		if (!i || i->ignore)
 			continue;
 
-		for_each_jset_key(k, _n, entry, &i->j)
+		for_each_jset_key(k, entry, &i->j)
 			nr_keys++;
 	}
@@ -511,7 +511,7 @@ static int journal_keys_sort(struct bch_fs *c)
 		if (!i || i->ignore)
 			continue;
 
-		for_each_jset_key(k, _n, entry, &i->j)
+		for_each_jset_key(k, entry, &i->j)
 			keys->d[keys->nr++] = (struct journal_key) {
 				.btree_id	= entry->btree_id,
 				.level		= entry->level,
@@ -871,7 +871,7 @@ static int verify_superblock_clean(struct bch_fs *c,
 		    IS_ERR(k1) ||
 		    IS_ERR(k2) ||
 		    k1->k.u64s != k2->k.u64s ||
-		    memcmp(k1, k2, bkey_bytes(k1)) ||
+		    memcmp(k1, k2, bkey_bytes(&k1->k)) ||
 		    l1 != l2, c,
 			"superblock btree root %u doesn't match journal after clean shutdown\n"
 			"sb:      l=%u %s\n"
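bkey_bytes() computes a key's size from a struct bkey header (roughly (_k)->u64s * sizeof(__u64)), so with the anonymous u64s alias gone, a bkey_i caller has to pass &k1->k rather than the bkey_i itself.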
@@ -233,7 +233,13 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
 	orig->k.type = KEY_TYPE_reflink_p;
 	r_p = bkey_i_to_reflink_p(orig);
 	set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
+
+	/* FORTIFY_SOURCE is broken here, and doesn't provide unsafe_memset() */
+#if !defined(__NO_FORTIFY) && defined(__OPTIMIZE__) && defined(CONFIG_FORTIFY_SOURCE)
+	__underlying_memset(&r_p->v, 0, sizeof(r_p->v));
+#else
 	memset(&r_p->v, 0, sizeof(r_p->v));
+#endif
+
 	r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
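__underlying_memset() is the raw __builtin_memset that include/linux/fortify-string.h wraps; calling it directly sidesteps the fortified wrapper whose compile-time object-size check misfires on this value, and the #if guard appears to mirror the conditions under which fortify is active.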