Commit 8bfec4f3 authored by Daniel Borkmann

Merge branch 'bpf-sockaddr-wide-store'

Stanislav Fomichev says:

====================
Clang can generate 8-byte stores for user_ip6 & msg_src_ip6,
let's support that on the verifier side.

v3:
* fix comment spelling: an -> and (Andrii Nakryiko)

v2:
* Add simple cover letter (Yonghong Song)
* Update comments (Yonghong Song)
* Remove [4] selftests (Yonghong Song)
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents d2850ce0 76d95077
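
As a concrete illustration (not part of this series): a minimal cgroup/sendmsg6 program whose 16-byte address copy clang is free to lower into two 8-byte context stores. This is a hedged sketch only; the section name, helper header and placeholder address are illustrative.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* for SEC(); assumes libbpf headers */

SEC("cgroup/sendmsg6")
int rewrite_dst6(struct bpf_sock_addr *ctx)
{
	__u32 new_dst[4] = {};	/* placeholder address, network byte order */

	/* A 16-byte copy into user_ip6. At -O2, clang may emit this as two
	 * 8-byte (BPF_DW) stores at 8-byte-aligned context offsets, which is
	 * exactly the pattern this series teaches the verifier to accept.
	 */
	__builtin_memcpy(ctx->user_ip6, new_dst, sizeof(new_dst));

	return 1;	/* let the sendmsg proceed */
}

char _license[] SEC("license") = "GPL";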
@@ -747,6 +747,12 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
return size <= size_default && (size & (size - 1)) == 0;
}
#define bpf_ctx_wide_store_ok(off, size, type, field) \
(size == sizeof(__u64) && \
off >= offsetof(type, field) && \
off + sizeof(__u64) <= offsetofend(type, field) && \
off % sizeof(__u64) == 0)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
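To see what the new check accepts, here is a small userspace sketch (not from the patch) that evaluates the same predicate against struct bpf_sock_addr's user_ip6; the offsetofend() fallback is only there because userspace headers may not provide it. user_ip6 occupies bytes 8..23 of the context, so of the four word offsets only 8 and 16 pass, matching the selftests added at the end of this series.

#include <stdio.h>
#include <stddef.h>
#include <linux/bpf.h>

#ifndef offsetofend
#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))
#endif

#define bpf_ctx_wide_store_ok(off, size, type, field)		\
	(size == sizeof(__u64) &&				\
	 off >= offsetof(type, field) &&			\
	 off + sizeof(__u64) <= offsetofend(type, field) &&	\
	 off % sizeof(__u64) == 0)

int main(void)
{
	unsigned int off;

	/* Prints: off=8 ok=1, off=12 ok=0, off=16 ok=1, off=20 ok=0 */
	for (off = 8; off <= 20; off += 4)
		printf("off=%u ok=%d\n", off,
		       (int)bpf_ctx_wide_store_ok(off, sizeof(__u64),
						  struct bpf_sock_addr,
						  user_ip6));
	return 0;
}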
@@ -3247,7 +3247,7 @@ struct bpf_sock_addr {
__u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 user_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write.
__u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__u32 user_port; /* Allows 4-byte read and write.
@@ -3256,10 +3256,10 @@ struct bpf_sock_addr {
__u32 family; /* Allows 4-byte read, but no write */
__u32 type; /* Allows 4-byte read, but no write */
__u32 protocol; /* Allows 4-byte read, but no write */
__u32 msg_src_ip4; /* Allows 1,2,4-byte read an 4-byte write.
__u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write.
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__bpf_md_ptr(struct bpf_sock *, sk);
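In practice the updated comments mean: loads from the address words may be 1, 2 or 4 bytes wide, while stores must cover a full 4-byte word or, after this series, a naturally aligned 8-byte pair of words. A hedged sketch (section name and field choice are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>	/* for SEC(); assumes libbpf headers */

SEC("cgroup/connect6")
int access_widths(struct bpf_sock_addr *ctx)
{
	/* 1-byte (narrow) read of user_ip6 is permitted... */
	__u8 last_byte = ((__u8 *)&ctx->user_ip6[3])[3];

	/* ...but stores must be a full 4-byte word (or an aligned 8-byte
	 * pair of words); 1- and 2-byte stores are still rejected.
	 */
	ctx->user_ip6[3] = last_byte;	/* 4-byte write: allowed */

	return 1;
}

char _license[] SEC("license") = "GPL";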
@@ -6890,6 +6890,16 @@ static bool sock_addr_is_valid_access(int off, int size,
if (!bpf_ctx_narrow_access_ok(off, size, size_default))
return false;
} else {
if (bpf_ctx_wide_store_ok(off, size,
struct bpf_sock_addr,
user_ip6))
return true;
if (bpf_ctx_wide_store_ok(off, size,
struct bpf_sock_addr,
msg_src_ip6))
return true;
if (size != size_default)
return false;
}
@@ -7730,9 +7740,6 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantic similar to
* SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation.
*
* It doesn't support SIZE argument though since narrow stores are not
* supported for now.
*
* In addition it uses Temporary Field TF (member of struct S) as the 3rd
* "register" since two registers available in convert_ctx_access are not
* enough: we can't override neither SRC, since it contains value to store, nor
@@ -7740,7 +7747,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
* instructions. But we need a temporary place to save pointer to nested
* structure whose field we want to store to.
*/
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF) \
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \
do { \
int tmp_reg = BPF_REG_9; \
if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
@@ -7751,8 +7758,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
offsetof(S, TF)); \
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
si->dst_reg, offsetof(S, F)); \
*insn++ = BPF_STX_MEM( \
BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg, \
*insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \
bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
target_size) \
+ OFF); \
@@ -7764,8 +7770,8 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type,
TF) \
do { \
if (type == BPF_WRITE) { \
SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \
TF); \
SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \
OFF, TF); \
} else { \
SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \
S, NS, F, NF, SIZE, OFF); \
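For readers unfamiliar with these context-conversion macros, the store path above emits roughly the instruction sequence below, shown here for SIZE == BPF_DW. S, NS, F, NF, OFF and TF are the macro's own formal parameters, and bpf_target_off() is simplified to a plain offsetof(), so treat this as a sketch rather than a literal expansion.

/* Rough, simplified expansion of
 * SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, BPF_DW, OFF, TF).
 */
int tmp_reg = BPF_REG_9;	/* stepped down if it collides with src/dst */

/* 1. Spill tmp_reg's current value into the temporary field S.TF. */
*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, offsetof(S, TF));

/* 2. Load the pointer to the nested structure NS out of the context. */
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,
		      si->dst_reg, offsetof(S, F));

/* 3. Perform the store itself with the caller-supplied width (BPF_DW here)
 *    at the nested field's offset plus OFF.
 */
*insn++ = BPF_STX_MEM(BPF_DW, tmp_reg, si->src_reg, offsetof(NS, NF) + OFF);

/* 4. Restore tmp_reg from S.TF so the program's register state is intact. */
*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, offsetof(S, TF));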
@@ -3244,7 +3244,7 @@ struct bpf_sock_addr {
__u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 user_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write.
__u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__u32 user_port; /* Allows 4-byte read and write.
@@ -3253,10 +3253,10 @@ struct bpf_sock_addr {
__u32 family; /* Allows 4-byte read, but no write */
__u32 type; /* Allows 4-byte read, but no write */
__u32 protocol; /* Allows 4-byte read, but no write */
__u32 msg_src_ip4; /* Allows 1,2,4-byte read an 4-byte write.
__u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
* Stored in network byte order.
*/
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read an 4-byte write.
__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
* Stored in network byte order.
*/
__bpf_md_ptr(struct bpf_sock *, sk);
@@ -105,6 +105,7 @@ struct bpf_test {
__u64 data64[TEST_DATA_LEN / 8];
};
} retvals[MAX_TEST_RUNS];
enum bpf_attach_type expected_attach_type;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -850,6 +851,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
int fd_prog, expected_ret, alignment_prevented_execution;
int prog_len, prog_type = test->prog_type;
struct bpf_insn *prog = test->insns;
struct bpf_load_program_attr attr;
int run_errs, run_successes;
int map_fds[MAX_NR_MAPS];
const char *expected_err;
@@ -881,8 +883,17 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
pflags |= BPF_F_STRICT_ALIGNMENT;
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
pflags |= BPF_F_ANY_ALIGNMENT;
fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
memset(&attr, 0, sizeof(attr));
attr.prog_type = prog_type;
attr.expected_attach_type = test->expected_attach_type;
attr.insns = prog;
attr.insns_cnt = prog_len;
attr.license = "GPL";
attr.log_level = 4;
attr.prog_flags = pflags;
fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
printf("SKIP (unsupported program type %d)\n", prog_type);
skips++;
@@ -912,7 +923,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
printf("FAIL\nUnexpected success to load!\n");
goto fail_log;
}
if (!strstr(bpf_vlog, expected_err)) {
if (!expected_err || !strstr(bpf_vlog, expected_err)) {
printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
expected_err, bpf_vlog);
goto fail_log;
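The switch from bpf_verify_program() to bpf_load_program_xattr() is what lets the test runner hand expected_attach_type to the kernel; for BPF_PROG_TYPE_CGROUP_SOCK_ADDR the verifier needs it to decide which context fields are writable. A standalone sketch of the same call with the test plumbing stripped (function name and log buffer are illustrative):

#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static char log_buf[1 << 16];

int load_sock_addr_prog(const struct bpf_insn *insns, size_t insn_cnt)
{
	struct bpf_load_program_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR;
	attr.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG;
	attr.insns = insns;
	attr.insns_cnt = insn_cnt;
	attr.license = "GPL";
	attr.log_level = 4;

	/* Returns a program fd on success, a negative value on error;
	 * the verifier log lands in log_buf.
	 */
	return bpf_load_program_xattr(&attr, log_buf, sizeof(log_buf));
}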
#define BPF_SOCK_ADDR(field, off, res, err) \
{ \
"wide store to bpf_sock_addr." #field "[" #off "]", \
.insns = { \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, \
offsetof(struct bpf_sock_addr, field[off])), \
BPF_EXIT_INSN(), \
}, \
.result = res, \
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
.errstr = err, \
}
/* user_ip6[0] is u64 aligned */
BPF_SOCK_ADDR(user_ip6, 0, ACCEPT,
NULL),
BPF_SOCK_ADDR(user_ip6, 1, REJECT,
"invalid bpf_context access off=12 size=8"),
BPF_SOCK_ADDR(user_ip6, 2, ACCEPT,
NULL),
BPF_SOCK_ADDR(user_ip6, 3, REJECT,
"invalid bpf_context access off=20 size=8"),
/* msg_src_ip6[0] is _not_ u64 aligned */
BPF_SOCK_ADDR(msg_src_ip6, 0, REJECT,
"invalid bpf_context access off=44 size=8"),
BPF_SOCK_ADDR(msg_src_ip6, 1, ACCEPT,
NULL),
BPF_SOCK_ADDR(msg_src_ip6, 2, REJECT,
"invalid bpf_context access off=52 size=8"),
BPF_SOCK_ADDR(msg_src_ip6, 3, REJECT,
"invalid bpf_context access off=56 size=8"),
#undef BPF_SOCK_ADDR
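The expected offsets follow directly from the struct layout quoted earlier: user_ip6 starts at byte 8, so indices 0 and 2 (offsets 8 and 16) are 8-byte aligned and in bounds, while msg_src_ip6 starts at byte 44, so index 0 is misaligned, index 1 (offset 48) is the only aligned in-bounds slot, index 2 (offset 52) is misaligned, and index 3 (offset 56) is aligned but an 8-byte store there would run past the end of the field. For reference, the first entry expands to roughly this test_verifier record:

{
	"wide store to bpf_sock_addr.user_ip6[0]",
	.insns = {
		BPF_MOV64_IMM(BPF_REG_0, 1),
		BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
			    offsetof(struct bpf_sock_addr, user_ip6[0])),
		BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
	.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
	.errstr = NULL,
},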