Commit bc34dee6 authored by Joanne Koong, committed by Andrii Nakryiko

bpf: Dynptr support for ring buffers

Currently, our only way of writing dynamically-sized data into a ring
buffer is through bpf_ringbuf_output, but this incurs an extra memcpy
cost. bpf_ringbuf_reserve + bpf_ringbuf_submit avoids this extra
memcpy, but it can only safely support reservation sizes that are
statically known, since the verifier cannot guarantee that the bpf
program won't access memory outside the reserved space.

The bpf_dynptr abstraction allows for dynamically-sized ring buffer
reservations without the extra memcpy.

There are 3 new APIs:

long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr);
void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags);
void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags);

These closely follow the semantics of the original ringbuf APIs. For
example, every ringbuf dynptr that has been reserved must be either
submitted or discarded before the program exits; a short usage sketch
follows.
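
As an illustration only (not part of this patch), here is a minimal
sketch of how a BPF program might use the new helpers. It assumes
libbpf's bpf_helpers.h, a ringbuf map named "rb", and the
bpf_dynptr_write() helper from the same dynptr series (shown with a
trailing flags argument; the exact signature may vary by kernel
version). All names and sizes are hypothetical.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 256 * 1024);
} rb SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_write")
int log_event(void *ctx)
{
	struct bpf_dynptr ptr;
	char msg[] = "hello";
	__u32 size = sizeof(msg); /* could just as well be computed at runtime */

	/* Reserve a sample; flags must be 0. The verifier requires a
	 * matching submit or discard on every path, even if the
	 * reservation fails.
	 */
	if (bpf_ringbuf_reserve_dynptr(&rb, size, 0, &ptr)) {
		bpf_ringbuf_discard_dynptr(&ptr, 0);
		return 0;
	}

	/* Fill the reservation through the dynptr interface, then
	 * publish it (or discard it if the write fails).
	 */
	if (bpf_dynptr_write(&ptr, 0, msg, sizeof(msg), 0))
		bpf_ringbuf_discard_dynptr(&ptr, 0);
	else
		bpf_ringbuf_submit_dynptr(&ptr, 0);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Nothing changes on the userspace side: samples submitted through the
dynptr interface are consumed with the usual libbpf ring_buffer APIs.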
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/bpf/20220523210712.3641569-4-joannelkoong@gmail.com
parent 263ae152
@@ -395,11 +395,14 @@ enum bpf_type_flag {
/* DYNPTR points to memory local to the bpf program. */
DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS),
/* DYNPTR points to a ringbuf record. */
DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS),
__BPF_TYPE_FLAG_MAX,
__BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
};
#define DYNPTR_TYPE_FLAG_MASK DYNPTR_TYPE_LOCAL
#define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF)
/* Max number of base types. */
#define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS)
@@ -2231,6 +2234,9 @@ extern const struct bpf_func_proto bpf_ringbuf_reserve_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_proto;
extern const struct bpf_func_proto bpf_ringbuf_query_proto;
extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto;
extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
@@ -2402,6 +2408,13 @@ enum bpf_dynptr_type {
BPF_DYNPTR_TYPE_INVALID,
/* Points to memory that is local to the bpf program */
BPF_DYNPTR_TYPE_LOCAL,
/* Underlying data is a ringbuf record */
BPF_DYNPTR_TYPE_RINGBUF,
};
void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
int bpf_dynptr_check_size(u32 size);
#endif /* _LINUX_BPF_H */
@@ -100,6 +100,8 @@ struct bpf_reg_state {
* for the purpose of tracking that it's freed.
* For PTR_TO_SOCKET this is used to share which pointers retain the
* same reference to the socket, to determine proper reference freeing.
* For stack slots that are dynptrs, this is used to track references to
* the dynptr to determine proper reference freeing.
*/
u32 id;
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
@@ -5189,6 +5189,38 @@ union bpf_attr {
* Return
* 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
* -EINVAL if flags is not 0.
*
* long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
* Description
* Reserve *size* bytes of payload in a ring buffer *ringbuf*
* through the dynptr interface. *flags* must be 0.
*
* Please note that a corresponding bpf_ringbuf_submit_dynptr or
* bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
* reservation fails. This is enforced by the verifier.
* Return
* 0 on success, or a negative error in case of failure.
*
* void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
* Description
* Submit reserved ring buffer sample, pointed to by *data*,
* through the dynptr interface. This is a no-op if the dynptr is
* invalid/null.
*
* For more information on *flags*, please see
* 'bpf_ringbuf_submit'.
* Return
* Nothing. Always succeeds.
*
* void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
* Description
* Discard reserved ring buffer sample through the dynptr
* interface. This is a no-op if the dynptr is invalid/null.
*
* For more information on *flags*, please see
* 'bpf_ringbuf_discard'.
* Return
* Nothing. Always succeeds.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5389,6 +5421,9 @@ union bpf_attr {
FN(map_lookup_percpu_elem), \
FN(skc_to_mptcp_sock), \
FN(dynptr_from_mem), \
FN(ringbuf_reserve_dynptr), \
FN(ringbuf_submit_dynptr), \
FN(ringbuf_discard_dynptr), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -1423,12 +1423,12 @@ static void bpf_dynptr_set_type(struct bpf_dynptr_kern *ptr, enum bpf_dynptr_typ
ptr->size |= type << DYNPTR_TYPE_SHIFT;
}
static int bpf_dynptr_check_size(u32 size)
int bpf_dynptr_check_size(u32 size)
{
return size > DYNPTR_MAX_SIZE ? -E2BIG : 0;
}
static void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size)
{
ptr->data = data;
@@ -1437,7 +1437,7 @@ static void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
bpf_dynptr_set_type(ptr, type);
}
static void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr)
{
memset(ptr, 0, sizeof(*ptr));
}
@@ -1523,6 +1523,12 @@ bpf_base_func_proto(enum bpf_func_id func_id)
return &bpf_ringbuf_discard_proto;
case BPF_FUNC_ringbuf_query:
return &bpf_ringbuf_query_proto;
case BPF_FUNC_ringbuf_reserve_dynptr:
return &bpf_ringbuf_reserve_dynptr_proto;
case BPF_FUNC_ringbuf_submit_dynptr:
return &bpf_ringbuf_submit_dynptr_proto;
case BPF_FUNC_ringbuf_discard_dynptr:
return &bpf_ringbuf_discard_dynptr_proto;
case BPF_FUNC_for_each_map_elem:
return &bpf_for_each_map_elem_proto;
case BPF_FUNC_loop:
@@ -475,3 +475,81 @@ const struct bpf_func_proto bpf_ringbuf_query_proto = {
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_4(bpf_ringbuf_reserve_dynptr, struct bpf_map *, map, u32, size, u64, flags,
struct bpf_dynptr_kern *, ptr)
{
struct bpf_ringbuf_map *rb_map;
void *sample;
int err;
if (unlikely(flags)) {
bpf_dynptr_set_null(ptr);
return -EINVAL;
}
err = bpf_dynptr_check_size(size);
if (err) {
bpf_dynptr_set_null(ptr);
return err;
}
rb_map = container_of(map, struct bpf_ringbuf_map, map);
sample = __bpf_ringbuf_reserve(rb_map->rb, size);
if (!sample) {
bpf_dynptr_set_null(ptr);
return -EINVAL;
}
bpf_dynptr_init(ptr, sample, BPF_DYNPTR_TYPE_RINGBUF, 0, size);
return 0;
}
const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
.func = bpf_ringbuf_reserve_dynptr,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
};
BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
if (!ptr->data)
return 0;
bpf_ringbuf_commit(ptr->data, flags, false /* discard */);
bpf_dynptr_set_null(ptr);
return 0;
}
const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto = {
.func = bpf_ringbuf_submit_dynptr,
.ret_type = RET_VOID,
.arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
.arg2_type = ARG_ANYTHING,
};
BPF_CALL_2(bpf_ringbuf_discard_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
{
if (!ptr->data)
return 0;
bpf_ringbuf_commit(ptr->data, flags, true /* discard */);
bpf_dynptr_set_null(ptr);
return 0;
}
const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto = {
.func = bpf_ringbuf_discard_dynptr,
.ret_type = RET_VOID,
.arg1_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | OBJ_RELEASE,
.arg2_type = ARG_ANYTHING,
};
@@ -187,6 +187,9 @@ struct bpf_verifier_stack_elem {
POISON_POINTER_DELTA))
#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV))
static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx);
static int release_reference(struct bpf_verifier_env *env, int ref_obj_id);
static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux)
{
return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON;
@@ -673,17 +676,24 @@ static enum bpf_dynptr_type arg_to_dynptr_type(enum bpf_arg_type arg_type)
switch (arg_type & DYNPTR_TYPE_FLAG_MASK) {
case DYNPTR_TYPE_LOCAL:
return BPF_DYNPTR_TYPE_LOCAL;
case DYNPTR_TYPE_RINGBUF:
return BPF_DYNPTR_TYPE_RINGBUF;
default:
return BPF_DYNPTR_TYPE_INVALID;
}
}
static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
{
return type == BPF_DYNPTR_TYPE_RINGBUF;
}
static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
enum bpf_arg_type arg_type, int insn_idx)
{
struct bpf_func_state *state = func(env, reg);
enum bpf_dynptr_type type;
int spi, i;
int spi, i, id;
spi = get_spi(reg->off);
@@ -703,6 +713,16 @@ static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_
state->stack[spi].spilled_ptr.dynptr.type = type;
state->stack[spi - 1].spilled_ptr.dynptr.type = type;
if (dynptr_type_refcounted(type)) {
/* The id is used to track proper releasing */
id = acquire_reference_state(env, insn_idx);
if (id < 0)
return id;
state->stack[spi].spilled_ptr.id = id;
state->stack[spi - 1].spilled_ptr.id = id;
}
return 0;
}
@@ -721,6 +741,13 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
state->stack[spi - 1].slot_type[i] = STACK_INVALID;
}
/* Invalidate any slices associated with this dynptr */
if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
release_reference(env, state->stack[spi].spilled_ptr.id);
state->stack[spi].spilled_ptr.id = 0;
state->stack[spi - 1].spilled_ptr.id = 0;
}
state->stack[spi].spilled_ptr.dynptr.first_slot = false;
state->stack[spi].spilled_ptr.dynptr.type = 0;
state->stack[spi - 1].spilled_ptr.dynptr.type = 0;
@@ -5859,7 +5886,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
skip_type_check:
if (arg_type_is_release(arg_type)) {
if (!reg->ref_obj_id && !register_is_null(reg)) {
if (arg_type_is_dynptr(arg_type)) {
struct bpf_func_state *state = func(env, reg);
int spi = get_spi(reg->off);
if (!is_spi_bounds_valid(state, spi, BPF_DYNPTR_NR_SLOTS) ||
!state->stack[spi].spilled_ptr.id) {
verbose(env, "arg %d is an unacquired reference\n", regno);
return -EINVAL;
}
} else if (!reg->ref_obj_id && !register_is_null(reg)) {
verbose(env, "R%d must be referenced when passed to release function\n",
regno);
return -EINVAL;
@@ -5994,9 +6030,13 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
case DYNPTR_TYPE_LOCAL:
err_extra = "local ";
break;
case DYNPTR_TYPE_RINGBUF:
err_extra = "ringbuf ";
break;
default:
break;
}
verbose(env, "Expected an initialized %sdynptr as arg #%d\n",
err_extra, arg + 1);
return -EINVAL;
@@ -6122,7 +6162,10 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_MAP_TYPE_RINGBUF:
if (func_id != BPF_FUNC_ringbuf_output &&
func_id != BPF_FUNC_ringbuf_reserve &&
func_id != BPF_FUNC_ringbuf_query)
func_id != BPF_FUNC_ringbuf_query &&
func_id != BPF_FUNC_ringbuf_reserve_dynptr &&
func_id != BPF_FUNC_ringbuf_submit_dynptr &&
func_id != BPF_FUNC_ringbuf_discard_dynptr)
goto error;
break;
case BPF_MAP_TYPE_STACK_TRACE:
@@ -6238,6 +6281,9 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_ringbuf_output:
case BPF_FUNC_ringbuf_reserve:
case BPF_FUNC_ringbuf_query:
case BPF_FUNC_ringbuf_reserve_dynptr:
case BPF_FUNC_ringbuf_submit_dynptr:
case BPF_FUNC_ringbuf_discard_dynptr:
if (map->map_type != BPF_MAP_TYPE_RINGBUF)
goto error;
break;
@@ -5189,6 +5189,38 @@ union bpf_attr {
* Return
* 0 on success, -E2BIG if the size exceeds DYNPTR_MAX_SIZE,
* -EINVAL if flags is not 0.
*
* long bpf_ringbuf_reserve_dynptr(void *ringbuf, u32 size, u64 flags, struct bpf_dynptr *ptr)
* Description
* Reserve *size* bytes of payload in a ring buffer *ringbuf*
* through the dynptr interface. *flags* must be 0.
*
* Please note that a corresponding bpf_ringbuf_submit_dynptr or
* bpf_ringbuf_discard_dynptr must be called on *ptr*, even if the
* reservation fails. This is enforced by the verifier.
* Return
* 0 on success, or a negative error in case of failure.
*
* void bpf_ringbuf_submit_dynptr(struct bpf_dynptr *ptr, u64 flags)
* Description
* Submit reserved ring buffer sample, pointed to by *data*,
* through the dynptr interface. This is a no-op if the dynptr is
* invalid/null.
*
* For more information on *flags*, please see
* 'bpf_ringbuf_submit'.
* Return
* Nothing. Always succeeds.
*
* void bpf_ringbuf_discard_dynptr(struct bpf_dynptr *ptr, u64 flags)
* Description
* Discard reserved ring buffer sample through the dynptr
* interface. This is a no-op if the dynptr is invalid/null.
*
* For more information on *flags*, please see
* 'bpf_ringbuf_discard'.
* Return
* Nothing. Always succeeds.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5389,6 +5421,9 @@ union bpf_attr {
FN(map_lookup_percpu_elem), \
FN(skc_to_mptcp_sock), \
FN(dynptr_from_mem), \
FN(ringbuf_reserve_dynptr), \
FN(ringbuf_submit_dynptr), \
FN(ringbuf_discard_dynptr), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper