Commit e3b073c7 authored by Daniel Borkmann

Merge branch 'bpf-nfp-map-offload'

Jakub Kicinski says:

====================
This set adds support for creating maps on networking devices.  BPF is
programs + maps; the pure program offload has been around for quite some
time, and this patchset adds the map part of the equation.

Maps are allocated on the target device from the start.  There is no
host copy when a map is created on the device.  Device maps are represented
by struct bpf_offloaded_map, regardless of type.  Host programs can't
access such maps; access is only possible from a program also loaded
to the same device and/or via the BPF syscall.

Offloaded programs are currently only allowed to perform lookups;
the control plane is responsible for populating the maps.

For brevity, only the infrastructure and basic NFP patches are included.
Target device reporting, netdevsim and tests will follow, as well as
some further optimizations to the NFP code.

v2:
 - leave out the array maps; we will add them trivially later to avoid
   merge conflicts with the ongoing Spectre & Meltdown mitigations.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents fdde5f3b 1bba4c41
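As context for the interface this series introduces: userspace requests a device-resident map through the ordinary bpf(2) syscall, using the new map_ifindex attribute to name the target netdev; the kernel forwards the request to the driver as BPF_OFFLOAD_MAP_ALLOC. The sketch below is illustrative only; it assumes the UAPI headers and NFP firmware from this series, and the helper name, ifindex and key/value sizes are made up for the example.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/bpf.h>

/* Illustrative helper (not part of this series): request a hash map that
 * lives on the device identified by @ifindex instead of in host memory.
 * The kernel forwards the request to the driver as BPF_OFFLOAD_MAP_ALLOC,
 * which the NFP turns into a CMSG_TYPE_MAP_ALLOC control message to the FW.
 */
static int create_offloaded_map(int ifindex, unsigned int max_entries)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_HASH;
	attr.key_size    = 4;            /* must fit the FW's max_key_sz */
	attr.value_size  = 8;            /* must fit the FW's max_val_sz */
	attr.max_entries = max_entries;  /* counted against the FW's max_elems */
	attr.map_ifindex = ifindex;      /* new in this series: bind the map to a netdev */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

int main(void)
{
	int fd = create_offloaded_map(4 /* example ifindex */, 1024);

	if (fd < 0) {
		fprintf(stderr, "offloaded map create failed: %s\n",
			strerror(errno));
		return 1;
	}
	printf("offloaded map fd: %d\n", fd);
	return 0;
}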
@@ -44,6 +44,7 @@ endif
ifeq ($(CONFIG_BPF_SYSCALL),y)
nfp-objs += \
bpf/cmsg.o \
bpf/main.o \
bpf/offload.o \
bpf/verifier.o \
......
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
#define cmsg_warn(bpf, msg...) nn_dp_warn(&(bpf)->app->ctrl->dp, msg)
#define NFP_BPF_TAG_ALLOC_SPAN (U16_MAX / 4)
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
u16 used_tags;
used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;
return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}
static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
/* All FW communication for BPF is request-reply. To make sure we
* don't reuse the message ID too early after timeout - limit the
* number of requests in flight.
*/
if (nfp_bpf_all_tags_busy(bpf)) {
cmsg_warn(bpf, "all FW request contexts busy!\n");
return -EAGAIN;
}
WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
return bpf->tag_alloc_next++;
}
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));
while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
bpf->tag_alloc_last != bpf->tag_alloc_next)
bpf->tag_alloc_last++;
}
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
skb_put(skb, size);
return skb;
}
static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
unsigned int size;
size = sizeof(struct cmsg_req_map_op);
size += sizeof(struct cmsg_key_value_pair) * n;
return nfp_bpf_cmsg_alloc(bpf, size);
}
static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
struct cmsg_hdr *hdr;
hdr = (struct cmsg_hdr *)skb->data;
return be16_to_cpu(hdr->tag);
}
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
unsigned int msg_tag;
struct sk_buff *skb;
skb_queue_walk(&bpf->cmsg_replies, skb) {
msg_tag = nfp_bpf_cmsg_get_tag(skb);
if (msg_tag == tag) {
nfp_bpf_free_tag(bpf, tag);
__skb_unlink(skb, &bpf->cmsg_replies);
return skb;
}
}
return NULL;
}
static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
struct sk_buff *skb;
nfp_ctrl_lock(bpf->app->ctrl);
skb = __nfp_bpf_reply(bpf, tag);
nfp_ctrl_unlock(bpf->app->ctrl);
return skb;
}
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
struct sk_buff *skb;
nfp_ctrl_lock(bpf->app->ctrl);
skb = __nfp_bpf_reply(bpf, tag);
if (!skb)
nfp_bpf_free_tag(bpf, tag);
nfp_ctrl_unlock(bpf->app->ctrl);
return skb;
}
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
int tag)
{
struct sk_buff *skb;
int err;
err = wait_event_interruptible_timeout(bpf->cmsg_wq,
skb = nfp_bpf_reply(bpf, tag),
msecs_to_jiffies(5000));
/* We didn't get a response - try last time and atomically drop
* the tag even if no response is matched.
*/
if (!skb)
skb = nfp_bpf_reply_drop_tag(bpf, tag);
if (err < 0) {
cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
err == ERESTARTSYS ? "interrupted" : "error",
type, err);
return ERR_PTR(err);
}
if (!skb) {
cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
type);
return ERR_PTR(-ETIMEDOUT);
}
return skb;
}
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
struct cmsg_hdr *hdr;
int tag;
nfp_ctrl_lock(bpf->app->ctrl);
tag = nfp_bpf_alloc_tag(bpf);
if (tag < 0) {
nfp_ctrl_unlock(bpf->app->ctrl);
dev_kfree_skb_any(skb);
return ERR_PTR(tag);
}
hdr = (void *)skb->data;
hdr->ver = CMSG_MAP_ABI_VERSION;
hdr->type = type;
hdr->tag = cpu_to_be16(tag);
__nfp_app_ctrl_tx(bpf->app, skb);
nfp_ctrl_unlock(bpf->app->ctrl);
skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
if (IS_ERR(skb))
return skb;
hdr = (struct cmsg_hdr *)skb->data;
/* 0 reply_size means caller will do the validation */
if (reply_size && skb->len != reply_size) {
cmsg_warn(bpf, "cmsg drop - wrong size %d != %d!\n",
skb->len, reply_size);
goto err_free;
}
if (hdr->type != __CMSG_REPLY(type)) {
cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
hdr->type, __CMSG_REPLY(type));
goto err_free;
}
return skb;
err_free:
dev_kfree_skb_any(skb);
return ERR_PTR(-EIO);
}
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
struct cmsg_reply_map_simple *reply)
{
static const int res_table[] = {
[CMSG_RC_SUCCESS] = 0,
[CMSG_RC_ERR_MAP_FD] = -EBADFD,
[CMSG_RC_ERR_MAP_NOENT] = -ENOENT,
[CMSG_RC_ERR_MAP_ERR] = -EINVAL,
[CMSG_RC_ERR_MAP_PARSE] = -EIO,
[CMSG_RC_ERR_MAP_EXIST] = -EEXIST,
[CMSG_RC_ERR_MAP_NOMEM] = -ENOMEM,
[CMSG_RC_ERR_MAP_E2BIG] = -E2BIG,
};
u32 rc;
rc = be32_to_cpu(reply->rc);
if (rc >= ARRAY_SIZE(res_table)) {
cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
return -EIO;
}
return res_table[rc];
}
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
struct cmsg_reply_map_alloc_tbl *reply;
struct cmsg_req_map_alloc_tbl *req;
struct sk_buff *skb;
u32 tid;
int err;
skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
if (!skb)
return -ENOMEM;
req = (void *)skb->data;
req->key_size = cpu_to_be32(map->key_size);
req->value_size = cpu_to_be32(map->value_size);
req->max_entries = cpu_to_be32(map->max_entries);
req->map_type = cpu_to_be32(map->map_type);
req->map_flags = 0;
skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
sizeof(*reply));
if (IS_ERR(skb))
return PTR_ERR(skb);
reply = (void *)skb->data;
err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
if (err)
goto err_free;
tid = be32_to_cpu(reply->tid);
dev_consume_skb_any(skb);
return tid;
err_free:
dev_kfree_skb_any(skb);
return err;
}
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
struct cmsg_reply_map_free_tbl *reply;
struct cmsg_req_map_free_tbl *req;
struct sk_buff *skb;
int err;
skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
if (!skb) {
cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
return;
}
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);
skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
sizeof(*reply));
if (IS_ERR(skb)) {
cmsg_warn(bpf, "leaking map - I/O error\n");
return;
}
reply = (void *)skb->data;
err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
if (err)
cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);
dev_consume_skb_any(skb);
}
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
enum nfp_bpf_cmsg_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
struct nfp_app_bpf *bpf = nfp_map->bpf;
struct bpf_map *map = &offmap->map;
struct cmsg_reply_map_op *reply;
struct cmsg_req_map_op *req;
struct sk_buff *skb;
int err;
/* FW messages have no space for more than 32 bits of flags */
if (flags >> 32)
return -EOPNOTSUPP;
skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
if (!skb)
return -ENOMEM;
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);
req->count = cpu_to_be32(1);
req->flags = cpu_to_be32(flags);
/* Copy inputs */
if (key)
memcpy(&req->elem[0].key, key, map->key_size);
if (value)
memcpy(&req->elem[0].value, value, map->value_size);
skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
sizeof(*reply) + sizeof(*reply->elem));
if (IS_ERR(skb))
return PTR_ERR(skb);
reply = (void *)skb->data;
err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
if (err)
goto err_free;
/* Copy outputs */
if (out_key)
memcpy(out_key, &reply->elem[0].key, map->key_size);
if (out_value)
memcpy(out_value, &reply->elem[0].value, map->value_size);
dev_consume_skb_any(skb);
return 0;
err_free:
dev_kfree_skb_any(skb);
return err;
}
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags)
{
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
key, value, flags, NULL, NULL);
}
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
key, NULL, 0, NULL, NULL);
}
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value)
{
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
key, NULL, 0, NULL, value);
}
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key)
{
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
NULL, NULL, 0, next_key, NULL);
}
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
key, NULL, 0, next_key, NULL);
}
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
struct nfp_app_bpf *bpf = app->priv;
unsigned int tag;
if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
goto err_free;
}
nfp_ctrl_lock(bpf->app->ctrl);
tag = nfp_bpf_cmsg_get_tag(skb);
if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
tag);
goto err_unlock;
}
__skb_queue_tail(&bpf->cmsg_replies, skb);
wake_up_interruptible_all(&bpf->cmsg_wq);
nfp_ctrl_unlock(bpf->app->ctrl);
return;
err_unlock:
nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
dev_kfree_skb_any(skb);
}
@@ -38,7 +38,14 @@
#include <linux/types.h>
enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_FUNC = 1,
NFP_BPF_CAP_TYPE_ADJUST_HEAD = 2,
NFP_BPF_CAP_TYPE_MAPS = 3,
};
struct nfp_bpf_cap_tlv_func {
__le32 func_id;
__le32 func_addr;
};
struct nfp_bpf_cap_tlv_adjust_head {
@@ -51,4 +58,100 @@ struct nfp_bpf_cap_tlv_adjust_head {
#define NFP_BPF_ADJUST_HEAD_NO_META BIT(0)
struct nfp_bpf_cap_tlv_maps {
__le32 types;
__le32 max_maps;
__le32 max_elems;
__le32 max_key_sz;
__le32 max_val_sz;
__le32 max_elem_sz;
};
/*
* Types defined for map related control messages
*/
#define CMSG_MAP_ABI_VERSION 1
enum nfp_bpf_cmsg_type {
CMSG_TYPE_MAP_ALLOC = 1,
CMSG_TYPE_MAP_FREE = 2,
CMSG_TYPE_MAP_LOOKUP = 3,
CMSG_TYPE_MAP_UPDATE = 4,
CMSG_TYPE_MAP_DELETE = 5,
CMSG_TYPE_MAP_GETNEXT = 6,
CMSG_TYPE_MAP_GETFIRST = 7,
__CMSG_TYPE_MAP_MAX,
};
#define CMSG_TYPE_MAP_REPLY_BIT 7
#define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
#define CMSG_MAP_KEY_LW 16
#define CMSG_MAP_VALUE_LW 16
enum nfp_bpf_cmsg_status {
CMSG_RC_SUCCESS = 0,
CMSG_RC_ERR_MAP_FD = 1,
CMSG_RC_ERR_MAP_NOENT = 2,
CMSG_RC_ERR_MAP_ERR = 3,
CMSG_RC_ERR_MAP_PARSE = 4,
CMSG_RC_ERR_MAP_EXIST = 5,
CMSG_RC_ERR_MAP_NOMEM = 6,
CMSG_RC_ERR_MAP_E2BIG = 7,
};
struct cmsg_hdr {
u8 type;
u8 ver;
__be16 tag;
};
struct cmsg_reply_map_simple {
struct cmsg_hdr hdr;
__be32 rc;
};
struct cmsg_req_map_alloc_tbl {
struct cmsg_hdr hdr;
__be32 key_size; /* in bytes */
__be32 value_size; /* in bytes */
__be32 max_entries;
__be32 map_type;
__be32 map_flags; /* reserved */
};
struct cmsg_reply_map_alloc_tbl {
struct cmsg_reply_map_simple reply_hdr;
__be32 tid;
};
struct cmsg_req_map_free_tbl {
struct cmsg_hdr hdr;
__be32 tid;
};
struct cmsg_reply_map_free_tbl {
struct cmsg_reply_map_simple reply_hdr;
__be32 count;
};
struct cmsg_key_value_pair {
__be32 key[CMSG_MAP_KEY_LW];
__be32 value[CMSG_MAP_VALUE_LW];
};
struct cmsg_req_map_op {
struct cmsg_hdr hdr;
__be32 tid;
__be32 count;
__be32 flags;
struct cmsg_key_value_pair elem[0];
};
struct cmsg_reply_map_op {
struct cmsg_reply_map_simple reply_hdr;
__be32 count;
__be32 resv;
struct cmsg_key_value_pair elem[0];
};
#endif
@@ -483,6 +483,21 @@ static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
}
}
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
enum nfp_relo_type relo)
{
if (imm > 0xffff) {
pr_err("relocation of a large immediate!\n");
nfp_prog->error = -EFAULT;
return;
}
emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
nfp_prog->prog[nfp_prog->prog_len - 1] |=
FIELD_PREP(OP_RELO_TYPE, relo);
}
/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
* If the @imm is small enough encode it directly in operand and return
* otherwise load @imm to a spare register and return its encoding.
@@ -538,27 +553,51 @@ wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}
static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
swreg *rega, swreg *regb)
{
if (offset == reg_imm(0)) {
*rega = reg_a(src_gpr);
*regb = reg_b(src_gpr + 1);
return;
}
emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
reg_imm(0));
*rega = imm_a(nfp_prog);
*regb = imm_b(nfp_prog);
}
/* NFP has Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
bool descending_seq = meta->ldst_gather_len < 0;
s16 len = abs(meta->ldst_gather_len);
swreg src_base, off;
bool src_40bit_addr;
unsigned int i;
u8 xfer_num;
off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
src_base = reg_a(meta->insn.src_reg * 2);
xfer_num = round_up(len, 4) / 4;
if (src_40bit_addr)
addr40_offset(nfp_prog, meta->insn.src_reg, off, &src_base,
&off);
/* Setup PREV_ALU fields to override memory read length. */
if (len > 32)
wrp_immed(nfp_prog, reg_none(),
CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
/* Memory read from source addr into transfer-in registers. */
emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
src_base, off, xfer_num - 1, true, len > 32);
/* Move from transfer-in to transfer-out. */
for (i = 0; i < xfer_num; i++)
@@ -696,20 +735,20 @@ data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
}
static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
unsigned int i;
u8 mask, sz;
/* We load the value from the address indicated in rreg + lreg and then
* mask out the data we don't need. Note: this is little endian!
*/
sz = max(size, 4);
mask = size < 4 ? GENMASK(size - 1, 0) : 0;
emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
lreg, rreg, sz / 4 - 1, true);
i = 0;
if (mask)
@@ -725,6 +764,26 @@ data_ld_host_order(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
return 0;
}
static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
u8 dst_gpr, u8 size)
{
return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
size, CMD_MODE_32b);
}
static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
u8 dst_gpr, u8 size)
{
swreg rega, regb;
addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
size, CMD_MODE_40b_BA);
}
static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
@@ -1279,6 +1338,56 @@ static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return 0;
}
static int
map_lookup_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
struct bpf_offloaded_map *offmap;
struct nfp_bpf_map *nfp_map;
bool load_lm_ptr;
u32 ret_tgt;
s64 lm_off;
swreg tid;
offmap = (struct bpf_offloaded_map *)meta->arg1.map_ptr;
nfp_map = offmap->dev_priv;
/* We only have to reload LM0 if the key is not at start of stack */
lm_off = nfp_prog->stack_depth;
lm_off += meta->arg2.var_off.value + meta->arg2.off;
load_lm_ptr = meta->arg2_var_off || lm_off;
/* Set LM0 to start of key */
if (load_lm_ptr)
emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
/* Load map ID into a register, it should actually fit as an immediate
* but in case it doesn't deal with it here, not in the delay slots.
*/
tid = ur_load_imm_any(nfp_prog, nfp_map->tid, imm_a(nfp_prog));
emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + BPF_FUNC_map_lookup_elem,
2, RELO_BR_HELPER);
ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
/* Load map ID into A0 */
wrp_mov(nfp_prog, reg_a(0), tid);
/* Load the return address into B0 */
wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
return -EINVAL;
/* Reset the LM0 pointer */
if (!load_lm_ptr)
return 0;
emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
wrp_nops(nfp_prog, 3);
return 0;
}
/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
@@ -1713,8 +1822,20 @@ mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
static int
mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int size)
{
swreg tmp_reg;
tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
tmp_reg, meta->insn.dst_reg * 2, size);
}
static int
@@ -1738,6 +1859,9 @@ mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return mem_ldx_stack(nfp_prog, meta, size,
meta->ptr.off + meta->ptr.var_off.value);
if (meta->ptr.type == PTR_TO_MAP_VALUE)
return mem_ldx_emem(nfp_prog, meta, size);
return -EOPNOTSUPP;
}
@@ -2058,6 +2182,8 @@ static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
switch (meta->insn.imm) {
case BPF_FUNC_xdp_adjust_head:
return adjust_head(nfp_prog, meta);
case BPF_FUNC_map_lookup_elem:
return map_lookup_stack(nfp_prog, meta);
default:
WARN_ONCE(1, "verifier allowed unsupported function\n");
return -EOPNOTSUPP;
@@ -2794,6 +2920,7 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
for (i = 0; i < nfp_prog->prog_len; i++) {
enum nfp_relo_type special;
u32 val;
special = FIELD_GET(OP_RELO_TYPE, prog[i]);
switch (special) {
@@ -2813,6 +2940,24 @@ void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
case RELO_BR_NEXT_PKT:
br_set_offset(&prog[i], bv->tgt_done);
break;
case RELO_BR_HELPER:
val = br_get_offset(prog[i]);
val -= BR_OFF_RELO;
switch (val) {
case BPF_FUNC_map_lookup_elem:
val = nfp_prog->bpf->helpers.map_lookup;
break;
default:
pr_err("relocation of unknown helper %d\n",
val);
err = -EINVAL;
goto err_free_prog;
}
br_set_offset(&prog[i], val);
break;
case RELO_IMMED_REL:
immed_add_value(&prog[i], bv->start_off);
break;
}
prog[i] &= ~OP_RELO_TYPE;
......
@@ -251,6 +251,45 @@ nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
return 0;
}
static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
struct nfp_bpf_cap_tlv_func __iomem *cap = value;
if (length < sizeof(*cap)) {
nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
return -EINVAL;
}
switch (readl(&cap->func_id)) {
case BPF_FUNC_map_lookup_elem:
bpf->helpers.map_lookup = readl(&cap->func_addr);
break;
}
return 0;
}
static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
struct nfp_bpf_cap_tlv_maps __iomem *cap = value;
if (length < sizeof(*cap)) {
nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
return -EINVAL;
}
bpf->maps.types = readl(&cap->types);
bpf->maps.max_maps = readl(&cap->max_maps);
bpf->maps.max_elems = readl(&cap->max_elems);
bpf->maps.max_key_sz = readl(&cap->max_key_sz);
bpf->maps.max_val_sz = readl(&cap->max_val_sz);
bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);
return 0;
}
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
struct nfp_cpp *cpp = app->pf->cpp;
@@ -276,11 +315,19 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
goto err_release_free;
switch (type) {
case NFP_BPF_CAP_TYPE_FUNC:
if (nfp_bpf_parse_cap_func(app->priv, value, length))
goto err_release_free;
break;
case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
length))
goto err_release_free;
break;
case NFP_BPF_CAP_TYPE_MAPS:
if (nfp_bpf_parse_cap_maps(app->priv, value, length))
goto err_release_free;
break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -313,6 +360,10 @@ static int nfp_bpf_init(struct nfp_app *app)
bpf->app = app;
app->priv = bpf;
skb_queue_head_init(&bpf->cmsg_replies);
init_waitqueue_head(&bpf->cmsg_wq);
INIT_LIST_HEAD(&bpf->map_list);
err = nfp_bpf_parse_capabilities(app);
if (err)
goto err_free_bpf;
@@ -326,7 +377,12 @@ static int nfp_bpf_init(struct nfp_app *app)
static void nfp_bpf_clean(struct nfp_app *app)
{
struct nfp_app_bpf *bpf = app->priv;
WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
WARN_ON(!list_empty(&bpf->map_list));
WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
kfree(bpf);
}
const struct nfp_app_type app_bpf = {
@@ -343,6 +399,8 @@ const struct nfp_app_type app_bpf = {
.vnic_alloc = nfp_bpf_vnic_alloc,
.vnic_free = nfp_bpf_vnic_free,
.ctrl_msg_rx = nfp_bpf_ctrl_msg_rx,
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
.bpf = nfp_ndo_bpf,
......
@@ -37,10 +37,14 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
#include "../nfp_asm.h"
#include "fw.h"
/* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW!
@@ -56,6 +60,9 @@ enum nfp_relo_type {
RELO_BR_GO_ABORT,
/* external jumps to fixed addresses */
RELO_BR_NEXT_PKT,
RELO_BR_HELPER,
/* immediate relocation against load address */
RELO_IMMED_REL,
};
/* To make absolute relocated branches (branches other than RELO_BR_REL)
@@ -93,16 +100,49 @@ enum pkt_vec {
* struct nfp_app_bpf - bpf app priv structure
* @app: backpointer to the app
*
* @tag_allocator: bitmap of control message tags in use
* @tag_alloc_next: next tag bit to allocate
* @tag_alloc_last: next tag bit to be freed
*
* @cmsg_replies: received cmsg replies waiting to be consumed
* @cmsg_wq: work queue for waiting for cmsg replies
*
* @map_list: list of offloaded maps
* @maps_in_use: number of currently offloaded maps
* @map_elems_in_use: number of elements allocated to offloaded maps
*
* @adjust_head: adjust head capability
* @flags: extra flags for adjust head
* @off_min: minimal packet offset within buffer required
* @off_max: maximum packet offset within buffer required
* @guaranteed_sub: amount of negative adjustment guaranteed possible
* @guaranteed_add: amount of positive adjustment guaranteed possible
*
* @maps: map capability
* @types: supported map types
* @max_maps: max number of maps supported
* @max_elems: max number of entries in each map
* @max_key_sz: max size of map key
* @max_val_sz: max size of map value
* @max_elem_sz: max size of map entry (key + value)
*
* @helpers: helper addresses for various calls
* @map_lookup: map lookup helper address
*/
struct nfp_app_bpf {
struct nfp_app *app;
DECLARE_BITMAP(tag_allocator, U16_MAX + 1);
u16 tag_alloc_next;
u16 tag_alloc_last;
struct sk_buff_head cmsg_replies;
struct wait_queue_head cmsg_wq;
struct list_head map_list;
unsigned int maps_in_use;
unsigned int map_elems_in_use;
struct nfp_bpf_cap_adjust_head {
u32 flags;
int off_min;
@@ -110,6 +150,33 @@ struct nfp_app_bpf {
int guaranteed_sub;
int guaranteed_add;
} adjust_head;
struct {
u32 types;
u32 max_maps;
u32 max_elems;
u32 max_key_sz;
u32 max_val_sz;
u32 max_elem_sz;
} maps;
struct {
u32 map_lookup;
} helpers;
};
/**
* struct nfp_bpf_map - private per-map data attached to BPF maps for offload
* @offmap: pointer to the offloaded BPF map
* @bpf: back pointer to bpf app private structure
* @tid: table id identifying map on datapath
* @l: link on the nfp_app_bpf->map_list list
*/
struct nfp_bpf_map {
struct bpf_offloaded_map *offmap;
struct nfp_app_bpf *bpf;
u32 tid;
struct list_head l;
};
struct nfp_prog;
@@ -131,9 +198,12 @@ typedef int (*instr_cb_t)(struct nfp_prog *, struct nfp_insn_meta *);
* @ptr: pointer type for memory operations
* @ldst_gather_len: memcpy length gathered from load/store sequence
* @paired_st: the paired store insn at the head of the sequence
* @arg2: arg2 for call instructions
* @ptr_not_const: pointer is not always constant
* @jmp_dst: destination info for jump instructions
* @func_id: function id for call instructions
* @arg1: arg1 for call instructions
* @arg2: arg2 for call instructions
* @arg2_var_off: arg2 changes stack offset on different paths
* @off: index of first generated machine instruction (in nfp_prog.prog)
* @n: eBPF instruction number
* @flags: eBPF instruction extra optimization flags
@@ -151,7 +221,12 @@ struct nfp_insn_meta {
bool ptr_not_const;
};
struct nfp_insn_meta *jmp_dst;
struct {
u32 func_id;
struct bpf_reg_state arg1;
struct bpf_reg_state arg2;
bool arg2_var_off;
};
};
unsigned int off;
unsigned short n;
@@ -266,4 +341,20 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns);
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map);
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
void *next_key);
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
void *key, void *value, u64 flags);
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key);
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
void *key, void *value);
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
void *key, void *next_key);
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
@@ -36,6 +36,9 @@
* Netronome network device driver: TC offload functions for PF and VF
*/
#define pr_fmt(fmt) "NFP net bpf: " fmt
#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
@@ -153,6 +156,103 @@ static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
return 0;
}
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
void *key, void *next_key)
{
if (!key)
return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}
static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
return nfp_bpf_ctrl_del_entry(offmap, key);
}
static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
.map_get_next_key = nfp_bpf_map_get_next_key,
.map_lookup_elem = nfp_bpf_ctrl_lookup_entry,
.map_update_elem = nfp_bpf_ctrl_update_entry,
.map_delete_elem = nfp_bpf_map_delete_elem,
};
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
struct nfp_bpf_map *nfp_map;
long long int res;
if (!bpf->maps.types)
return -EOPNOTSUPP;
if (offmap->map.map_flags ||
offmap->map.numa_node != NUMA_NO_NODE) {
pr_info("map flags are not supported\n");
return -EINVAL;
}
if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
pr_info("map type not supported\n");
return -EOPNOTSUPP;
}
if (bpf->maps.max_maps == bpf->maps_in_use) {
pr_info("too many maps for a device\n");
return -ENOMEM;
}
if (bpf->maps.max_elems - bpf->map_elems_in_use <
offmap->map.max_entries) {
pr_info("map with too many elements: %u, left: %u\n",
offmap->map.max_entries,
bpf->maps.max_elems - bpf->map_elems_in_use);
return -ENOMEM;
}
if (offmap->map.key_size > bpf->maps.max_key_sz ||
offmap->map.value_size > bpf->maps.max_val_sz ||
round_up(offmap->map.key_size, 8) +
round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
pr_info("elements don't fit in device constraints\n");
return -ENOMEM;
}
nfp_map = kzalloc(sizeof(*nfp_map), GFP_USER);
if (!nfp_map)
return -ENOMEM;
offmap->dev_priv = nfp_map;
nfp_map->offmap = offmap;
nfp_map->bpf = bpf;
res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
if (res < 0) {
kfree(nfp_map);
return res;
}
nfp_map->tid = res;
offmap->dev_ops = &nfp_bpf_map_ops;
bpf->maps_in_use++;
bpf->map_elems_in_use += offmap->map.max_entries;
list_add_tail(&nfp_map->l, &bpf->map_list);
return 0;
}
static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
nfp_bpf_ctrl_free_map(bpf, nfp_map);
list_del_init(&nfp_map->l);
bpf->map_elems_in_use -= offmap->map.max_entries;
bpf->maps_in_use--;
kfree(nfp_map);
return 0;
}
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
switch (bpf->command) {
@@ -162,6 +262,10 @@ int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
return nfp_bpf_translate(nn, bpf->offload.prog);
case BPF_OFFLOAD_DESTROY:
return nfp_bpf_destroy(nn, bpf->offload.prog);
case BPF_OFFLOAD_MAP_ALLOC:
return nfp_bpf_map_alloc(app->priv, bpf->offmap);
case BPF_OFFLOAD_MAP_FREE:
return nfp_bpf_map_free(app->priv, bpf->offmap);
default:
return -EINVAL;
}
@@ -237,7 +341,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
int err;
if (prog) {
struct bpf_prog_offload *offload = prog->aux->offload;
if (!offload)
return -EINVAL;
......
@@ -110,9 +110,11 @@ static int
nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
struct nfp_insn_meta *meta)
{
const struct bpf_reg_state *reg1 = cur_regs(env) + BPF_REG_1;
const struct bpf_reg_state *reg2 = cur_regs(env) + BPF_REG_2;
struct nfp_app_bpf *bpf = nfp_prog->bpf;
u32 func_id = meta->insn.imm;
s64 off, old_off;
switch (func_id) {
case BPF_FUNC_xdp_adjust_head:
@@ -127,11 +129,48 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
break;
case BPF_FUNC_map_lookup_elem:
if (!bpf->helpers.map_lookup) {
pr_info("map_lookup: not supported by FW\n");
return -EOPNOTSUPP;
}
if (reg2->type != PTR_TO_STACK) {
pr_info("map_lookup: unsupported key ptr type %d\n",
reg2->type);
return -EOPNOTSUPP;
}
if (!tnum_is_const(reg2->var_off)) {
pr_info("map_lookup: variable key pointer\n");
return -EOPNOTSUPP;
}
off = reg2->var_off.value + reg2->off;
if (-off % 4) {
pr_info("map_lookup: unaligned stack pointer %lld\n",
-off);
return -EOPNOTSUPP;
}
/* Rest of the checks is only if we re-parse the same insn */
if (!meta->func_id)
break;
old_off = meta->arg2.var_off.value + meta->arg2.off;
meta->arg2_var_off |= off != old_off;
if (meta->arg1.map_ptr != reg1->map_ptr) {
pr_info("map_lookup: called for different map\n");
return -EOPNOTSUPP;
}
break;
default:
pr_vlog(env, "unsupported function id: %d\n", func_id);
return -EOPNOTSUPP;
}
meta->func_id = func_id;
meta->arg1 = *reg1;
meta->arg2 = *reg2;
return 0;
@@ -210,6 +249,7 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (reg->type != PTR_TO_CTX &&
reg->type != PTR_TO_STACK &&
reg->type != PTR_TO_MAP_VALUE &&
reg->type != PTR_TO_PACKET) {
pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
return -EINVAL;
@@ -221,6 +261,13 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
return err;
}
if (reg->type == PTR_TO_MAP_VALUE) {
if (is_mbpf_store(meta)) {
pr_info("map writes not supported\n");
return -EOPNOTSUPP;
}
}
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
meta->ptr.type, reg->type);
......
@@ -165,6 +165,7 @@ struct nfp_app {
void *priv;
};
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
static inline int nfp_app_init(struct nfp_app *app)
@@ -326,6 +327,14 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog);
}
static inline bool __nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
skb->data, skb->len);
return __nfp_ctrl_tx(app->ctrl, skb);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
......
@@ -50,6 +50,11 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
};
static bool unreg_is_imm(u16 reg)
{
return (reg & UR_REG_IMM) == UR_REG_IMM;
}
u16 br_get_offset(u64 instr)
{
u16 addr_lo, addr_hi;
@@ -80,6 +85,59 @@ void br_add_offset(u64 *instr, u16 offset)
br_set_offset(instr, addr + offset);
}
static bool immed_can_modify(u64 instr)
{
if (FIELD_GET(OP_IMMED_INV, instr) ||
FIELD_GET(OP_IMMED_SHIFT, instr) ||
FIELD_GET(OP_IMMED_WIDTH, instr) != IMMED_WIDTH_ALL) {
pr_err("Can't decode/encode immed!\n");
return false;
}
return true;
}
u16 immed_get_value(u64 instr)
{
u16 reg;
if (!immed_can_modify(instr))
return 0;
reg = FIELD_GET(OP_IMMED_A_SRC, instr);
if (!unreg_is_imm(reg))
reg = FIELD_GET(OP_IMMED_B_SRC, instr);
return (reg & 0xff) | FIELD_GET(OP_IMMED_IMM, instr);
}
void immed_set_value(u64 *instr, u16 immed)
{
if (!immed_can_modify(*instr))
return;
if (unreg_is_imm(FIELD_GET(OP_IMMED_A_SRC, *instr))) {
*instr &= ~FIELD_PREP(OP_IMMED_A_SRC, 0xff);
*instr |= FIELD_PREP(OP_IMMED_A_SRC, immed & 0xff);
} else {
*instr &= ~FIELD_PREP(OP_IMMED_B_SRC, 0xff);
*instr |= FIELD_PREP(OP_IMMED_B_SRC, immed & 0xff);
}
*instr &= ~OP_IMMED_IMM;
*instr |= FIELD_PREP(OP_IMMED_IMM, immed >> 8);
}
void immed_add_value(u64 *instr, u16 offset)
{
u16 val;
if (!immed_can_modify(*instr))
return;
val = immed_get_value(*instr);
immed_set_value(instr, val + offset);
}
static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
{
bool lm_id, lm_dec = false;
......
@@ -138,6 +138,10 @@ enum immed_shift {
IMMED_SHIFT_2B = 2,
};
u16 immed_get_value(u64 instr);
void immed_set_value(u64 *instr, u16 immed);
void immed_add_value(u64 *instr, u16 offset);
#define OP_SHF_BASE 0x08000000000ULL
#define OP_SHF_A_SRC 0x000000000ffULL
#define OP_SHF_SC 0x00000000300ULL
......
@@ -839,6 +839,18 @@ static inline const char *nfp_net_name(struct nfp_net *nn)
return nn->dp.netdev ? nn->dp.netdev->name : "ctrl";
}
static inline void nfp_ctrl_lock(struct nfp_net *nn)
__acquires(&nn->r_vecs[0].lock)
{
spin_lock_bh(&nn->r_vecs[0].lock);
}
static inline void nfp_ctrl_unlock(struct nfp_net *nn)
__releases(&nn->r_vecs[0].lock)
{
spin_unlock_bh(&nn->r_vecs[0].lock);
}
/* Globals */
extern const char nfp_driver_version[];
......
@@ -1920,6 +1920,13 @@ nfp_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
return false;
}
bool __nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
return nfp_ctrl_tx_one(nn, r_vec, skb, false);
}
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb)
{
struct nfp_net_r_vector *r_vec = &nn->r_vecs[0];
......
@@ -25,6 +25,7 @@ struct bpf_map;
/* map is generic key/value storage optionally accessible by eBPF programs */
struct bpf_map_ops {
/* funcs callable from userspace (via syscall) */
int (*map_alloc_check)(union bpf_attr *attr);
struct bpf_map *(*map_alloc)(union bpf_attr *attr);
void (*map_release)(struct bpf_map *map, struct file *map_file);
void (*map_free)(struct bpf_map *map);
@@ -73,6 +74,33 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN];
};
struct bpf_offloaded_map;
struct bpf_map_dev_ops {
int (*map_get_next_key)(struct bpf_offloaded_map *map,
void *key, void *next_key);
int (*map_lookup_elem)(struct bpf_offloaded_map *map,
void *key, void *value);
int (*map_update_elem)(struct bpf_offloaded_map *map,
void *key, void *value, u64 flags);
int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
};
struct bpf_offloaded_map {
struct bpf_map map;
struct net_device *netdev;
const struct bpf_map_dev_ops *dev_ops;
void *dev_priv;
struct list_head offloads;
};
static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
{
return container_of(map, struct bpf_offloaded_map, map);
}
extern const struct bpf_map_ops bpf_map_offload_ops;
/* function argument constraints */
enum bpf_arg_type {
ARG_DONTCARE = 0, /* unused argument in helper function */
@@ -199,7 +227,7 @@ struct bpf_prog_offload_ops {
int insn_idx, int prev_insn_idx);
};
struct bpf_prog_offload {
struct bpf_prog *prog;
struct net_device *netdev;
void *dev_priv;
@@ -229,7 +257,7 @@ struct bpf_prog_aux {
#ifdef CONFIG_SECURITY
void *security;
#endif
struct bpf_prog_offload *offload;
union {
struct work_struct work;
struct rcu_head rcu;
@@ -368,6 +396,7 @@ int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
@@ -377,6 +406,7 @@ void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
extern int sysctl_unprivileged_bpf_disabled;
@@ -554,6 +584,15 @@ void bpf_prog_offload_destroy(struct bpf_prog *prog);
int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
struct bpf_prog *prog);
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
int bpf_map_offload_update_elem(struct bpf_map *map,
void *key, void *value, u64 flags);
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
void *key, void *next_key);
bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -561,6 +600,14 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return aux->offload_requested;
}
static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
return unlikely(map->ops == &bpf_map_offload_ops);
}
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@@ -572,6 +619,20 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
{
return false;
}
static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
{
return false;
}
static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
......
@@ -804,6 +804,8 @@ enum bpf_netdev_command {
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
BPF_OFFLOAD_DESTROY,
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
};
struct bpf_prog_offload_ops;
@@ -834,6 +836,10 @@ struct netdev_bpf {
struct {
struct bpf_prog *prog;
} offload;
/* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */
struct {
struct bpf_offloaded_map *offmap;
};
};
};
......
@@ -245,6 +245,7 @@ union bpf_attr {
* BPF_F_NUMA_NODE is set).
*/
char map_name[BPF_OBJ_NAME_LEN];
__u32 map_ifindex; /* ifindex of netdev to create on */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
......
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
if (!cmap)
return ERR_PTR(-ENOMEM);
/* mandatory map attributes */
bpf_map_init_from_attr(&cmap->map, attr);
cmap->map.map_type = attr->map_type;
cmap->map.key_size = attr->key_size;
cmap->map.value_size = attr->value_size;
cmap->map.max_entries = attr->max_entries;
cmap->map.map_flags = attr->map_flags;
cmap->map.numa_node = bpf_map_attr_numa_node(attr);
/* Pre-limit array size based on NR_CPUS, not final CPU check */
if (cmap->map.max_entries > NR_CPUS) {
......
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
if (!dtab)
return ERR_PTR(-ENOMEM);
/* mandatory map attributes */
bpf_map_init_from_attr(&dtab->map, attr);
dtab->map.map_type = attr->map_type;
dtab->map.key_size = attr->key_size;
dtab->map.value_size = attr->value_size;
dtab->map.max_entries = attr->max_entries;
dtab->map.map_flags = attr->map_flags;
dtab->map.numa_node = bpf_map_attr_numa_node(attr);
/* make sure page count doesn't overflow */
cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
......
@@ -227,7 +227,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
}
/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
@@ -241,9 +241,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
int numa_node = bpf_map_attr_numa_node(attr);
struct bpf_htab *htab;
int err, i;
u64 cost;
BUILD_BUG_ON(offsetof(struct htab_elem, htab) != BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
offsetof(struct htab_elem, hash_node.pprev)); offsetof(struct htab_elem, hash_node.pprev));
...@@ -254,40 +251,68 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) ...@@ -254,40 +251,68 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* LRU implementation is much complicated than other /* LRU implementation is much complicated than other
* maps. Hence, limit to CAP_SYS_ADMIN for now. * maps. Hence, limit to CAP_SYS_ADMIN for now.
*/ */
return ERR_PTR(-EPERM); return -EPERM;
if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK) if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
/* reserved bits should not be used */ /* reserved bits should not be used */
return ERR_PTR(-EINVAL); return -EINVAL;
if (!lru && percpu_lru) if (!lru && percpu_lru)
return ERR_PTR(-EINVAL); return -EINVAL;
if (lru && !prealloc) if (lru && !prealloc)
return ERR_PTR(-ENOTSUPP); return -ENOTSUPP;
if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru)) if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
return ERR_PTR(-EINVAL); return -EINVAL;
/* check sanity of attributes.
* value_size == 0 may be allowed in the future to use map as a set
*/
if (attr->max_entries == 0 || attr->key_size == 0 ||
attr->value_size == 0)
return -EINVAL;
if (attr->key_size > MAX_BPF_STACK)
/* eBPF programs initialize keys on stack, so they cannot be
* larger than max stack size
*/
return -E2BIG;
if (attr->value_size >= KMALLOC_MAX_SIZE -
MAX_BPF_STACK - sizeof(struct htab_elem))
/* if value_size is bigger, the user space won't be able to
* access the elements via bpf syscall. This check also makes
* sure that the elem_size doesn't overflow and it's
* kmalloc-able later in htab_map_update_elem()
*/
return -E2BIG;
return 0;
}
static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
/* percpu_lru means each cpu has its own LRU list.
* it is different from BPF_MAP_TYPE_PERCPU_HASH where
* the map's value itself is percpu. percpu_lru has
* nothing to do with the map's value.
*/
bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
struct bpf_htab *htab;
int err, i;
u64 cost;
htab = kzalloc(sizeof(*htab), GFP_USER); htab = kzalloc(sizeof(*htab), GFP_USER);
if (!htab) if (!htab)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
/* mandatory map attributes */ bpf_map_init_from_attr(&htab->map, attr);
htab->map.map_type = attr->map_type;
htab->map.key_size = attr->key_size;
htab->map.value_size = attr->value_size;
htab->map.max_entries = attr->max_entries;
htab->map.map_flags = attr->map_flags;
htab->map.numa_node = numa_node;
/* check sanity of attributes.
* value_size == 0 may be allowed in the future to use map as a set
*/
err = -EINVAL;
if (htab->map.max_entries == 0 || htab->map.key_size == 0 ||
htab->map.value_size == 0)
goto free_htab;
if (percpu_lru) { if (percpu_lru) {
/* ensure each CPU's lru list has >=1 elements. /* ensure each CPU's lru list has >=1 elements.
...@@ -304,22 +329,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) ...@@ -304,22 +329,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
/* hash table size must be power of 2 */ /* hash table size must be power of 2 */
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
err = -E2BIG;
if (htab->map.key_size > MAX_BPF_STACK)
/* eBPF programs initialize keys on stack, so they cannot be
* larger than max stack size
*/
goto free_htab;
if (htab->map.value_size >= KMALLOC_MAX_SIZE -
MAX_BPF_STACK - sizeof(struct htab_elem))
/* if value_size is bigger, the user space won't be able to
* access the elements via bpf syscall. This check also makes
* sure that the elem_size doesn't overflow and it's
* kmalloc-able later in htab_map_update_elem()
*/
goto free_htab;
htab->elem_size = sizeof(struct htab_elem) + htab->elem_size = sizeof(struct htab_elem) +
round_up(htab->map.key_size, 8); round_up(htab->map.key_size, 8);
if (percpu) if (percpu)
...@@ -327,6 +336,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) ...@@ -327,6 +336,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
else else
htab->elem_size += round_up(htab->map.value_size, 8); htab->elem_size += round_up(htab->map.value_size, 8);
err = -E2BIG;
/* prevent zero size kmalloc and check for u32 overflow */ /* prevent zero size kmalloc and check for u32 overflow */
if (htab->n_buckets == 0 || if (htab->n_buckets == 0 ||
htab->n_buckets > U32_MAX / sizeof(struct bucket)) htab->n_buckets > U32_MAX / sizeof(struct bucket))
...@@ -1143,6 +1153,7 @@ static void htab_map_free(struct bpf_map *map) ...@@ -1143,6 +1153,7 @@ static void htab_map_free(struct bpf_map *map)
} }
const struct bpf_map_ops htab_map_ops = { const struct bpf_map_ops htab_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc, .map_alloc = htab_map_alloc,
.map_free = htab_map_free, .map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key, .map_get_next_key = htab_map_get_next_key,
...@@ -1153,6 +1164,7 @@ const struct bpf_map_ops htab_map_ops = { ...@@ -1153,6 +1164,7 @@ const struct bpf_map_ops htab_map_ops = {
}; };
const struct bpf_map_ops htab_lru_map_ops = { const struct bpf_map_ops htab_lru_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc, .map_alloc = htab_map_alloc,
.map_free = htab_map_free, .map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key, .map_get_next_key = htab_map_get_next_key,
...@@ -1236,6 +1248,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, ...@@ -1236,6 +1248,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
} }
const struct bpf_map_ops htab_percpu_map_ops = { const struct bpf_map_ops htab_percpu_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc, .map_alloc = htab_map_alloc,
.map_free = htab_map_free, .map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key, .map_get_next_key = htab_map_get_next_key,
...@@ -1245,6 +1258,7 @@ const struct bpf_map_ops htab_percpu_map_ops = { ...@@ -1245,6 +1258,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
}; };
const struct bpf_map_ops htab_lru_percpu_map_ops = { const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc, .map_alloc = htab_map_alloc,
.map_free = htab_map_free, .map_free = htab_map_free,
.map_get_next_key = htab_map_get_next_key, .map_get_next_key = htab_map_get_next_key,
...@@ -1253,11 +1267,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = { ...@@ -1253,11 +1267,11 @@ const struct bpf_map_ops htab_lru_percpu_map_ops = {
.map_delete_elem = htab_lru_map_delete_elem, .map_delete_elem = htab_lru_map_delete_elem,
}; };
static struct bpf_map *fd_htab_map_alloc(union bpf_attr *attr) static int fd_htab_map_alloc_check(union bpf_attr *attr)
{ {
if (attr->value_size != sizeof(u32)) if (attr->value_size != sizeof(u32))
return ERR_PTR(-EINVAL); return -EINVAL;
return htab_map_alloc(attr); return htab_map_alloc_check(attr);
} }
static void fd_htab_map_free(struct bpf_map *map) static void fd_htab_map_free(struct bpf_map *map)
...@@ -1328,7 +1342,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr) ...@@ -1328,7 +1342,7 @@ static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
if (IS_ERR(inner_map_meta)) if (IS_ERR(inner_map_meta))
return inner_map_meta; return inner_map_meta;
map = fd_htab_map_alloc(attr); map = htab_map_alloc(attr);
if (IS_ERR(map)) { if (IS_ERR(map)) {
bpf_map_meta_free(inner_map_meta); bpf_map_meta_free(inner_map_meta);
return map; return map;
...@@ -1372,6 +1386,7 @@ static void htab_of_map_free(struct bpf_map *map) ...@@ -1372,6 +1386,7 @@ static void htab_of_map_free(struct bpf_map *map)
} }
const struct bpf_map_ops htab_of_maps_map_ops = { const struct bpf_map_ops htab_of_maps_map_ops = {
.map_alloc_check = fd_htab_map_alloc_check,
.map_alloc = htab_of_map_alloc, .map_alloc = htab_of_map_alloc,
.map_free = htab_of_map_free, .map_free = htab_of_map_free,
.map_get_next_key = htab_map_get_next_key, .map_get_next_key = htab_map_get_next_key,
......
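A note on the shape of this refactor (a sketch, not part of the diff): find_and_alloc_map() in the syscall.c hunk further down calls ->map_alloc_check() from the original map type before it may substitute bpf_map_offload_ops, so attribute validation still runs even when the host-side ->map_alloc() is never invoked. A hypothetical new map type would follow the same split; the example_* names below are placeholders.

#include <linux/bpf.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical map type illustrating the two-step contract introduced here:
 * ->map_alloc_check() only validates attributes and has no side effects;
 * ->map_alloc() does the host-side allocation and may never be called for
 * device-bound maps.
 */
static int example_map_alloc_check(union bpf_attr *attr)
{
	if (!attr->max_entries || !attr->key_size || !attr->value_size)
		return -EINVAL;
	return 0;
}

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map;

	map = kzalloc(sizeof(*map), GFP_USER);	/* placeholder host allocation */
	if (!map)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(map, attr);
	return map;
}

static void example_map_free(struct bpf_map *map)
{
	kfree(map);
}

const struct bpf_map_ops example_map_ops = {
	.map_alloc_check	= example_map_alloc_check,
	.map_alloc		= example_map_alloc,
	.map_free		= example_map_free,
};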
...@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr) ...@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */ /* copy mandatory map attributes */
trie->map.map_type = attr->map_type; bpf_map_init_from_attr(&trie->map, attr);
trie->map.key_size = attr->key_size;
trie->map.value_size = attr->value_size;
trie->map.max_entries = attr->max_entries;
trie->map.map_flags = attr->map_flags;
trie->map.numa_node = bpf_map_attr_numa_node(attr);
trie->data_size = attr->key_size - trie->data_size = attr->key_size -
offsetof(struct bpf_lpm_trie_key, data); offsetof(struct bpf_lpm_trie_key, data);
trie->max_prefixlen = trie->data_size * 8; trie->max_prefixlen = trie->data_size * 8;
......
...@@ -24,15 +24,27 @@ ...@@ -24,15 +24,27 @@
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/rwsem.h> #include <linux/rwsem.h>
/* Protects bpf_prog_offload_devs and offload members of all progs. /* Protects bpf_prog_offload_devs, bpf_map_offload_devs and offload members
* of all progs.
* RTNL lock cannot be taken when holding this lock. * RTNL lock cannot be taken when holding this lock.
*/ */
static DECLARE_RWSEM(bpf_devs_lock); static DECLARE_RWSEM(bpf_devs_lock);
static LIST_HEAD(bpf_prog_offload_devs); static LIST_HEAD(bpf_prog_offload_devs);
static LIST_HEAD(bpf_map_offload_devs);
static int bpf_dev_offload_check(struct net_device *netdev)
{
if (!netdev)
return -EINVAL;
if (!netdev->netdev_ops->ndo_bpf)
return -EOPNOTSUPP;
return 0;
}
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
{ {
struct bpf_dev_offload *offload; struct bpf_prog_offload *offload;
int err;
if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
attr->prog_type != BPF_PROG_TYPE_XDP) attr->prog_type != BPF_PROG_TYPE_XDP)
...@@ -49,12 +61,15 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) ...@@ -49,12 +61,15 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
offload->netdev = dev_get_by_index(current->nsproxy->net_ns, offload->netdev = dev_get_by_index(current->nsproxy->net_ns,
attr->prog_ifindex); attr->prog_ifindex);
if (!offload->netdev) err = bpf_dev_offload_check(offload->netdev);
goto err_free; if (err)
goto err_maybe_put;
down_write(&bpf_devs_lock); down_write(&bpf_devs_lock);
if (offload->netdev->reg_state != NETREG_REGISTERED) if (offload->netdev->reg_state != NETREG_REGISTERED) {
err = -EINVAL;
goto err_unlock; goto err_unlock;
}
prog->aux->offload = offload; prog->aux->offload = offload;
list_add_tail(&offload->offloads, &bpf_prog_offload_devs); list_add_tail(&offload->offloads, &bpf_prog_offload_devs);
dev_put(offload->netdev); dev_put(offload->netdev);
...@@ -63,16 +78,17 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) ...@@ -63,16 +78,17 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr)
return 0; return 0;
err_unlock: err_unlock:
up_write(&bpf_devs_lock); up_write(&bpf_devs_lock);
dev_put(offload->netdev); err_maybe_put:
err_free: if (offload->netdev)
dev_put(offload->netdev);
kfree(offload); kfree(offload);
return -EINVAL; return err;
} }
static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd, static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
struct netdev_bpf *data) struct netdev_bpf *data)
{ {
struct bpf_dev_offload *offload = prog->aux->offload; struct bpf_prog_offload *offload = prog->aux->offload;
struct net_device *netdev; struct net_device *netdev;
ASSERT_RTNL(); ASSERT_RTNL();
...@@ -80,8 +96,6 @@ static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd, ...@@ -80,8 +96,6 @@ static int __bpf_offload_ndo(struct bpf_prog *prog, enum bpf_netdev_command cmd,
if (!offload) if (!offload)
return -ENODEV; return -ENODEV;
netdev = offload->netdev; netdev = offload->netdev;
if (!netdev->netdev_ops->ndo_bpf)
return -EOPNOTSUPP;
data->command = cmd; data->command = cmd;
...@@ -110,7 +124,7 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) ...@@ -110,7 +124,7 @@ int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
int insn_idx, int prev_insn_idx) int insn_idx, int prev_insn_idx)
{ {
struct bpf_dev_offload *offload; struct bpf_prog_offload *offload;
int ret = -ENODEV; int ret = -ENODEV;
down_read(&bpf_devs_lock); down_read(&bpf_devs_lock);
...@@ -124,7 +138,7 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, ...@@ -124,7 +138,7 @@ int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
static void __bpf_prog_offload_destroy(struct bpf_prog *prog) static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
{ {
struct bpf_dev_offload *offload = prog->aux->offload; struct bpf_prog_offload *offload = prog->aux->offload;
struct netdev_bpf data = {}; struct netdev_bpf data = {};
data.offload.prog = prog; data.offload.prog = prog;
...@@ -238,11 +252,186 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info, ...@@ -238,11 +252,186 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
const struct bpf_prog_ops bpf_offload_prog_ops = { const struct bpf_prog_ops bpf_offload_prog_ops = {
}; };
static int bpf_map_offload_ndo(struct bpf_offloaded_map *offmap,
enum bpf_netdev_command cmd)
{
struct netdev_bpf data = {};
struct net_device *netdev;
ASSERT_RTNL();
data.command = cmd;
data.offmap = offmap;
/* Caller must make sure netdev is valid */
netdev = offmap->netdev;
return netdev->netdev_ops->ndo_bpf(netdev, &data);
}
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
{
struct net *net = current->nsproxy->net_ns;
struct bpf_offloaded_map *offmap;
int err;
if (!capable(CAP_SYS_ADMIN))
return ERR_PTR(-EPERM);
if (attr->map_type != BPF_MAP_TYPE_HASH)
return ERR_PTR(-EINVAL);
offmap = kzalloc(sizeof(*offmap), GFP_USER);
if (!offmap)
return ERR_PTR(-ENOMEM);
bpf_map_init_from_attr(&offmap->map, attr);
rtnl_lock();
down_write(&bpf_devs_lock);
offmap->netdev = __dev_get_by_index(net, attr->map_ifindex);
err = bpf_dev_offload_check(offmap->netdev);
if (err)
goto err_unlock;
err = bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_ALLOC);
if (err)
goto err_unlock;
list_add_tail(&offmap->offloads, &bpf_map_offload_devs);
up_write(&bpf_devs_lock);
rtnl_unlock();
return &offmap->map;
err_unlock:
up_write(&bpf_devs_lock);
rtnl_unlock();
kfree(offmap);
return ERR_PTR(err);
}
static void __bpf_map_offload_destroy(struct bpf_offloaded_map *offmap)
{
WARN_ON(bpf_map_offload_ndo(offmap, BPF_OFFLOAD_MAP_FREE));
/* Make sure BPF_MAP_GET_NEXT_ID can't find this dead map */
bpf_map_free_id(&offmap->map, true);
list_del_init(&offmap->offloads);
offmap->netdev = NULL;
}
void bpf_map_offload_map_free(struct bpf_map *map)
{
struct bpf_offloaded_map *offmap = map_to_offmap(map);
rtnl_lock();
down_write(&bpf_devs_lock);
if (offmap->netdev)
__bpf_map_offload_destroy(offmap);
up_write(&bpf_devs_lock);
rtnl_unlock();
kfree(offmap);
}
int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value)
{
struct bpf_offloaded_map *offmap = map_to_offmap(map);
int ret = -ENODEV;
down_read(&bpf_devs_lock);
if (offmap->netdev)
ret = offmap->dev_ops->map_lookup_elem(offmap, key, value);
up_read(&bpf_devs_lock);
return ret;
}
int bpf_map_offload_update_elem(struct bpf_map *map,
void *key, void *value, u64 flags)
{
struct bpf_offloaded_map *offmap = map_to_offmap(map);
int ret = -ENODEV;
if (unlikely(flags > BPF_EXIST))
return -EINVAL;
down_read(&bpf_devs_lock);
if (offmap->netdev)
ret = offmap->dev_ops->map_update_elem(offmap, key, value,
flags);
up_read(&bpf_devs_lock);
return ret;
}
int bpf_map_offload_delete_elem(struct bpf_map *map, void *key)
{
struct bpf_offloaded_map *offmap = map_to_offmap(map);
int ret = -ENODEV;
down_read(&bpf_devs_lock);
if (offmap->netdev)
ret = offmap->dev_ops->map_delete_elem(offmap, key);
up_read(&bpf_devs_lock);
return ret;
}
int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_offloaded_map *offmap = map_to_offmap(map);
int ret = -ENODEV;
down_read(&bpf_devs_lock);
if (offmap->netdev)
ret = offmap->dev_ops->map_get_next_key(offmap, key, next_key);
up_read(&bpf_devs_lock);
return ret;
}
bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map)
{
struct bpf_offloaded_map *offmap;
struct bpf_prog_offload *offload;
bool ret;
if (!!bpf_prog_is_dev_bound(prog->aux) != !!bpf_map_is_dev_bound(map))
return false;
if (!bpf_prog_is_dev_bound(prog->aux))
return true;
down_read(&bpf_devs_lock);
offload = prog->aux->offload;
offmap = map_to_offmap(map);
ret = offload && offload->netdev == offmap->netdev;
up_read(&bpf_devs_lock);
return ret;
}
static void bpf_offload_orphan_all_progs(struct net_device *netdev)
{
struct bpf_prog_offload *offload, *tmp;
list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads)
if (offload->netdev == netdev)
__bpf_prog_offload_destroy(offload->prog);
}
static void bpf_offload_orphan_all_maps(struct net_device *netdev)
{
struct bpf_offloaded_map *offmap, *tmp;
list_for_each_entry_safe(offmap, tmp, &bpf_map_offload_devs, offloads)
if (offmap->netdev == netdev)
__bpf_map_offload_destroy(offmap);
}
static int bpf_offload_notification(struct notifier_block *notifier, static int bpf_offload_notification(struct notifier_block *notifier,
ulong event, void *ptr) ulong event, void *ptr)
{ {
struct net_device *netdev = netdev_notifier_info_to_dev(ptr); struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
struct bpf_dev_offload *offload, *tmp;
ASSERT_RTNL(); ASSERT_RTNL();
...@@ -253,11 +442,8 @@ static int bpf_offload_notification(struct notifier_block *notifier, ...@@ -253,11 +442,8 @@ static int bpf_offload_notification(struct notifier_block *notifier,
break; break;
down_write(&bpf_devs_lock); down_write(&bpf_devs_lock);
list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, bpf_offload_orphan_all_progs(netdev);
offloads) { bpf_offload_orphan_all_maps(netdev);
if (offload->netdev == netdev)
__bpf_prog_offload_destroy(offload->prog);
}
up_write(&bpf_devs_lock); up_write(&bpf_devs_lock);
break; break;
default: default:
......
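For orientation on the driver side (a sketch only, not taken from this patch; the my_* names are hypothetical), an offload-capable driver answers the new netdev_bpf commands from its ndo_bpf callback and publishes the per-map operations that bpf_map_offload_lookup_elem() and friends above dispatch through offmap->dev_ops:

#include <linux/bpf.h>
#include <linux/netdevice.h>

/* Placeholder per-map handlers, implemented elsewhere in the driver. */
int my_map_get_next_key(struct bpf_offloaded_map *offmap, void *key, void *next_key);
int my_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value);
int my_map_update_elem(struct bpf_offloaded_map *offmap, void *key, void *value, u64 flags);
int my_map_delete_elem(struct bpf_offloaded_map *offmap, void *key);

static const struct bpf_map_dev_ops my_map_dev_ops = {
	.map_get_next_key	= my_map_get_next_key,
	.map_lookup_elem	= my_map_lookup_elem,
	.map_update_elem	= my_map_update_elem,
	.map_delete_elem	= my_map_delete_elem,
};

static int my_ndo_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_MAP_ALLOC:
		/* Allocate backing storage on the device here, then publish
		 * the ops used by the bpf_map_offload_*_elem() helpers.
		 */
		bpf->offmap->dev_ops = &my_map_dev_ops;
		return 0;
	case BPF_OFFLOAD_MAP_FREE:
		/* Release the device-side storage here. */
		return 0;
	default:
		return -EINVAL;
	}
}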
...@@ -513,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) ...@@ -513,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
if (!stab) if (!stab)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
/* mandatory map attributes */ bpf_map_init_from_attr(&stab->map, attr);
stab->map.map_type = attr->map_type;
stab->map.key_size = attr->key_size;
stab->map.value_size = attr->value_size;
stab->map.max_entries = attr->max_entries;
stab->map.map_flags = attr->map_flags;
stab->map.numa_node = bpf_map_attr_numa_node(attr);
/* make sure page count doesn't overflow */ /* make sure page count doesn't overflow */
cost = (u64) stab->map.max_entries * sizeof(struct sock *); cost = (u64) stab->map.max_entries * sizeof(struct sock *);
......
...@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) ...@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (cost >= U32_MAX - PAGE_SIZE) if (cost >= U32_MAX - PAGE_SIZE)
goto free_smap; goto free_smap;
smap->map.map_type = attr->map_type; bpf_map_init_from_attr(&smap->map, attr);
smap->map.key_size = attr->key_size;
smap->map.value_size = value_size; smap->map.value_size = value_size;
smap->map.max_entries = attr->max_entries;
smap->map.map_flags = attr->map_flags;
smap->n_buckets = n_buckets; smap->n_buckets = n_buckets;
smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT; smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
smap->map.numa_node = bpf_map_attr_numa_node(attr);
err = bpf_map_precharge_memlock(smap->map.pages); err = bpf_map_precharge_memlock(smap->map.pages);
if (err) if (err)
......
...@@ -94,18 +94,34 @@ static int check_uarg_tail_zero(void __user *uaddr, ...@@ -94,18 +94,34 @@ static int check_uarg_tail_zero(void __user *uaddr,
return 0; return 0;
} }
const struct bpf_map_ops bpf_map_offload_ops = {
.map_alloc = bpf_map_offload_map_alloc,
.map_free = bpf_map_offload_map_free,
};
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{ {
const struct bpf_map_ops *ops;
struct bpf_map *map; struct bpf_map *map;
int err;
if (attr->map_type >= ARRAY_SIZE(bpf_map_types) || if (attr->map_type >= ARRAY_SIZE(bpf_map_types))
!bpf_map_types[attr->map_type]) return ERR_PTR(-EINVAL);
ops = bpf_map_types[attr->map_type];
if (!ops)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
map = bpf_map_types[attr->map_type]->map_alloc(attr); if (ops->map_alloc_check) {
err = ops->map_alloc_check(attr);
if (err)
return ERR_PTR(err);
}
if (attr->map_ifindex)
ops = &bpf_map_offload_ops;
map = ops->map_alloc(attr);
if (IS_ERR(map)) if (IS_ERR(map))
return map; return map;
map->ops = bpf_map_types[attr->map_type]; map->ops = ops;
map->map_type = attr->map_type; map->map_type = attr->map_type;
return map; return map;
} }
...@@ -134,6 +150,16 @@ void bpf_map_area_free(void *area) ...@@ -134,6 +150,16 @@ void bpf_map_area_free(void *area)
kvfree(area); kvfree(area);
} }
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
{
map->map_type = attr->map_type;
map->key_size = attr->key_size;
map->value_size = attr->value_size;
map->max_entries = attr->max_entries;
map->map_flags = attr->map_flags;
map->numa_node = bpf_map_attr_numa_node(attr);
}
int bpf_map_precharge_memlock(u32 pages) int bpf_map_precharge_memlock(u32 pages)
{ {
struct user_struct *user = get_current_user(); struct user_struct *user = get_current_user();
...@@ -189,16 +215,25 @@ static int bpf_map_alloc_id(struct bpf_map *map) ...@@ -189,16 +215,25 @@ static int bpf_map_alloc_id(struct bpf_map *map)
return id > 0 ? 0 : id; return id > 0 ? 0 : id;
} }
static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
{ {
unsigned long flags; unsigned long flags;
/* Offloaded maps are removed from the IDR store when their device
* disappears - even if someone holds an fd to them they are unusable,
* the memory is gone, all ops will fail; they are simply waiting for
* refcnt to drop to be freed.
*/
if (!map->id)
return;
if (do_idr_lock) if (do_idr_lock)
spin_lock_irqsave(&map_idr_lock, flags); spin_lock_irqsave(&map_idr_lock, flags);
else else
__acquire(&map_idr_lock); __acquire(&map_idr_lock);
idr_remove(&map_idr, map->id); idr_remove(&map_idr, map->id);
map->id = 0;
if (do_idr_lock) if (do_idr_lock)
spin_unlock_irqrestore(&map_idr_lock, flags); spin_unlock_irqrestore(&map_idr_lock, flags);
...@@ -378,7 +413,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src) ...@@ -378,7 +413,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
return 0; return 0;
} }
#define BPF_MAP_CREATE_LAST_FIELD map_name #define BPF_MAP_CREATE_LAST_FIELD map_ifindex
/* called via syscall */ /* called via syscall */
static int map_create(union bpf_attr *attr) static int map_create(union bpf_attr *attr)
{ {
...@@ -566,8 +601,10 @@ static int map_lookup_elem(union bpf_attr *attr) ...@@ -566,8 +601,10 @@ static int map_lookup_elem(union bpf_attr *attr)
if (!value) if (!value)
goto free_key; goto free_key;
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || if (bpf_map_is_dev_bound(map)) {
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { err = bpf_map_offload_lookup_elem(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value); err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value); err = bpf_percpu_array_copy(map, key, value);
...@@ -654,7 +691,10 @@ static int map_update_elem(union bpf_attr *attr) ...@@ -654,7 +691,10 @@ static int map_update_elem(union bpf_attr *attr)
goto free_value; goto free_value;
/* Need to create a kthread, thus must support schedule */ /* Need to create a kthread, thus must support schedule */
if (map->map_type == BPF_MAP_TYPE_CPUMAP) { if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_update_elem(map, key, value, attr->flags);
goto out;
} else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
err = map->ops->map_update_elem(map, key, value, attr->flags); err = map->ops->map_update_elem(map, key, value, attr->flags);
goto out; goto out;
} }
...@@ -731,6 +771,11 @@ static int map_delete_elem(union bpf_attr *attr) ...@@ -731,6 +771,11 @@ static int map_delete_elem(union bpf_attr *attr)
goto err_put; goto err_put;
} }
if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_delete_elem(map, key);
goto out;
}
preempt_disable(); preempt_disable();
__this_cpu_inc(bpf_prog_active); __this_cpu_inc(bpf_prog_active);
rcu_read_lock(); rcu_read_lock();
...@@ -738,7 +783,7 @@ static int map_delete_elem(union bpf_attr *attr) ...@@ -738,7 +783,7 @@ static int map_delete_elem(union bpf_attr *attr)
rcu_read_unlock(); rcu_read_unlock();
__this_cpu_dec(bpf_prog_active); __this_cpu_dec(bpf_prog_active);
preempt_enable(); preempt_enable();
out:
if (!err) if (!err)
trace_bpf_map_delete_elem(map, ufd, key); trace_bpf_map_delete_elem(map, ufd, key);
kfree(key); kfree(key);
...@@ -788,9 +833,15 @@ static int map_get_next_key(union bpf_attr *attr) ...@@ -788,9 +833,15 @@ static int map_get_next_key(union bpf_attr *attr)
if (!next_key) if (!next_key)
goto free_key; goto free_key;
if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_get_next_key(map, key, next_key);
goto out;
}
rcu_read_lock(); rcu_read_lock();
err = map->ops->map_get_next_key(map, key, next_key); err = map->ops->map_get_next_key(map, key, next_key);
rcu_read_unlock(); rcu_read_unlock();
out:
if (err) if (err)
goto free_next_key; goto free_next_key;
......
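Finally, a control-plane usage sketch (not part of the diff; it reuses the hypothetical map fd from the earlier BPF_MAP_CREATE example): since offloaded programs may only perform lookups, userspace populates and inspects the device map through the regular element syscalls, which the hunks above now route to bpf_map_offload_update_elem() and bpf_map_offload_lookup_elem().

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: write one element of an offloaded map and read it back.
 * The __u32 key / __u64 value layout is illustrative only.
 */
static int offmap_set_and_get(int map_fd, __u32 key, __u64 *value)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (__u64)(unsigned long)&key;
	attr.value  = (__u64)(unsigned long)value;
	attr.flags  = BPF_ANY;
	if (syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)))
		return -1;

	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}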
...@@ -4816,6 +4816,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, ...@@ -4816,6 +4816,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return -EINVAL; return -EINVAL;
} }
} }
if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
!bpf_offload_dev_match(prog, map)) {
verbose(env, "offload device mismatch between prog and map\n");
return -EINVAL;
}
return 0; return 0;
} }
......
...@@ -245,6 +245,7 @@ union bpf_attr { ...@@ -245,6 +245,7 @@ union bpf_attr {
* BPF_F_NUMA_NODE is set). * BPF_F_NUMA_NODE is set).
*/ */
char map_name[BPF_OBJ_NAME_LEN]; char map_name[BPF_OBJ_NAME_LEN];
__u32 map_ifindex; /* ifindex of netdev to create on */
}; };
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
......