Commit 0c9864c0 authored by Jakub Kicinski, committed by Daniel Borkmann

nfp: bpf: allow control message sizing for map ops

In the current ABI the size of the messages carrying map elements was
statically defined to at most 16 words of key and 16 words of value
(an NFP word is 4 bytes).  We should not make this assumption; use the
max key and value sizes from the BPF capability instead.

To make sure old kernels don't get surprised by larger (or smaller)
messages, bump the FW ABI version to 3 when the key/value size differs
from 16 words.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 9bbdd41b
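
The sizing scheme is plain arithmetic: a map-op request or reply is its fixed
header plus (key size + value size) bytes per element, and the control channel
MTU has to fit at least one element in each direction.  A minimal standalone
sketch of that arithmetic follows; the header structs here are simplified
stand-ins and the ABIv3 capability values are made-up examples, only the
16-longword ABIv2 defaults and the shape of the computation come from the
patch below.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-ins for the driver's fixed message headers; the real
 * structs (cmsg_req_map_op / cmsg_reply_map_op) live in the firmware ABI
 * header changed below. */
struct req_map_op_hdr   { uint32_t hdr, tid, count, flags; };
struct reply_map_op_hdr { uint64_t reply_hdr; uint32_t count, resv; };

#define CMSG_MAP_KEY_LW     16      /* ABIv2 fixed key size, in 4-byte words */
#define CMSG_MAP_VALUE_LW   16      /* ABIv2 fixed value size, in 4-byte words */
#define NFP_NET_DEFAULT_MTU 1500    /* assumed default MTU, for illustration */

static unsigned int max3u(unsigned int a, unsigned int b, unsigned int c)
{
        unsigned int m = a > b ? a : b;

        return m > c ? m : c;
}

/* Request/reply size for n elements: fixed header + n * (key + value). */
static unsigned int req_size(unsigned int key_sz, unsigned int val_sz,
                             unsigned int n)
{
        return sizeof(struct req_map_op_hdr) + (key_sz + val_sz) * n;
}

static unsigned int reply_size(unsigned int key_sz, unsigned int val_sz,
                               unsigned int n)
{
        return sizeof(struct reply_map_op_hdr) + (key_sz + val_sz) * n;
}

int main(void)
{
        /* ABIv2: sizes are fixed at 16 words of key and 16 words of value. */
        unsigned int key_sz = CMSG_MAP_KEY_LW * 4;
        unsigned int val_sz = CMSG_MAP_VALUE_LW * 4;

        printf("ABIv2 one-element request: %u bytes\n",
               req_size(key_sz, val_sz, 1));

        /* ABIv3: sizes come from the BPF capability instead (example values). */
        key_sz = 40;
        val_sz = 8;
        printf("ABIv3 one-element request: %u bytes\n",
               req_size(key_sz, val_sz, 1));

        /* The control MTU must fit at least one element each way. */
        printf("ctrl MTU: %u\n", max3u(NFP_NET_DEFAULT_MTU,
                                       req_size(key_sz, val_sz, 1),
                                       reply_size(key_sz, val_sz, 1)));
        return 0;
}
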
@@ -89,15 +89,32 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
 	return skb;
 }
 
+static unsigned int
+nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
+	unsigned int size;
+
+	size = sizeof(struct cmsg_req_map_op);
+	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
+
+	return size;
+}
+
 static struct sk_buff *
 nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
 {
-	unsigned int size;
+	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
+}
 
-	size = sizeof(struct cmsg_req_map_op);
-	size += sizeof(struct cmsg_key_value_pair) * n;
+static unsigned int
+nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
+{
+	unsigned int size;
 
-	return nfp_bpf_cmsg_alloc(bpf, size);
+	size = sizeof(struct cmsg_reply_map_op);
+	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;
+
+	return size;
 }
 
 static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
@@ -338,6 +355,34 @@ void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
 	dev_consume_skb_any(skb);
 }
 
+static void *
+nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+		     unsigned int n)
+{
+	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
+		     unsigned int n)
+{
+	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+		       unsigned int n)
+{
+	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
+}
+
+static void *
+nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
+		       unsigned int n)
+{
+	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
+}
+
 static int
 nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
 		      enum nfp_bpf_cmsg_type op,
@@ -366,12 +411,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
 
 	/* Copy inputs */
 	if (key)
-		memcpy(&req->elem[0].key, key, map->key_size);
+		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
 	if (value)
-		memcpy(&req->elem[0].value, value, map->value_size);
+		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
+		       map->value_size);
 
 	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
-				       sizeof(*reply) + sizeof(*reply->elem));
+				       nfp_bpf_cmsg_map_reply_size(bpf, 1));
 	if (IS_ERR(skb))
 		return PTR_ERR(skb);
 
@@ -382,9 +428,11 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
 
 	/* Copy outputs */
 	if (out_key)
-		memcpy(out_key, &reply->elem[0].key, map->key_size);
+		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
+		       map->key_size);
 	if (out_value)
-		memcpy(out_value, &reply->elem[0].value, map->value_size);
+		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
+		       map->value_size);
 
 	dev_consume_skb_any(skb);
 
@@ -428,6 +476,13 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
 				      key, NULL, 0, next_key, NULL);
 }
 
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
+{
+	return max3((unsigned int)NFP_NET_DEFAULT_MTU,
+		    nfp_bpf_cmsg_map_req_size(bpf, 1),
+		    nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
 {
 	struct nfp_app_bpf *bpf = app->priv;
...
@@ -99,6 +99,7 @@ enum nfp_bpf_cmsg_type {
 #define CMSG_TYPE_MAP_REPLY_BIT 7
 #define __CMSG_REPLY(req) (BIT(CMSG_TYPE_MAP_REPLY_BIT) | (req))
 
+/* BPF ABIv2 fixed-length control message fields */
 #define CMSG_MAP_KEY_LW 16
 #define CMSG_MAP_VALUE_LW 16
 
@@ -148,24 +149,19 @@ struct cmsg_reply_map_free_tbl {
 	__be32 count;
 };
 
-struct cmsg_key_value_pair {
-	__be32 key[CMSG_MAP_KEY_LW];
-	__be32 value[CMSG_MAP_VALUE_LW];
-};
-
 struct cmsg_req_map_op {
 	struct cmsg_hdr hdr;
 	__be32 tid;
 	__be32 count;
 	__be32 flags;
-	struct cmsg_key_value_pair elem[0];
+	u8 data[0];
 };
 
 struct cmsg_reply_map_op {
 	struct cmsg_reply_map_simple reply_hdr;
 	__be32 count;
 	__be32 resv;
-	struct cmsg_key_value_pair elem[0];
+	u8 data[0];
 };
 
 struct cmsg_bpf_event {
...
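
With struct cmsg_key_value_pair gone, an element in the flexible data[] area
is just the key bytes immediately followed by the value bytes, which is what
the nfp_bpf_ctrl_req_key()/nfp_bpf_ctrl_req_val() helpers above index into:
element n's key starts at (key_sz + val_sz) * n and its value key_sz bytes
later.  A rough sketch of assembling a one-element update under that layout,
with made-up struct and function names standing in for the driver's own:

#include <stdint.h>
#include <string.h>

/* Hypothetical flattened view of a map-op request: fixed header fields
 * followed by the variable key/value area. */
struct map_op_req {
        uint32_t hdr, tid, count, flags;   /* stand-in for the real header */
        uint8_t  data[];                   /* key then value, per element */
};

/* Element n's key starts at (key_sz + val_sz) * n; its value follows
 * key_sz bytes later -- the same arithmetic as the driver helpers. */
static uint8_t *elem_key(struct map_op_req *req, unsigned int key_sz,
                         unsigned int val_sz, unsigned int n)
{
        return &req->data[(key_sz + val_sz) * n];
}

static uint8_t *elem_val(struct map_op_req *req, unsigned int key_sz,
                         unsigned int val_sz, unsigned int n)
{
        return &req->data[(key_sz + val_sz) * n + key_sz];
}

/* Fill a one-element update request from caller-provided key/value buffers. */
static void fill_update_req(struct map_op_req *req, unsigned int key_sz,
                            unsigned int val_sz, const void *key,
                            const void *value)
{
        req->count = 1;   /* the real message uses big-endian fields */
        memcpy(elem_key(req, key_sz, val_sz, 0), key, key_sz);
        memcpy(elem_val(req, key_sz, val_sz, 0), value, val_sz);
}
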
@@ -356,7 +356,7 @@ nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
 	}
 
 	bpf->abi_version = readl(value);
-	if (bpf->abi_version != 2) {
+	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
 		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
 			 bpf->abi_version);
 		bpf->abi_version = 0;
@@ -486,6 +486,15 @@ static int nfp_bpf_init(struct nfp_app *app)
 	if (err)
 		goto err_free_neutral_maps;
 
+	if (bpf->abi_version < 3) {
+		bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
+		bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
+	} else {
+		bpf->cmsg_key_sz = bpf->maps.max_key_sz;
+		bpf->cmsg_val_sz = bpf->maps.max_val_sz;
+		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
+	}
+
 	bpf->bpf_dev = bpf_offload_dev_create();
 	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
 	if (err)
...
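
The init-time selection above fixes the element geometry once per device: ABI
versions below 3 keep the old fixed 16-longword key and value, while ABI 3
takes the limits from the firmware's map capability and raises the control
MTU to match.  A condensed standalone restatement of that choice (the
capability numbers are placeholders):

#include <stdio.h>

#define CMSG_MAP_KEY_LW   16
#define CMSG_MAP_VALUE_LW 16

struct bpf_caps {
        unsigned int abi_version;
        unsigned int max_key_sz;   /* from the map capability, in bytes */
        unsigned int max_val_sz;
};

/* Choose cmsg key/value sizes the way nfp_bpf_init() does above. */
static void pick_cmsg_sizes(const struct bpf_caps *caps,
                            unsigned int *key_sz, unsigned int *val_sz)
{
        if (caps->abi_version < 3) {
                *key_sz = CMSG_MAP_KEY_LW * 4;
                *val_sz = CMSG_MAP_VALUE_LW * 4;
        } else {
                *key_sz = caps->max_key_sz;
                *val_sz = caps->max_val_sz;
        }
}

int main(void)
{
        struct bpf_caps caps = { .abi_version = 3, .max_key_sz = 40, .max_val_sz = 8 };
        unsigned int key_sz, val_sz;

        pick_cmsg_sizes(&caps, &key_sz, &val_sz);
        printf("key %u bytes, value %u bytes per element\n", key_sz, val_sz);
        return 0;
}
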
@@ -121,6 +121,9 @@ enum pkt_vec {
  * @cmsg_replies: received cmsg replies waiting to be consumed
  * @cmsg_wq: work queue for waiting for cmsg replies
  *
+ * @cmsg_key_sz: size of key in cmsg element array
+ * @cmsg_val_sz: size of value in cmsg element array
+ *
  * @map_list: list of offloaded maps
  * @maps_in_use: number of currently offloaded maps
  * @map_elems_in_use: number of elements allocated to offloaded maps
@@ -166,6 +169,9 @@ struct nfp_app_bpf {
 	struct sk_buff_head cmsg_replies;
 	struct wait_queue_head cmsg_wq;
 
+	unsigned int cmsg_key_sz;
+	unsigned int cmsg_val_sz;
+
 	struct list_head map_list;
 	unsigned int maps_in_use;
 	unsigned int map_elems_in_use;
@@ -496,6 +502,7 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
 
 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
 
+unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
 long long int
 nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
 void
...
@@ -264,7 +264,6 @@
  * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
  */
 #define NFP_NET_CFG_BPF_ABI 0x0080
-#define NFP_NET_BPF_ABI 2
 #define NFP_NET_CFG_BPF_CAP 0x0081
 #define NFP_NET_BPF_CAP_RELO (1 << 0) /* seamless reload */
 #define NFP_NET_CFG_BPF_MAX_LEN 0x0082
...