Commit c6c580d7 authored by Jakub Kicinski, committed by David S. Miller

nfp: bpf: move to new BPF program offload infrastructure

The following steps are taken in the driver to offload an XDP program:

XDP_SETUP_PROG:
 * prepare:
   - allocate program state;
   - run verifier (bpf_analyzer());
   - run translation;
 * load:
   - stop old program if needed;
   - load program;
   - enable BPF if not enabled;
 * clean up:
   - free program image.

With the new infrastructure the flow will look like this (see the
dispatch sketch after this list):

BPF_OFFLOAD_VERIFIER_PREP:
   - allocate program state;
BPF_OFFLOAD_TRANSLATE:
   - run translation;
XDP_SETUP_PROG:
   - stop old program if needed;
   - load program;
   - enable BPF if not enabled;
BPF_OFFLOAD_DESTROY:
   - free program image.
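
As a rough, driver-agnostic sketch of that dispatch (compare the
nfp_net_xdp() hunk at the end of the diff): struct netdev_bpf, the
BPF_OFFLOAD_*/XDP_SETUP_PROG commands and the union members are the
kernel API touched here, while my_ndo_bpf() and the my_*() helpers are
purely illustrative placeholders.

  #include <linux/netdevice.h>	/* struct netdev_bpf, BPF_OFFLOAD_* commands */
  #include <linux/bpf.h>

  /* Placeholder prototypes for the driver's own stages (illustrative only). */
  static int my_verifier_prep(struct net_device *netdev, struct netdev_bpf *bpf);
  static int my_translate(struct net_device *netdev, struct bpf_prog *prog);
  static int my_setup_prog(struct net_device *netdev, struct bpf_prog *prog);
  static int my_destroy(struct net_device *netdev, struct bpf_prog *prog);

  static int my_ndo_bpf(struct net_device *netdev, struct netdev_bpf *bpf)
  {
          switch (bpf->command) {
          case BPF_OFFLOAD_VERIFIER_PREP:
                  /* allocate program state, set bpf->verifier.ops */
                  return my_verifier_prep(netdev, bpf);
          case BPF_OFFLOAD_TRANSLATE:
                  /* run translation */
                  return my_translate(netdev, bpf->offload.prog);
          case XDP_SETUP_PROG:
                  /* stop old program if needed, load, enable BPF */
                  return my_setup_prog(netdev, bpf->prog);
          case BPF_OFFLOAD_DESTROY:
                  /* free program image */
                  return my_destroy(netdev, bpf->offload.prog);
          default:
                  return -EINVAL;
          }
  }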

Take advantage of the new infrastructure.  Allocation of driver
metadata has to be moved from jit.c to offload.c, since it is now
done at a different stage.  Since the new infrastructure provides no
separate driver-private data for the verification step, move the
temporary nfp_meta pointer into nfp_prog.  We will now use the user
space context offsets (struct __sk_buff and struct xdp_md) instead of
the kernel-internal structures.
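
For context, a condensed sketch of how that works out (based on the
offload.c and verifier.c hunks below; the program-preparation steps,
per-instruction checks and error handling are omitted here): the state
allocated at verifier-prep time is stashed in
prog->aux->offload->dev_priv and recovered from there by the later
stages.

  /* Condensed sketch, not the full implementation. */
  int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
                            struct netdev_bpf *bpf)
  {
          struct bpf_prog *prog = bpf->verifier.prog;
          struct nfp_prog *nfp_prog;

          nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
          if (!nfp_prog)
                  return -ENOMEM;

          /* driver state now lives on the prog, no analyzer_priv needed */
          prog->aux->offload->dev_priv = nfp_prog;
          nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
          bpf->verifier.ops = &nfp_bpf_analyzer_ops;
          return 0;
  }

  static int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
                             int prev_insn_idx)
  {
          struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;

          nfp_prog->verifier_meta = nfp_bpf_goto_meta(nfp_prog,
                                                      nfp_prog->verifier_meta,
                                                      insn_idx, env->prog->len);
          return 0;
  }
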
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9314c442
@@ -1427,19 +1427,18 @@ static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
swreg dst = reg_both(meta->insn.dst_reg * 2);
switch (meta->insn.off) {
case offsetof(struct sk_buff, len):
if (size != FIELD_SIZEOF(struct sk_buff, len))
case offsetof(struct __sk_buff, len):
if (size != FIELD_SIZEOF(struct __sk_buff, len))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
break;
case offsetof(struct sk_buff, data):
if (size != sizeof(void *))
case offsetof(struct __sk_buff, data):
if (size != FIELD_SIZEOF(struct __sk_buff, data))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
case offsetof(struct sk_buff, cb) +
offsetof(struct bpf_skb_data_end, data_end):
if (size != sizeof(void *))
case offsetof(struct __sk_buff, data_end):
if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
@@ -1458,14 +1457,15 @@ static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
{
swreg dst = reg_both(meta->insn.dst_reg * 2);
if (size != sizeof(void *))
return -EINVAL;
switch (meta->insn.off) {
case offsetof(struct xdp_buff, data):
case offsetof(struct xdp_md, data):
if (size != FIELD_SIZEOF(struct xdp_md, data))
return -EOPNOTSUPP;
wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
break;
case offsetof(struct xdp_buff, data_end):
case offsetof(struct xdp_md, data_end):
if (size != FIELD_SIZEOF(struct xdp_md, data_end))
return -EOPNOTSUPP;
emit_alu(nfp_prog, dst,
plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
break;
@@ -2243,19 +2243,10 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
return 0;
}
/**
* nfp_bpf_jit() - translate BPF code into NFP assembly
* @nfp_prog: nfp_prog prepared based on @filter
* @filter: kernel BPF filter struct
*/
int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter)
int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
int ret;
ret = nfp_prog_verify(nfp_prog, filter);
if (ret)
return ret;
ret = nfp_bpf_optimize(nfp_prog);
if (ret)
return ret;
@@ -173,4 +173,8 @@ const struct nfp_app_type app_bpf = {
.setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy,
.xdp_offload = nfp_bpf_xdp_offload,
.bpf_verifier_prep = nfp_bpf_verifier_prep,
.bpf_translate = nfp_bpf_translate,
.bpf_destroy = nfp_bpf_destroy,
};
@@ -139,6 +139,7 @@ static inline u8 mbpf_mode(const struct nfp_insn_meta *meta)
* @prog: machine code
* @prog_len: number of valid instructions in @prog array
* @__prog_alloc_len: alloc size of @prog array
* @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type
* @start_off: address of the first instruction in the memory
* @tgt_out: jump target for normal exit
@@ -154,6 +155,8 @@ struct nfp_prog {
unsigned int prog_len;
unsigned int __prog_alloc_len;
struct nfp_insn_meta *verifier_meta;
enum bpf_prog_type type;
unsigned int start_off;
@@ -169,13 +172,21 @@ struct nfp_prog {
struct list_head insns;
};
int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter);
int nfp_bpf_jit(struct nfp_prog *prog);
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
extern const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops;
struct netdev_bpf;
struct nfp_app;
struct nfp_net;
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog);
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf);
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
#endif
@@ -84,14 +84,17 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog);
}
static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog)
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{
struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog;
int ret;
nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (!nfp_prog)
return NULL;
return -ENOMEM;
prog->aux->offload->dev_priv = nfp_prog;
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
@@ -100,18 +103,21 @@ static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog)
if (ret)
goto err_free;
return nfp_prog;
nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
bpf->verifier.ops = &nfp_bpf_analyzer_ops;
return 0;
err_free:
nfp_prog_free(nfp_prog);
return NULL;
return ret;
}
static int
nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog,
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size;
unsigned int max_instr;
@@ -133,55 +139,38 @@ nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog,
if (!nfp_prog->prog)
return -ENOMEM;
return nfp_bpf_jit(nfp_prog, prog);
return nfp_bpf_jit(nfp_prog);
}
static void nfp_bpf_destroy(struct nfp_prog *nfp_prog)
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
return 0;
}
static struct nfp_prog *
nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
dma_addr_t *dma_addr)
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_prog *nfp_prog;
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu;
dma_addr_t dma_addr;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
return NULL;
return -EOPNOTSUPP;
}
nfp_prog = nfp_bpf_verifier_prep(prog);
if (!nfp_prog)
return NULL;
err = nfp_bpf_translate(nn, nfp_prog, prog);
if (err)
goto err_destroy_prog;
*dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
if (dma_mapping_error(nn->dp.dev, *dma_addr))
goto err_destroy_prog;
return 0;
err_destroy_prog:
nfp_bpf_destroy(nfp_prog);
return NULL;
}
static void
nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog,
dma_addr_t dma_addr)
{
int err;
if (dma_mapping_error(nn->dp.dev, dma_addr))
return -ENOMEM;
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
@@ -193,7 +182,8 @@ nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog,
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
nfp_bpf_destroy(nfp_prog);
return err;
}
static void nfp_net_bpf_start(struct nfp_net *nn)
@@ -222,8 +212,10 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog)
{
struct nfp_prog *nfp_prog;
dma_addr_t dma_addr;
int err;
if (prog && !prog->aux->offload)
return -EINVAL;
if (prog && old_prog) {
u8 cap;
@@ -242,11 +234,10 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
if (old_prog && !prog)
return nfp_net_bpf_stop(nn);
nfp_prog = nfp_net_bpf_offload_prepare(nn, prog, &dma_addr);
if (!nfp_prog)
return -EINVAL;
err = nfp_net_bpf_load(nn, prog);
if (err)
return err;
nfp_net_bpf_load(nn, nfp_prog, dma_addr);
if (!old_prog)
nfp_net_bpf_start(nn);
@@ -40,12 +40,6 @@
#include "main.h"
/* Analyzer/verifier definitions */
struct nfp_bpf_analyzer_priv {
struct nfp_prog *prog;
struct nfp_insn_meta *meta;
};
static struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns)
@@ -171,11 +165,11 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
static int
nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
{
struct nfp_bpf_analyzer_priv *priv = env->analyzer_priv;
struct nfp_insn_meta *meta = priv->meta;
struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv;
struct nfp_insn_meta *meta = nfp_prog->verifier_meta;
meta = nfp_bpf_goto_meta(priv->prog, meta, insn_idx, env->prog->len);
priv->meta = meta;
meta = nfp_bpf_goto_meta(nfp_prog, meta, insn_idx, env->prog->len);
nfp_prog->verifier_meta = meta;
if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) {
@@ -184,39 +178,18 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
}
if (meta->insn.code == (BPF_JMP | BPF_EXIT))
return nfp_bpf_check_exit(priv->prog, env);
return nfp_bpf_check_exit(nfp_prog, env);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_LDX | BPF_MEM))
return nfp_bpf_check_ptr(priv->prog, meta, env,
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.src_reg);
if ((meta->insn.code & ~BPF_SIZE_MASK) == (BPF_STX | BPF_MEM))
return nfp_bpf_check_ptr(priv->prog, meta, env,
return nfp_bpf_check_ptr(nfp_prog, meta, env,
meta->insn.dst_reg);
return 0;
}
static const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
const struct bpf_ext_analyzer_ops nfp_bpf_analyzer_ops = {
.insn_hook = nfp_verify_insn,
};
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog)
{
struct nfp_bpf_analyzer_priv *priv;
int ret;
nfp_prog->stack_depth = prog->aux->stack_depth;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->prog = nfp_prog;
priv->meta = nfp_prog_first_meta(nfp_prog);
ret = bpf_analyzer(prog, &nfp_bpf_analyzer_ops, priv);
kfree(priv);
return ret;
}
@@ -42,6 +42,7 @@
struct bpf_prog;
struct net_device;
struct netdev_bpf;
struct pci_dev;
struct sk_buff;
struct sk_buff;
@@ -83,6 +84,9 @@ extern const struct nfp_app_type app_flower;
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
* @bpf_verifier_prep: verifier prep for dev-specific BPF programs
* @bpf_translate: translate call for dev-specific BPF programs
* @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
@@ -118,6 +122,12 @@ struct nfp_app_type {
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf);
int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
@@ -271,6 +281,33 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog);
}
static inline int
nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{
if (!app || !app->type->bpf_verifier_prep)
return -EOPNOTSUPP;
return app->type->bpf_verifier_prep(app, nn, bpf);
}
static inline int
nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
if (!app || !app->type->bpf_translate)
return -EOPNOTSUPP;
return app->type->bpf_translate(app, nn, prog);
}
static inline int
nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
if (!app || !app->type->bpf_destroy)
return -EOPNOTSUPP;
return app->type->bpf_destroy(app, nn, prog);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{
trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
@@ -3393,6 +3393,14 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
xdp->prog_attached = XDP_ATTACHED_HW;
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
return 0;
case BPF_OFFLOAD_VERIFIER_PREP:
return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
case BPF_OFFLOAD_TRANSLATE:
return nfp_app_bpf_translate(nn->app, nn,
xdp->offload.prog);
case BPF_OFFLOAD_DESTROY:
return nfp_app_bpf_destroy(nn->app, nn,
xdp->offload.prog);
default:
return -EINVAL;
}