Commit 9314c442 authored by Jakub Kicinski, committed by David S. Miller

nfp: bpf: move translation prepare to offload.c

struct nfp_prog is currently only used internally by the translator.
This means there is a lot of parameter passing going on, between
the translator and different stages of offload.  Simplify things
by allocating nfp_prog in offload.c already.

We will now use kmalloc() to allocate the program area and only
DMA map it for the time of loading (instead of allocating DMA
coherent memory upfront).
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c1c88eae
...@@ -2245,58 +2245,27 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore) ...@@ -2245,58 +2245,27 @@ static int nfp_bpf_ustore_calc(struct nfp_prog *nfp_prog, __le64 *ustore)
/** /**
* nfp_bpf_jit() - translate BPF code into NFP assembly * nfp_bpf_jit() - translate BPF code into NFP assembly
* @nfp_prog: nfp_prog prepared based on @filter
* @filter: kernel BPF filter struct * @filter: kernel BPF filter struct
* @prog_mem: memory to store assembler instructions
* @prog_start: offset of the first instruction when loaded
* @prog_done: where to jump on exit
* @prog_sz: size of @prog_mem in instructions
* @res: achieved parameters of translation results
*/ */
int int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter)
nfp_bpf_jit(struct bpf_prog *filter, void *prog_mem,
unsigned int prog_start, unsigned int prog_done,
unsigned int prog_sz, struct nfp_bpf_result *res)
{ {
struct nfp_prog *nfp_prog;
int ret; int ret;
nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (!nfp_prog)
return -ENOMEM;
INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = filter->type;
nfp_prog->start_off = prog_start;
nfp_prog->tgt_done = prog_done;
ret = nfp_prog_prepare(nfp_prog, filter->insnsi, filter->len);
if (ret)
goto out;
ret = nfp_prog_verify(nfp_prog, filter); ret = nfp_prog_verify(nfp_prog, filter);
if (ret) if (ret)
goto out; return ret;
ret = nfp_bpf_optimize(nfp_prog); ret = nfp_bpf_optimize(nfp_prog);
if (ret) if (ret)
goto out; return ret;
nfp_prog->prog = prog_mem;
nfp_prog->__prog_alloc_len = prog_sz;
ret = nfp_translate(nfp_prog); ret = nfp_translate(nfp_prog);
if (ret) { if (ret) {
pr_err("Translation failed with error %d (translated: %u)\n", pr_err("Translation failed with error %d (translated: %u)\n",
ret, nfp_prog->n_translated); ret, nfp_prog->n_translated);
ret = -EINVAL; return -EINVAL;
goto out;
} }
ret = nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)prog_mem); return nfp_bpf_ustore_calc(nfp_prog, (__force __le64 *)nfp_prog->prog);
res->n_instr = nfp_prog->prog_len;
out:
nfp_prog_free(nfp_prog);
return ret;
} }
...@@ -169,19 +169,7 @@ struct nfp_prog { ...@@ -169,19 +169,7 @@ struct nfp_prog {
struct list_head insns; struct list_head insns;
}; };
struct nfp_bpf_result { int nfp_bpf_jit(struct nfp_prog *nfp_prog, struct bpf_prog *filter);
unsigned int n_instr;
};
int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt);
void nfp_prog_free(struct nfp_prog *nfp_prog);
int
nfp_bpf_jit(struct bpf_prog *filter, void *prog,
unsigned int prog_start, unsigned int prog_done,
unsigned int prog_sz, struct nfp_bpf_result *res);
int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog); int nfp_prog_verify(struct nfp_prog *nfp_prog, struct bpf_prog *prog);
......
...@@ -51,7 +51,7 @@ ...@@ -51,7 +51,7 @@
#include "../nfp_net_ctrl.h" #include "../nfp_net_ctrl.h"
#include "../nfp_net.h" #include "../nfp_net.h"
int static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
unsigned int cnt) unsigned int cnt)
{ {
...@@ -73,7 +73,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, ...@@ -73,7 +73,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
return 0; return 0;
} }
void nfp_prog_free(struct nfp_prog *nfp_prog) static void nfp_prog_free(struct nfp_prog *nfp_prog)
{ {
struct nfp_insn_meta *meta, *tmp; struct nfp_insn_meta *meta, *tmp;
...@@ -84,25 +84,36 @@ void nfp_prog_free(struct nfp_prog *nfp_prog) ...@@ -84,25 +84,36 @@ void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog); kfree(nfp_prog);
} }
static int static struct nfp_prog *nfp_bpf_verifier_prep(struct bpf_prog *prog)
nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
struct nfp_bpf_result *res,
void **code, dma_addr_t *dma_addr, u16 max_instr)
{ {
unsigned int code_sz = max_instr * sizeof(u64); struct nfp_prog *nfp_prog;
unsigned int stack_size;
u16 start_off, done_off;
unsigned int max_mtu;
int ret; int ret;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
if (max_mtu < nn->dp.netdev->mtu) { if (!nfp_prog)
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n"); return NULL;
return -EOPNOTSUPP;
} INIT_LIST_HEAD(&nfp_prog->insns);
nfp_prog->type = prog->type;
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START); ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE); if (ret)
goto err_free;
return nfp_prog;
err_free:
nfp_prog_free(nfp_prog);
return NULL;
}
static int
nfp_bpf_translate(struct nfp_net *nn, struct nfp_prog *nfp_prog,
struct bpf_prog *prog)
{
unsigned int stack_size;
unsigned int max_instr;
stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64; stack_size = nn_readb(nn, NFP_NET_CFG_BPF_STACK_SZ) * 64;
if (prog->aux->stack_depth > stack_size) { if (prog->aux->stack_depth > stack_size) {
...@@ -111,28 +122,68 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog, ...@@ -111,28 +122,68 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL); nfp_prog->stack_depth = prog->aux->stack_depth;
if (!*code) nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog)
return -ENOMEM; return -ENOMEM;
ret = nfp_bpf_jit(prog, *code, start_off, done_off, max_instr, res); return nfp_bpf_jit(nfp_prog, prog);
if (ret) }
goto out;
static void nfp_bpf_destroy(struct nfp_prog *nfp_prog)
{
kfree(nfp_prog->prog);
nfp_prog_free(nfp_prog);
}
static struct nfp_prog *
nfp_net_bpf_offload_prepare(struct nfp_net *nn, struct bpf_prog *prog,
dma_addr_t *dma_addr)
{
struct nfp_prog *nfp_prog;
unsigned int max_mtu;
int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
return NULL;
}
nfp_prog = nfp_bpf_verifier_prep(prog);
if (!nfp_prog)
return NULL;
err = nfp_bpf_translate(nn, nfp_prog, prog);
if (err)
goto err_destroy_prog;
*dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog,
nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
if (dma_mapping_error(nn->dp.dev, *dma_addr))
goto err_destroy_prog;
return 0; return 0;
out: err_destroy_prog:
dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr); nfp_bpf_destroy(nfp_prog);
return ret; return NULL;
} }
static void static void
nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr, nfp_net_bpf_load(struct nfp_net *nn, struct nfp_prog *nfp_prog,
unsigned int code_sz, unsigned int n_instr) dma_addr_t dma_addr)
{ {
int err; int err;
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr); nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
/* Load up the JITed code */ /* Load up the JITed code */
...@@ -140,7 +191,9 @@ nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr, ...@@ -140,7 +191,9 @@ nfp_net_bpf_load(struct nfp_net *nn, void *code, dma_addr_t dma_addr,
if (err) if (err)
nn_err(nn, "FW command error while loading BPF: %d\n", err); nn_err(nn, "FW command error while loading BPF: %d\n", err);
dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr); dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE);
nfp_bpf_destroy(nfp_prog);
} }
static void nfp_net_bpf_start(struct nfp_net *nn) static void nfp_net_bpf_start(struct nfp_net *nn)
...@@ -169,11 +222,8 @@ static int nfp_net_bpf_stop(struct nfp_net *nn) ...@@ -169,11 +222,8 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog) bool old_prog)
{ {
struct nfp_bpf_result res; struct nfp_prog *nfp_prog;
dma_addr_t dma_addr; dma_addr_t dma_addr;
u16 max_instr;
void *code;
int err;
if (prog && old_prog) { if (prog && old_prog) {
u8 cap; u8 cap;
...@@ -192,15 +242,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, ...@@ -192,15 +242,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
if (old_prog && !prog) if (old_prog && !prog)
return nfp_net_bpf_stop(nn); return nfp_net_bpf_stop(nn);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); nfp_prog = nfp_net_bpf_offload_prepare(nn, prog, &dma_addr);
if (!nfp_prog)
err = nfp_net_bpf_offload_prepare(nn, prog, &res, &code, &dma_addr, return -EINVAL;
max_instr);
if (err)
return err;
nfp_net_bpf_load(nn, code, dma_addr, max_instr * sizeof(u64), nfp_net_bpf_load(nn, nfp_prog, dma_addr);
res.n_instr);
if (!old_prog) if (!old_prog)
nfp_net_bpf_start(nn); nfp_net_bpf_start(nn);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment