Commit 632130ed authored by Daniel Borkmann

Merge branch 'bpf-nfp-misc-improvements'

Jakub Kicinski says:

====================
This series starts with a fix to Jesper's recent work, somehow I forgot
about control rings during review.  Second patch is cleaning up a vNIC
header, in kdoc we should not use @ for #define constants.  Aligning of
the top of the stack as well as bottom (last bytes will be unused) helps
the performance.  We should check offload datapath's max MTU when program
is loaded and we can allow TC hw offload flag to be changed freely while
XDP offload is active.

Next group of patches adds more fully featured relocation support.  Due
to limited amount of code space we only load the image to NIC's memory
when program is attached.  Since we can't predict which programs are
loaded later, we should translate as if image was to be loaded at offset
zero and only apply relocations at load time.  Many more advanced features
(eg. tail calls, subprograms, dynamic allocation of program space and
sharing it between ports) will depend on this.

Nic adds support for signed comparison instructions.

Quentin makes use of the verifier log in our driver, the verifier print
function (verbose()) has to be renamed and exported.

v2:
 - replace #define by function aliasing for verbose() in patch 13
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parents 148989d8 ff627e3d
...@@ -87,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn) ...@@ -87,16 +87,21 @@ static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
static int static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{ {
struct nfp_bpf_vnic *bv;
int err; int err;
nn->app_priv = kzalloc(sizeof(struct nfp_bpf_vnic), GFP_KERNEL); bv = kzalloc(sizeof(*bv), GFP_KERNEL);
if (!nn->app_priv) if (!bv)
return -ENOMEM; return -ENOMEM;
nn->app_priv = bv;
err = nfp_app_nic_vnic_alloc(app, nn, id); err = nfp_app_nic_vnic_alloc(app, nn, id);
if (err) if (err)
goto err_free_priv; goto err_free_priv;
bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
return 0; return 0;
err_free_priv: err_free_priv:
kfree(nn->app_priv); kfree(nn->app_priv);
...@@ -191,7 +196,27 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev, ...@@ -191,7 +196,27 @@ static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn) static bool nfp_bpf_tc_busy(struct nfp_app *app, struct nfp_net *nn)
{ {
return nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF; struct nfp_bpf_vnic *bv = nn->app_priv;
return !!bv->tc_prog;
}
static int
nfp_bpf_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
unsigned int max_mtu;
if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return 0;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (new_mtu > max_mtu) {
nn_info(nn, "BPF offload active, MTU over %u not supported\n",
max_mtu);
return -EBUSY;
}
return 0;
} }
static int static int
...@@ -311,6 +336,8 @@ const struct nfp_app_type app_bpf = { ...@@ -311,6 +336,8 @@ const struct nfp_app_type app_bpf = {
.init = nfp_bpf_init, .init = nfp_bpf_init,
.clean = nfp_bpf_clean, .clean = nfp_bpf_clean,
.change_mtu = nfp_bpf_change_mtu,
.extra_cap = nfp_bpf_extra_cap, .extra_cap = nfp_bpf_extra_cap,
.vnic_alloc = nfp_bpf_vnic_alloc, .vnic_alloc = nfp_bpf_vnic_alloc,
...@@ -318,9 +345,6 @@ const struct nfp_app_type app_bpf = { ...@@ -318,9 +345,6 @@ const struct nfp_app_type app_bpf = {
.setup_tc = nfp_bpf_setup_tc, .setup_tc = nfp_bpf_setup_tc,
.tc_busy = nfp_bpf_tc_busy, .tc_busy = nfp_bpf_tc_busy,
.bpf = nfp_ndo_bpf,
.xdp_offload = nfp_bpf_xdp_offload, .xdp_offload = nfp_bpf_xdp_offload,
.bpf_verifier_prep = nfp_bpf_verifier_prep,
.bpf_translate = nfp_bpf_translate,
.bpf_destroy = nfp_bpf_destroy,
}; };
...@@ -42,17 +42,28 @@ ...@@ -42,17 +42,28 @@
#include "../nfp_asm.h" #include "../nfp_asm.h"
/* For branch fixup logic use up-most byte of branch instruction as scratch /* For relocation logic use up-most byte of branch instruction as scratch
* area. Remember to clear this before sending instructions to HW! * area. Remember to clear this before sending instructions to HW!
*/ */
#define OP_BR_SPECIAL 0xff00000000000000ULL #define OP_RELO_TYPE 0xff00000000000000ULL
enum br_special { enum nfp_relo_type {
OP_BR_NORMAL = 0, RELO_NONE = 0,
OP_BR_GO_OUT, /* standard internal jumps */
OP_BR_GO_ABORT, RELO_BR_REL,
/* internal jumps to parts of the outro */
RELO_BR_GO_OUT,
RELO_BR_GO_ABORT,
/* external jumps to fixed addresses */
RELO_BR_NEXT_PKT,
}; };
/* To make absolute relocated branches (branches other than RELO_BR_REL)
* distinguishable in user space dumps from normal jumps, add a large offset
* to them.
*/
#define BR_OFF_RELO 15000
enum static_regs { enum static_regs {
STATIC_REG_IMM = 21, /* Bank AB */ STATIC_REG_IMM = 21, /* Bank AB */
STATIC_REG_STACK = 22, /* Bank A */ STATIC_REG_STACK = 22, /* Bank A */
...@@ -191,11 +202,9 @@ static inline bool is_mbpf_store(const struct nfp_insn_meta *meta) ...@@ -191,11 +202,9 @@ static inline bool is_mbpf_store(const struct nfp_insn_meta *meta)
* @__prog_alloc_len: alloc size of @prog array * @__prog_alloc_len: alloc size of @prog array
* @verifier_meta: temporary storage for verifier's insn meta * @verifier_meta: temporary storage for verifier's insn meta
* @type: BPF program type * @type: BPF program type
* @start_off: address of the first instruction in the memory
* @last_bpf_off: address of the last instruction translated from BPF * @last_bpf_off: address of the last instruction translated from BPF
* @tgt_out: jump target for normal exit * @tgt_out: jump target for normal exit
* @tgt_abort: jump target for abort (e.g. access outside of packet buffer) * @tgt_abort: jump target for abort (e.g. access outside of packet buffer)
* @tgt_done: jump target to get the next packet
* @n_translated: number of successfully translated instructions (for errors) * @n_translated: number of successfully translated instructions (for errors)
* @error: error code if something went wrong * @error: error code if something went wrong
* @stack_depth: max stack depth from the verifier * @stack_depth: max stack depth from the verifier
...@@ -213,11 +222,9 @@ struct nfp_prog { ...@@ -213,11 +222,9 @@ struct nfp_prog {
enum bpf_prog_type type; enum bpf_prog_type type;
unsigned int start_off;
unsigned int last_bpf_off; unsigned int last_bpf_off;
unsigned int tgt_out; unsigned int tgt_out;
unsigned int tgt_abort; unsigned int tgt_abort;
unsigned int tgt_done;
unsigned int n_translated; unsigned int n_translated;
int error; int error;
...@@ -231,11 +238,16 @@ struct nfp_prog { ...@@ -231,11 +238,16 @@ struct nfp_prog {
/** /**
* struct nfp_bpf_vnic - per-vNIC BPF priv structure * struct nfp_bpf_vnic - per-vNIC BPF priv structure
* @tc_prog: currently loaded cls_bpf program * @tc_prog: currently loaded cls_bpf program
* @start_off: address of the first instruction in the memory
* @tgt_done: jump target to get the next packet
*/ */
struct nfp_bpf_vnic { struct nfp_bpf_vnic {
struct bpf_prog *tc_prog; struct bpf_prog *tc_prog;
unsigned int start_off;
unsigned int tgt_done;
}; };
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt);
int nfp_bpf_jit(struct nfp_prog *prog); int nfp_bpf_jit(struct nfp_prog *prog);
extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops; extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops;
...@@ -244,16 +256,14 @@ struct netdev_bpf; ...@@ -244,16 +256,14 @@ struct netdev_bpf;
struct nfp_app; struct nfp_app;
struct nfp_net; struct nfp_net;
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf);
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
bool old_prog); bool old_prog);
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf);
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
struct nfp_insn_meta * struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns); unsigned int insn_idx, unsigned int n_insns);
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
#endif #endif
...@@ -42,6 +42,7 @@ ...@@ -42,6 +42,7 @@
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/mm.h>
#include <net/pkt_cls.h> #include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h> #include <net/tc_act/tc_gact.h>
...@@ -70,23 +71,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog, ...@@ -70,23 +71,7 @@ nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
list_add_tail(&meta->l, &nfp_prog->insns); list_add_tail(&meta->l, &nfp_prog->insns);
} }
/* Another pass to record jump information. */ nfp_bpf_jit_prepare(nfp_prog, cnt);
list_for_each_entry(meta, &nfp_prog->insns, l) {
u64 code = meta->insn.code;
if (BPF_CLASS(code) == BPF_JMP && BPF_OP(code) != BPF_EXIT &&
BPF_OP(code) != BPF_CALL) {
struct nfp_insn_meta *dst_meta;
unsigned short dst_indx;
dst_indx = meta->n + 1 + meta->insn.off;
dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_indx,
cnt);
meta->jmp_dst = dst_meta;
dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
}
}
return 0; return 0;
} }
...@@ -102,8 +87,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) ...@@ -102,8 +87,9 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog)
kfree(nfp_prog); kfree(nfp_prog);
} }
int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, static int
struct netdev_bpf *bpf) nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{ {
struct bpf_prog *prog = bpf->verifier.prog; struct bpf_prog *prog = bpf->verifier.prog;
struct nfp_prog *nfp_prog; struct nfp_prog *nfp_prog;
...@@ -133,8 +119,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, ...@@ -133,8 +119,7 @@ int nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
return ret; return ret;
} }
int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
struct bpf_prog *prog)
{ {
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int stack_size; unsigned int stack_size;
...@@ -146,37 +131,48 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn, ...@@ -146,37 +131,48 @@ int nfp_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
prog->aux->stack_depth, stack_size); prog->aux->stack_depth, stack_size);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
nfp_prog->stack_depth = round_up(prog->aux->stack_depth, 4);
nfp_prog->stack_depth = prog->aux->stack_depth;
nfp_prog->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
nfp_prog->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN); max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
nfp_prog->__prog_alloc_len = max_instr * sizeof(u64); nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);
nfp_prog->prog = kmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL); nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
if (!nfp_prog->prog) if (!nfp_prog->prog)
return -ENOMEM; return -ENOMEM;
return nfp_bpf_jit(nfp_prog); return nfp_bpf_jit(nfp_prog);
} }
int nfp_bpf_destroy(struct nfp_app *app, struct nfp_net *nn, static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
struct bpf_prog *prog)
{ {
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
kfree(nfp_prog->prog); kvfree(nfp_prog->prog);
nfp_prog_free(nfp_prog); nfp_prog_free(nfp_prog);
return 0; return 0;
} }
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
switch (bpf->command) {
case BPF_OFFLOAD_VERIFIER_PREP:
return nfp_bpf_verifier_prep(app, nn, bpf);
case BPF_OFFLOAD_TRANSLATE:
return nfp_bpf_translate(nn, bpf->offload.prog);
case BPF_OFFLOAD_DESTROY:
return nfp_bpf_destroy(nn, bpf->offload.prog);
default:
return -EINVAL;
}
}
static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog) static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
{ {
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
unsigned int max_mtu; unsigned int max_mtu;
dma_addr_t dma_addr; dma_addr_t dma_addr;
void *img;
int err; int err;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
...@@ -185,11 +181,17 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog) ...@@ -185,11 +181,17 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
dma_addr = dma_map_single(nn->dp.dev, nfp_prog->prog, img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
if (IS_ERR(img))
return PTR_ERR(img);
dma_addr = dma_map_single(nn->dp.dev, img,
nfp_prog->prog_len * sizeof(u64), nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (dma_mapping_error(nn->dp.dev, dma_addr)) if (dma_mapping_error(nn->dp.dev, dma_addr)) {
kfree(img);
return -ENOMEM; return -ENOMEM;
}
nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len); nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr); nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);
...@@ -201,6 +203,7 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog) ...@@ -201,6 +203,7 @@ static int nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog)
dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64), dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
DMA_TO_DEVICE); DMA_TO_DEVICE);
kfree(img);
return err; return err;
} }
......
...@@ -31,8 +31,6 @@ ...@@ -31,8 +31,6 @@
* SOFTWARE. * SOFTWARE.
*/ */
#define pr_fmt(fmt) "NFP net bpf: " fmt
#include <linux/bpf.h> #include <linux/bpf.h>
#include <linux/bpf_verifier.h> #include <linux/bpf_verifier.h>
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -41,6 +39,9 @@ ...@@ -41,6 +39,9 @@
#include "fw.h" #include "fw.h"
#include "main.h" #include "main.h"
#define pr_vlog(env, fmt, ...) \
bpf_verifier_log_write(env, "[nfp] " fmt, ##__VA_ARGS__)
struct nfp_insn_meta * struct nfp_insn_meta *
nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
unsigned int insn_idx, unsigned int n_insns) unsigned int insn_idx, unsigned int n_insns)
...@@ -116,18 +117,18 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env, ...@@ -116,18 +117,18 @@ nfp_bpf_check_call(struct nfp_prog *nfp_prog, struct bpf_verifier_env *env,
switch (func_id) { switch (func_id) {
case BPF_FUNC_xdp_adjust_head: case BPF_FUNC_xdp_adjust_head:
if (!bpf->adjust_head.off_max) { if (!bpf->adjust_head.off_max) {
pr_warn("adjust_head not supported by FW\n"); pr_vlog(env, "adjust_head not supported by FW\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) { if (!(bpf->adjust_head.flags & NFP_BPF_ADJUST_HEAD_NO_META)) {
pr_warn("adjust_head: FW requires shifting metadata, not supported by the driver\n"); pr_vlog(env, "adjust_head: FW requires shifting metadata, not supported by the driver\n");
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
nfp_record_adjust_head(bpf, nfp_prog, meta, reg2); nfp_record_adjust_head(bpf, nfp_prog, meta, reg2);
break; break;
default: default:
pr_warn("unsupported function id: %d\n", func_id); pr_vlog(env, "unsupported function id: %d\n", func_id);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
...@@ -150,7 +151,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, ...@@ -150,7 +151,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
char tn_buf[48]; char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off); tnum_strn(tn_buf, sizeof(tn_buf), reg0->var_off);
pr_info("unsupported exit state: %d, var_off: %s\n", pr_vlog(env, "unsupported exit state: %d, var_off: %s\n",
reg0->type, tn_buf); reg0->type, tn_buf);
return -EINVAL; return -EINVAL;
} }
...@@ -160,7 +161,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, ...@@ -160,7 +161,7 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
imm <= TC_ACT_REDIRECT && imm <= TC_ACT_REDIRECT &&
imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN && imm != TC_ACT_SHOT && imm != TC_ACT_STOLEN &&
imm != TC_ACT_QUEUED) { imm != TC_ACT_QUEUED) {
pr_info("unsupported exit state: %d, imm: %llx\n", pr_vlog(env, "unsupported exit state: %d, imm: %llx\n",
reg0->type, imm); reg0->type, imm);
return -EINVAL; return -EINVAL;
} }
...@@ -171,12 +172,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog, ...@@ -171,12 +172,13 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
static int static int
nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
struct nfp_insn_meta *meta, struct nfp_insn_meta *meta,
const struct bpf_reg_state *reg) const struct bpf_reg_state *reg,
struct bpf_verifier_env *env)
{ {
s32 old_off, new_off; s32 old_off, new_off;
if (!tnum_is_const(reg->var_off)) { if (!tnum_is_const(reg->var_off)) {
pr_info("variable ptr stack access\n"); pr_vlog(env, "variable ptr stack access\n");
return -EINVAL; return -EINVAL;
} }
...@@ -194,7 +196,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog, ...@@ -194,7 +196,7 @@ nfp_bpf_check_stack_access(struct nfp_prog *nfp_prog,
if (old_off % 4 == new_off % 4) if (old_off % 4 == new_off % 4)
return 0; return 0;
pr_info("stack access changed location was:%d is:%d\n", pr_vlog(env, "stack access changed location was:%d is:%d\n",
old_off, new_off); old_off, new_off);
return -EINVAL; return -EINVAL;
} }
...@@ -209,18 +211,18 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, ...@@ -209,18 +211,18 @@ nfp_bpf_check_ptr(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
if (reg->type != PTR_TO_CTX && if (reg->type != PTR_TO_CTX &&
reg->type != PTR_TO_STACK && reg->type != PTR_TO_STACK &&
reg->type != PTR_TO_PACKET) { reg->type != PTR_TO_PACKET) {
pr_info("unsupported ptr type: %d\n", reg->type); pr_vlog(env, "unsupported ptr type: %d\n", reg->type);
return -EINVAL; return -EINVAL;
} }
if (reg->type == PTR_TO_STACK) { if (reg->type == PTR_TO_STACK) {
err = nfp_bpf_check_stack_access(nfp_prog, meta, reg); err = nfp_bpf_check_stack_access(nfp_prog, meta, reg, env);
if (err) if (err)
return err; return err;
} }
if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) { if (meta->ptr.type != NOT_INIT && meta->ptr.type != reg->type) {
pr_info("ptr type changed for instruction %d -> %d\n", pr_vlog(env, "ptr type changed for instruction %d -> %d\n",
meta->ptr.type, reg->type); meta->ptr.type, reg->type);
return -EINVAL; return -EINVAL;
} }
...@@ -241,7 +243,7 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) ...@@ -241,7 +243,7 @@ nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx)
if (meta->insn.src_reg >= MAX_BPF_REG || if (meta->insn.src_reg >= MAX_BPF_REG ||
meta->insn.dst_reg >= MAX_BPF_REG) { meta->insn.dst_reg >= MAX_BPF_REG) {
pr_err("program uses extended registers - jit hardening?\n"); pr_vlog(env, "program uses extended registers - jit hardening?\n");
return -EINVAL; return -EINVAL;
} }
......
...@@ -82,15 +82,15 @@ extern const struct nfp_app_type app_flower; ...@@ -82,15 +82,15 @@ extern const struct nfp_app_type app_flower;
* @repr_clean: representor about to be unregistered * @repr_clean: representor about to be unregistered
* @repr_open: representor netdev open callback * @repr_open: representor netdev open callback
* @repr_stop: representor netdev stop callback * @repr_stop: representor netdev stop callback
* @change_mtu: MTU change on a netdev has been requested (veto-only, change
* is not guaranteed to be committed)
* @start: start application logic * @start: start application logic
* @stop: stop application logic * @stop: stop application logic
* @ctrl_msg_rx: control message handler * @ctrl_msg_rx: control message handler
* @setup_tc: setup TC ndo * @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded) * @tc_busy: TC HW offload busy (rules loaded)
* @bpf: BPF ndo offload-related calls
* @xdp_offload: offload an XDP program * @xdp_offload: offload an XDP program
* @bpf_verifier_prep: verifier prep for dev-specific BPF programs
* @bpf_translate: translate call for dev-specific BPF programs
* @bpf_destroy: destroy for dev-specific BPF programs
* @eswitch_mode_get: get SR-IOV eswitch mode * @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation * @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up * @sriov_disable: app-specific sriov clean-up
...@@ -120,6 +120,9 @@ struct nfp_app_type { ...@@ -120,6 +120,9 @@ struct nfp_app_type {
int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr); int (*repr_open)(struct nfp_app *app, struct nfp_repr *repr);
int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr); int (*repr_stop)(struct nfp_app *app, struct nfp_repr *repr);
int (*change_mtu)(struct nfp_app *app, struct net_device *netdev,
int new_mtu);
int (*start)(struct nfp_app *app); int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app); void (*stop)(struct nfp_app *app);
...@@ -128,14 +131,10 @@ struct nfp_app_type { ...@@ -128,14 +131,10 @@ struct nfp_app_type {
int (*setup_tc)(struct nfp_app *app, struct net_device *netdev, int (*setup_tc)(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data); enum tc_setup_type type, void *type_data);
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn); bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*bpf)(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *xdp);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn, int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog); struct bpf_prog *prog);
int (*bpf_verifier_prep)(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf);
int (*bpf_translate)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*bpf_destroy)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs); int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app); void (*sriov_disable)(struct nfp_app *app);
...@@ -242,6 +241,14 @@ nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev) ...@@ -242,6 +241,14 @@ nfp_app_repr_clean(struct nfp_app *app, struct net_device *netdev)
app->type->repr_clean(app, netdev); app->type->repr_clean(app, netdev);
} }
static inline int
nfp_app_change_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
if (!app || !app->type->change_mtu)
return 0;
return app->type->change_mtu(app, netdev, new_mtu);
}
static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
{ {
app->ctrl = ctrl; app->ctrl = ctrl;
...@@ -303,6 +310,14 @@ static inline int nfp_app_setup_tc(struct nfp_app *app, ...@@ -303,6 +310,14 @@ static inline int nfp_app_setup_tc(struct nfp_app *app,
return app->type->setup_tc(app, netdev, type, type_data); return app->type->setup_tc(app, netdev, type, type_data);
} }
static inline int nfp_app_bpf(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{
if (!app || !app->type->bpf)
return -EINVAL;
return app->type->bpf(app, nn, bpf);
}
static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog) struct bpf_prog *prog)
{ {
...@@ -311,33 +326,6 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn, ...@@ -311,33 +326,6 @@ static inline int nfp_app_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
return app->type->xdp_offload(app, nn, prog); return app->type->xdp_offload(app, nn, prog);
} }
static inline int
nfp_app_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
struct netdev_bpf *bpf)
{
if (!app || !app->type->bpf_verifier_prep)
return -EOPNOTSUPP;
return app->type->bpf_verifier_prep(app, nn, bpf);
}
static inline int
nfp_app_bpf_translate(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
if (!app || !app->type->bpf_translate)
return -EOPNOTSUPP;
return app->type->bpf_translate(app, nn, prog);
}
static inline int
nfp_app_bpf_destroy(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog)
{
if (!app || !app->type->bpf_destroy)
return -EOPNOTSUPP;
return app->type->bpf_destroy(app, nn, prog);
}
static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb) static inline bool nfp_app_ctrl_tx(struct nfp_app *app, struct sk_buff *skb)
{ {
trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0, trace_devlink_hwmsg(priv_to_devlink(app->pf), false, 0,
......
...@@ -50,6 +50,36 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = { ...@@ -50,6 +50,36 @@ const struct cmd_tgt_act cmd_tgt_act[__CMD_TGT_MAP_SIZE] = {
[CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 }, [CMD_TGT_READ_SWAP_LE] = { 0x03, 0x40 },
}; };
u16 br_get_offset(u64 instr)
{
u16 addr_lo, addr_hi;
addr_lo = FIELD_GET(OP_BR_ADDR_LO, instr);
addr_hi = FIELD_GET(OP_BR_ADDR_HI, instr);
return (addr_hi * ((OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO)) + 1)) |
addr_lo;
}
void br_set_offset(u64 *instr, u16 offset)
{
u16 addr_lo, addr_hi;
addr_lo = offset & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
addr_hi = offset != addr_lo;
*instr &= ~(OP_BR_ADDR_HI | OP_BR_ADDR_LO);
*instr |= FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
*instr |= FIELD_PREP(OP_BR_ADDR_LO, addr_lo);
}
void br_add_offset(u64 *instr, u16 offset)
{
u16 addr;
addr = br_get_offset(*instr);
br_set_offset(instr, addr + offset);
}
static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst) static u16 nfp_swreg_to_unreg(swreg reg, bool is_dst)
{ {
bool lm_id, lm_dec = false; bool lm_id, lm_dec = false;
......
...@@ -81,6 +81,7 @@ enum br_mask { ...@@ -81,6 +81,7 @@ enum br_mask {
BR_BHS = 0x04, BR_BHS = 0x04,
BR_BLO = 0x05, BR_BLO = 0x05,
BR_BGE = 0x08, BR_BGE = 0x08,
BR_BLT = 0x09,
BR_UNC = 0x18, BR_UNC = 0x18,
}; };
...@@ -93,6 +94,10 @@ enum br_ctx_signal_state { ...@@ -93,6 +94,10 @@ enum br_ctx_signal_state {
BR_CSS_NONE = 2, BR_CSS_NONE = 2,
}; };
u16 br_get_offset(u64 instr);
void br_set_offset(u64 *instr, u16 offset);
void br_add_offset(u64 *instr, u16 offset);
#define OP_BBYTE_BASE 0x0c800000000ULL #define OP_BBYTE_BASE 0x0c800000000ULL
#define OP_BB_A_SRC 0x000000000ffULL #define OP_BB_A_SRC 0x000000000ffULL
#define OP_BB_BYTE 0x00000000300ULL #define OP_BB_BYTE 0x00000000300ULL
......
...@@ -2253,7 +2253,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring) ...@@ -2253,7 +2253,8 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp; struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
xdp_rxq_info_unreg(&rx_ring->xdp_rxq); if (dp->netdev)
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
kfree(rx_ring->rxbufs); kfree(rx_ring->rxbufs);
if (rx_ring->rxds) if (rx_ring->rxds)
...@@ -2279,9 +2280,12 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring) ...@@ -2279,9 +2280,12 @@ nfp_net_rx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring)
{ {
int sz, err; int sz, err;
err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev, rx_ring->idx); if (dp->netdev) {
if (err < 0) err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, dp->netdev,
return err; rx_ring->idx);
if (err < 0)
return err;
}
rx_ring->cnt = dp->rxd_cnt; rx_ring->cnt = dp->rxd_cnt;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt; rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
...@@ -3045,6 +3049,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -3045,6 +3049,11 @@ static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{ {
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_dp *dp; struct nfp_net_dp *dp;
int err;
err = nfp_app_change_mtu(nn->app, netdev, new_mtu);
if (err)
return err;
dp = nfp_net_clone_dp(nn); dp = nfp_net_clone_dp(nn);
if (!dp) if (!dp)
...@@ -3405,16 +3414,8 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp) ...@@ -3405,16 +3414,8 @@ static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0; xdp->prog_id = nn->xdp_prog ? nn->xdp_prog->aux->id : 0;
xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0; xdp->prog_flags = nn->xdp_prog ? nn->xdp_flags : 0;
return 0; return 0;
case BPF_OFFLOAD_VERIFIER_PREP:
return nfp_app_bpf_verifier_prep(nn->app, nn, xdp);
case BPF_OFFLOAD_TRANSLATE:
return nfp_app_bpf_translate(nn->app, nn,
xdp->offload.prog);
case BPF_OFFLOAD_DESTROY:
return nfp_app_bpf_destroy(nn->app, nn,
xdp->offload.prog);
default: default:
return -EINVAL; return nfp_app_bpf(nn->app, nn, xdp);
} }
} }
......
...@@ -91,23 +91,24 @@ ...@@ -91,23 +91,24 @@
#define NFP_NET_RSS_IPV6_EX_UDP 9 #define NFP_NET_RSS_IPV6_EX_UDP 9
/** /**
* @NFP_NET_TXR_MAX: Maximum number of TX rings * Ring counts
* @NFP_NET_RXR_MAX: Maximum number of RX rings * %NFP_NET_TXR_MAX: Maximum number of TX rings
* %NFP_NET_RXR_MAX: Maximum number of RX rings
*/ */
#define NFP_NET_TXR_MAX 64 #define NFP_NET_TXR_MAX 64
#define NFP_NET_RXR_MAX 64 #define NFP_NET_RXR_MAX 64
/** /**
* Read/Write config words (0x0000 - 0x002c) * Read/Write config words (0x0000 - 0x002c)
* @NFP_NET_CFG_CTRL: Global control * %NFP_NET_CFG_CTRL: Global control
* @NFP_NET_CFG_UPDATE: Indicate which fields are updated * %NFP_NET_CFG_UPDATE: Indicate which fields are updated
* @NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings * %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
* @NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings * %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
* @NFP_NET_CFG_MTU: Set MTU size * %NFP_NET_CFG_MTU: Set MTU size
* @NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU) * %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
* @NFP_NET_CFG_EXN: MSI-X table entry for exceptions * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
* @NFP_NET_CFG_LSC: MSI-X table entry for link state changes * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
* @NFP_NET_CFG_MACADDR: MAC address * %NFP_NET_CFG_MACADDR: MAC address
* *
* TODO: * TODO:
* - define Error details in UPDATE * - define Error details in UPDATE
...@@ -176,14 +177,14 @@ ...@@ -176,14 +177,14 @@
/** /**
* Read-only words (0x0030 - 0x0050): * Read-only words (0x0030 - 0x0050):
* @NFP_NET_CFG_VERSION: Firmware version number * %NFP_NET_CFG_VERSION: Firmware version number
* @NFP_NET_CFG_STS: Status * %NFP_NET_CFG_STS: Status
* @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL) * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
* @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings * %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
* @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings * %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
* @NFP_NET_CFG_MAX_MTU: Maximum support MTU * %NFP_NET_CFG_MAX_MTU: Maximum support MTU
* @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only) * %NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
* @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only) * %NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
* *
* TODO: * TODO:
* - define more STS bits * - define more STS bits
...@@ -228,31 +229,31 @@ ...@@ -228,31 +229,31 @@
/** /**
* RSS capabilities * RSS capabilities
* @NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as * %NFP_NET_CFG_RSS_CAP_HFUNC: supported hash functions (same bits as
* @NFP_NET_CFG_RSS_HFUNC) * %NFP_NET_CFG_RSS_HFUNC)
*/ */
#define NFP_NET_CFG_RSS_CAP 0x0054 #define NFP_NET_CFG_RSS_CAP 0x0054
#define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000 #define NFP_NET_CFG_RSS_CAP_HFUNC 0xff000000
/** /**
* VXLAN/UDP encap configuration * VXLAN/UDP encap configuration
* @NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports * %NFP_NET_CFG_VXLAN_PORT: Base address of table of tunnels' UDP dst ports
* @NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes * %NFP_NET_CFG_VXLAN_SZ: Size of the UDP port table in bytes
*/ */
#define NFP_NET_CFG_VXLAN_PORT 0x0060 #define NFP_NET_CFG_VXLAN_PORT 0x0060
#define NFP_NET_CFG_VXLAN_SZ 0x0008 #define NFP_NET_CFG_VXLAN_SZ 0x0008
/** /**
* BPF section * BPF section
* @NFP_NET_CFG_BPF_ABI: BPF ABI version * %NFP_NET_CFG_BPF_ABI: BPF ABI version
* @NFP_NET_CFG_BPF_CAP: BPF capabilities * %NFP_NET_CFG_BPF_CAP: BPF capabilities
* @NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes * %NFP_NET_CFG_BPF_MAX_LEN: Maximum size of JITed BPF code in bytes
* @NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded * %NFP_NET_CFG_BPF_START: Offset at which BPF will be loaded
* @NFP_NET_CFG_BPF_DONE: Offset to jump to on exit * %NFP_NET_CFG_BPF_DONE: Offset to jump to on exit
* @NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks * %NFP_NET_CFG_BPF_STACK_SZ: Total size of stack area in 64B chunks
* @NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks * %NFP_NET_CFG_BPF_INL_MTU: Packet data split offset in 64B chunks
* @NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions * %NFP_NET_CFG_BPF_SIZE: Size of the JITed BPF code in instructions
* @NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code * %NFP_NET_CFG_BPF_ADDR: DMA address of the buffer with JITed BPF code
*/ */
#define NFP_NET_CFG_BPF_ABI 0x0080 #define NFP_NET_CFG_BPF_ABI 0x0080
#define NFP_NET_BPF_ABI 2 #define NFP_NET_BPF_ABI 2
...@@ -278,9 +279,9 @@ ...@@ -278,9 +279,9 @@
/** /**
* RSS configuration (0x0100 - 0x01ac): * RSS configuration (0x0100 - 0x01ac):
* Used only when NFP_NET_CFG_CTRL_RSS is enabled * Used only when NFP_NET_CFG_CTRL_RSS is enabled
* @NFP_NET_CFG_RSS_CFG: RSS configuration word * %NFP_NET_CFG_RSS_CFG: RSS configuration word
* @NFP_NET_CFG_RSS_KEY: RSS "secret" key * %NFP_NET_CFG_RSS_KEY: RSS "secret" key
* @NFP_NET_CFG_RSS_ITBL: RSS indirection table * %NFP_NET_CFG_RSS_ITBL: RSS indirection table
*/ */
#define NFP_NET_CFG_RSS_BASE 0x0100 #define NFP_NET_CFG_RSS_BASE 0x0100
#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE #define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
...@@ -305,13 +306,13 @@ ...@@ -305,13 +306,13 @@
/** /**
* TX ring configuration (0x200 - 0x800) * TX ring configuration (0x200 - 0x800)
* @NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration * %NFP_NET_CFG_TXR_BASE: Base offset for TX ring configuration
* @NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries) * %NFP_NET_CFG_TXR_ADDR: Per TX ring DMA address (8B entries)
* @NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries) * %NFP_NET_CFG_TXR_WB_ADDR: Per TX ring write back DMA address (8B entries)
* @NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries) * %NFP_NET_CFG_TXR_SZ: Per TX ring ring size (1B entries)
* @NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries) * %NFP_NET_CFG_TXR_VEC: Per TX ring MSI-X table entry (1B entries)
* @NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries) * %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
* @NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet * %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
*/ */
#define NFP_NET_CFG_TXR_BASE 0x0200 #define NFP_NET_CFG_TXR_BASE 0x0200
#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8)) #define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
...@@ -325,12 +326,12 @@ ...@@ -325,12 +326,12 @@
/** /**
* RX ring configuration (0x0800 - 0x0c00) * RX ring configuration (0x0800 - 0x0c00)
* @NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration * %NFP_NET_CFG_RXR_BASE: Base offset for RX ring configuration
* @NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries) * %NFP_NET_CFG_RXR_ADDR: Per RX ring DMA address (8B entries)
* @NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries) * %NFP_NET_CFG_RXR_SZ: Per RX ring ring size (1B entries)
* @NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries) * %NFP_NET_CFG_RXR_VEC: Per RX ring MSI-X table entry (1B entries)
* @NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries) * %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
* @NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries) * %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
*/ */
#define NFP_NET_CFG_RXR_BASE 0x0800 #define NFP_NET_CFG_RXR_BASE 0x0800
#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8)) #define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
...@@ -343,7 +344,7 @@ ...@@ -343,7 +344,7 @@
/** /**
* Interrupt Control/Cause registers (0x0c00 - 0x0d00) * Interrupt Control/Cause registers (0x0c00 - 0x0d00)
* These registers are only used when MSI-X auto-masking is not * These registers are only used when MSI-X auto-masking is not
* enabled (@NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index * enabled (%NFP_NET_CFG_CTRL_MSIXAUTO not set). The array is index
* by MSI-X entry and are 1B in size. If an entry is zero, the * by MSI-X entry and are 1B in size. If an entry is zero, the
* corresponding entry is enabled. If the FW generates an interrupt, * corresponding entry is enabled. If the FW generates an interrupt,
* it writes a cause into the corresponding field. This also masks * it writes a cause into the corresponding field. This also masks
...@@ -393,8 +394,8 @@ ...@@ -393,8 +394,8 @@
/** /**
* Per ring stats (0x1000 - 0x1800) * Per ring stats (0x1000 - 0x1800)
* options, 64bit per entry * options, 64bit per entry
* @NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count) * %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
* @NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count) * %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
*/ */
#define NFP_NET_CFG_TXR_STATS_BASE 0x1000 #define NFP_NET_CFG_TXR_STATS_BASE 0x1000
#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \ #define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
...@@ -418,10 +419,10 @@ ...@@ -418,10 +419,10 @@
/** /**
* VLAN filtering using general use mailbox * VLAN filtering using general use mailbox
* @NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox
* @NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter * %NFP_NET_CFG_VLAN_FILTER_VID: VLAN ID to filter
* @NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter * %NFP_NET_CFG_VLAN_FILTER_PROTO: VLAN proto to filter
* @NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes * %NFP_NET_CFG_VXLAN_SZ: Size of the VLAN filter mailbox in bytes
*/ */
#define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL #define NFP_NET_CFG_VLAN_FILTER NFP_NET_CFG_MBOX_VAL
#define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER #define NFP_NET_CFG_VLAN_FILTER_VID NFP_NET_CFG_VLAN_FILTER
......
...@@ -186,6 +186,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev, ...@@ -186,6 +186,13 @@ nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
return -EINVAL; return -EINVAL;
} }
static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_repr *repr = netdev_priv(netdev);
return nfp_app_change_mtu(repr->app, netdev, new_mtu);
}
static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev) static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{ {
struct nfp_repr *repr = netdev_priv(netdev); struct nfp_repr *repr = netdev_priv(netdev);
...@@ -240,6 +247,7 @@ const struct net_device_ops nfp_repr_netdev_ops = { ...@@ -240,6 +247,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_open = nfp_repr_open, .ndo_open = nfp_repr_open,
.ndo_stop = nfp_repr_stop, .ndo_stop = nfp_repr_stop,
.ndo_start_xmit = nfp_repr_xmit, .ndo_start_xmit = nfp_repr_xmit,
.ndo_change_mtu = nfp_repr_change_mtu,
.ndo_get_stats64 = nfp_repr_get_stats64, .ndo_get_stats64 = nfp_repr_get_stats64,
.ndo_has_offload_stats = nfp_repr_has_offload_stats, .ndo_has_offload_stats = nfp_repr_has_offload_stats,
.ndo_get_offload_stats = nfp_repr_get_offload_stats, .ndo_get_offload_stats = nfp_repr_get_offload_stats,
......
...@@ -89,6 +89,7 @@ struct nfp_repr { ...@@ -89,6 +89,7 @@ struct nfp_repr {
* @NFP_REPR_TYPE_PHYS_PORT: external NIC port * @NFP_REPR_TYPE_PHYS_PORT: external NIC port
* @NFP_REPR_TYPE_PF: physical function * @NFP_REPR_TYPE_PF: physical function
* @NFP_REPR_TYPE_VF: virtual function * @NFP_REPR_TYPE_VF: virtual function
* @__NFP_REPR_TYPE_MAX: number of representor types
*/ */
enum nfp_repr_type { enum nfp_repr_type {
NFP_REPR_TYPE_PHYS_PORT, NFP_REPR_TYPE_PHYS_PORT,
......
...@@ -192,6 +192,9 @@ struct bpf_verifier_env { ...@@ -192,6 +192,9 @@ struct bpf_verifier_env {
u32 subprog_cnt; u32 subprog_cnt;
}; };
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{ {
struct bpf_verifier_state *cur = env->cur_state; struct bpf_verifier_state *cur = env->cur_state;
......
...@@ -169,11 +169,11 @@ struct bpf_call_arg_meta { ...@@ -169,11 +169,11 @@ struct bpf_call_arg_meta {
static DEFINE_MUTEX(bpf_verifier_lock); static DEFINE_MUTEX(bpf_verifier_lock);
/* log_level controls verbosity level of eBPF verifier. /* log_level controls verbosity level of eBPF verifier.
* verbose() is used to dump the verification trace to the log, so the user * bpf_verifier_log_write() is used to dump the verification trace to the log,
* can figure out what's wrong with the program * so the user can figure out what's wrong with the program
*/ */
static __printf(2, 3) void verbose(struct bpf_verifier_env *env, __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...) const char *fmt, ...)
{ {
struct bpf_verifer_log *log = &env->log; struct bpf_verifer_log *log = &env->log;
unsigned int n; unsigned int n;
...@@ -197,6 +197,14 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env, ...@@ -197,6 +197,14 @@ static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
else else
log->ubuf = NULL; log->ubuf = NULL;
} }
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
/* Historically bpf_verifier_log_write was called verbose, but the name was too
* generic for symbol export. The function was renamed, but not the calls in
* the verifier to avoid complicating backports. Hence the alias below.
*/
static __printf(2, 3) void verbose(struct bpf_verifier_env *env,
const char *fmt, ...)
__attribute__((alias("bpf_verifier_log_write")));
static bool type_is_pkt_pointer(enum bpf_reg_type type) static bool type_is_pkt_pointer(enum bpf_reg_type type)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment