Commit 57939fdc authored by David S. Miller

Merge branch 'nfp3800'

Simon Horman says:

====================
nfp: support for NFP-3800

Yinjun Zhang says:

This is the second of a two part series to support the NFP-3800 device.

To utilize the new hardware features of the NFP-3800, the driver adds support
for a new data path, NFDK. This series mainly refactors the data path related
implementations: the data path specific code is now separated into nfd3 and
nfdk directories respectively, and the common part is moved into a new file.

* The series starts with a small refinement in Patch 1/10. Patches 2/10 and
  3/10 are the main refactoring of the data path implementation, which prepares
  for adding the NFDK data path.
* Before the introduction of NFDK, there's some more preparation work
  for NFP-3800 features, such as multi-descriptor per-packet and write-back
  mechanism of TX pointer, which is done in patches 4/10, 5/10, 6/10, 7/10.
* Patch 8/10 allows the driver to select the data path according to the
  firmware version (see the illustrative selection sketch after the commit
  header below). Finally, patches 9/10 and 10/10 introduce the new
  NFDK data path.

Changes between v1 and v2
* Correct kdoc for nfp_nfdk_tx()
* Correct build warnings on 32-bit

Thanks to everyone who contributed to this work.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4b45e079 d9d95049
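For orientation, here is an illustrative sketch (not code from the series) of how the data path could be selected from the firmware version's extend byte. The types, constants and ops tables (struct nfp_net_fw_version, NFP_NET_CFG_VERSION_DP_*, nfp_nfd3_ops, nfp_nfdk_ops) are introduced in the diffs below; the helper name and the exact selection logic are assumptions, since the real code lives in parts of the series collapsed on this page.

static const struct nfp_dp_ops *
nfp_net_select_dp_ops(const struct nfp_net_fw_version *fw_ver)
{
	/* BIT0 of the extend byte encodes the data path type; the remaining
	 * bits are reserved (see the version register defines below).
	 */
	if ((fw_ver->extend & NFP_NET_CFG_VERSION_DP_MASK) ==
	    NFP_NET_CFG_VERSION_DP_NFDK)
		return &nfp_nfdk_ops;

	return &nfp_nfd3_ops;
}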
@@ -20,12 +20,18 @@ nfp-objs := \
ccm_mbox.o \
devlink_param.o \
nfp_asm.o \
nfd3/dp.o \
nfd3/rings.o \
nfd3/xsk.o \
nfdk/dp.o \
nfdk/rings.o \
nfp_app.o \
nfp_app_nic.o \
nfp_devlink.o \
nfp_hwmon.o \
nfp_main.o \
nfp_net_common.o \
nfp_net_dp.o \
nfp_net_ctrl.o \
nfp_net_debugdump.o \
nfp_net_ethtool.o \
......
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#ifndef _NFP_DP_NFD3_H_
#define _NFP_DP_NFD3_H_
struct sk_buff;
struct net_device;
/* TX descriptor format */
#define NFD3_DESC_TX_EOP BIT(7)
#define NFD3_DESC_TX_OFFSET_MASK GENMASK(6, 0)
#define NFD3_DESC_TX_MSS_MASK GENMASK(13, 0)
/* Flags in the host TX descriptor */
#define NFD3_DESC_TX_CSUM BIT(7)
#define NFD3_DESC_TX_IP4_CSUM BIT(6)
#define NFD3_DESC_TX_TCP_CSUM BIT(5)
#define NFD3_DESC_TX_UDP_CSUM BIT(4)
#define NFD3_DESC_TX_VLAN BIT(3)
#define NFD3_DESC_TX_LSO BIT(2)
#define NFD3_DESC_TX_ENCAP BIT(1)
#define NFD3_DESC_TX_O_IP4_CSUM BIT(0)
struct nfp_nfd3_tx_desc {
union {
struct {
u8 dma_addr_hi; /* High bits of host buf address */
__le16 dma_len; /* Length to DMA for this desc */
u8 offset_eop; /* Offset in buf where pkt starts +
* highest bit is eop flag.
*/
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
__le16 mss; /* MSS to be used for LSO */
u8 lso_hdrlen; /* LSO, TCP payload offset */
u8 flags; /* TX Flags, see @NFD3_DESC_TX_* */
union {
struct {
u8 l3_offset; /* L3 header offset */
u8 l4_offset; /* L4 header offset */
};
__le16 vlan; /* VLAN tag to add if indicated */
};
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
__le64 vals8[2];
};
};
/**
* struct nfp_nfd3_tx_buf - software TX buffer descriptor
* @skb: normal ring, sk_buff associated with this buffer
* @frag: XDP ring, page frag associated with this buffer
* @xdp: XSK buffer pool handle (for AF_XDP)
* @dma_addr: DMA mapping address of the buffer
* @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
* @pkt_cnt: Number of packets to be produced out of the skb associated
* with this buffer (valid only on the head's buffer).
* Will be 1 for all non-TSO packets.
* @is_xsk_tx: Flag if buffer is an RX buffer after an XDP_TX action and not a
* buffer from the TX queue (for AF_XDP).
* @real_len: Number of bytes to be produced from the skb (valid only
* on the head's buffer). Equal to skb->len for non-TSO packets.
*/
struct nfp_nfd3_tx_buf {
union {
struct sk_buff *skb;
void *frag;
struct xdp_buff *xdp;
};
dma_addr_t dma_addr;
union {
struct {
short int fidx;
u16 pkt_cnt;
};
struct {
bool is_xsk_tx;
};
};
u32 real_len;
};
void
nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
const struct nfp_net_rx_desc *rxd,
const struct nfp_meta_parsed *meta, struct sk_buff *skb);
bool
nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len);
void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
int nfp_nfd3_poll(struct napi_struct *napi, int budget);
netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
bool
nfp_nfd3_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old);
void nfp_nfd3_ctrl_poll(struct tasklet_struct *t);
void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
#endif
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#include <linux/seq_file.h>
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../nfp_net_xsk.h"
#include "nfd3.h"
static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_nfd3_tx_buf *txbuf;
unsigned int idx;
while (tx_ring->rd_p != tx_ring->wr_p) {
idx = D_IDX(tx_ring, tx_ring->rd_p);
txbuf = &tx_ring->txbufs[idx];
txbuf->real_len = 0;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
if (tx_ring->r_vec->xsk_pool) {
if (txbuf->is_xsk_tx)
nfp_nfd3_xsk_tx_free(txbuf);
xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
}
}
}
/**
* nfp_nfd3_tx_ring_reset() - Free any untransmitted buffers and reset pointers
* @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
* Assumes that the device is stopped, must be idempotent.
*/
static void
nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct netdev_queue *nd_q;
const skb_frag_t *frag;
while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
struct nfp_nfd3_tx_buf *tx_buf;
struct sk_buff *skb;
int idx, nr_frags;
idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_buf = &tx_ring->txbufs[idx];
skb = tx_ring->txbufs[idx].skb;
nr_frags = skb_shinfo(skb)->nr_frags;
if (tx_buf->fidx == -1) {
/* unmap head */
dma_unmap_single(dp->dev, tx_buf->dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
dma_unmap_page(dp->dev, tx_buf->dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
/* check for last gather fragment */
if (tx_buf->fidx == nr_frags - 1)
dev_kfree_skb_any(skb);
tx_buf->dma_addr = 0;
tx_buf->skb = NULL;
tx_buf->fidx = -2;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
}
if (tx_ring->is_xdp)
nfp_nfd3_xsk_tx_bufs_free(tx_ring);
memset(tx_ring->txds, 0, tx_ring->size);
tx_ring->wr_p = 0;
tx_ring->rd_p = 0;
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
if (tx_ring->is_xdp || !dp->netdev)
return;
nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
/**
* nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
* @tx_ring: TX ring to free
*/
static void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kvfree(tx_ring->txbufs);
if (tx_ring->txds)
dma_free_coherent(dp->dev, tx_ring->size,
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
tx_ring->txbufs = NULL;
tx_ring->txds = NULL;
tx_ring->dma = 0;
tx_ring->size = 0;
}
/**
* nfp_nfd3_tx_ring_alloc() - Allocate resource for a TX ring
* @dp: NFP Net data path struct
* @tx_ring: TX Ring structure to allocate
*
* Return: 0 on success, negative errno otherwise.
*/
static int
nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
tx_ring->cnt = dp->txd_cnt;
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma,
GFP_KERNEL | __GFP_NOWARN);
if (!tx_ring->txds) {
netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
tx_ring->cnt);
goto err_alloc;
}
tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
GFP_KERNEL);
if (!tx_ring->txbufs)
goto err_alloc;
if (!tx_ring->is_xdp && dp->netdev)
netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
return 0;
err_alloc:
nfp_nfd3_tx_ring_free(tx_ring);
return -ENOMEM;
}
static void
nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
unsigned int i;
if (!tx_ring->is_xdp)
return;
for (i = 0; i < tx_ring->cnt; i++) {
if (!tx_ring->txbufs[i].frag)
return;
nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
__free_page(virt_to_page(tx_ring->txbufs[i].frag));
}
}
static int
nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
struct nfp_nfd3_tx_buf *txbufs = tx_ring->txbufs;
unsigned int i;
if (!tx_ring->is_xdp)
return 0;
for (i = 0; i < tx_ring->cnt; i++) {
txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
if (!txbufs[i].frag) {
nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
return -ENOMEM;
}
}
return 0;
}
static void
nfp_nfd3_print_tx_descs(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p)
{
struct nfp_nfd3_tx_desc *txd;
u32 txd_cnt = tx_ring->cnt;
int i;
for (i = 0; i < txd_cnt; i++) {
struct xdp_buff *xdp;
struct sk_buff *skb;
txd = &tx_ring->txds[i];
seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
txd->vals[0], txd->vals[1],
txd->vals[2], txd->vals[3]);
if (!tx_ring->is_xdp) {
skb = READ_ONCE(tx_ring->txbufs[i].skb);
if (skb)
seq_printf(file, " skb->head=%p skb->data=%p",
skb->head, skb->data);
} else {
xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
if (xdp)
seq_printf(file, " xdp->data=%p", xdp->data);
}
if (tx_ring->txbufs[i].dma_addr)
seq_printf(file, " dma_addr=%pad",
&tx_ring->txbufs[i].dma_addr);
if (i == tx_ring->rd_p % txd_cnt)
seq_puts(file, " H_RD");
if (i == tx_ring->wr_p % txd_cnt)
seq_puts(file, " H_WR");
if (i == d_rd_p % txd_cnt)
seq_puts(file, " D_RD");
if (i == d_wr_p % txd_cnt)
seq_puts(file, " D_WR");
seq_putc(file, '\n');
}
}
#define NFP_NFD3_CFG_CTRL_SUPPORTED \
(NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | NFP_NET_CFG_CTRL_TXVLAN | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_RSS | \
NFP_NET_CFG_CTRL_IRQMOD | NFP_NET_CFG_CTRL_TXRWB | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
NFP_NET_CFG_CTRL_LIVE_ADDR)
const struct nfp_dp_ops nfp_nfd3_ops = {
.version = NFP_NFD_VER_NFD3,
.tx_min_desc_per_pkt = 1,
.cap_mask = NFP_NFD3_CFG_CTRL_SUPPORTED,
.poll = nfp_nfd3_poll,
.xsk_poll = nfp_nfd3_xsk_poll,
.ctrl_poll = nfp_nfd3_ctrl_poll,
.xmit = nfp_nfd3_tx,
.ctrl_tx_one = nfp_nfd3_ctrl_tx_one,
.rx_ring_fill_freelist = nfp_nfd3_rx_ring_fill_freelist,
.tx_ring_alloc = nfp_nfd3_tx_ring_alloc,
.tx_ring_reset = nfp_nfd3_tx_ring_reset,
.tx_ring_free = nfp_nfd3_tx_ring_free,
.tx_ring_bufs_alloc = nfp_nfd3_tx_ring_bufs_alloc,
.tx_ring_bufs_free = nfp_nfd3_tx_ring_bufs_free,
.print_tx_descs = nfp_nfd3_print_tx_descs
};
This diff is collapsed.
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */
#ifndef _NFP_DP_NFDK_H_
#define _NFP_DP_NFDK_H_
#include <linux/bitops.h>
#include <linux/types.h>
#define NFDK_TX_DESC_PER_SIMPLE_PKT 2
#define NFDK_TX_MAX_DATA_PER_HEAD SZ_4K
#define NFDK_TX_MAX_DATA_PER_DESC SZ_16K
#define NFDK_TX_DESC_BLOCK_SZ 256
#define NFDK_TX_DESC_BLOCK_CNT (NFDK_TX_DESC_BLOCK_SZ / \
sizeof(struct nfp_nfdk_tx_desc))
#define NFDK_TX_DESC_STOP_CNT (NFDK_TX_DESC_BLOCK_CNT * \
NFDK_TX_DESC_PER_SIMPLE_PKT)
#define NFDK_TX_MAX_DATA_PER_BLOCK SZ_64K
#define NFDK_TX_DESC_GATHER_MAX 17
/* TX descriptor format */
#define NFDK_DESC_TX_MSS_MASK GENMASK(13, 0)
#define NFDK_DESC_TX_CHAIN_META BIT(3)
#define NFDK_DESC_TX_ENCAP BIT(2)
#define NFDK_DESC_TX_L4_CSUM BIT(1)
#define NFDK_DESC_TX_L3_CSUM BIT(0)
#define NFDK_DESC_TX_DMA_LEN_HEAD GENMASK(11, 0)
#define NFDK_DESC_TX_TYPE_HEAD GENMASK(15, 12)
#define NFDK_DESC_TX_DMA_LEN GENMASK(13, 0)
#define NFDK_DESC_TX_TYPE_NOP 0
#define NFDK_DESC_TX_TYPE_GATHER 1
#define NFDK_DESC_TX_TYPE_TSO 2
#define NFDK_DESC_TX_TYPE_SIMPLE 8
#define NFDK_DESC_TX_EOP BIT(14)
#define NFDK_META_LEN GENMASK(7, 0)
#define NFDK_META_FIELDS GENMASK(31, 8)
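/* Number of descriptor slots from @idx (inclusive) to the end of its block */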
#define D_BLOCK_CPL(idx) (NFDK_TX_DESC_BLOCK_CNT - \
(idx) % NFDK_TX_DESC_BLOCK_CNT)
struct nfp_nfdk_tx_desc {
union {
struct {
u8 dma_addr_hi; /* High bits of host buf address */
u8 padding; /* Must be zero */
__le16 dma_len_type; /* Length to DMA for this desc */
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
};
struct {
__le16 mss; /* MSS to be used for LSO */
u8 lso_hdrlen; /* LSO, TCP payload offset */
u8 lso_totsegs; /* LSO, total segments */
u8 l3_offset; /* L3 header offset */
u8 l4_offset; /* L4 header offset */
__le16 lso_meta_res; /* Rsvd bits in TSO metadata */
};
struct {
u8 flags; /* TX Flags, see @NFDK_DESC_TX_* */
u8 reserved[7]; /* meta byte placeholder */
};
__le32 vals[2];
__le64 raw;
};
};
/* The device doesn't make use of the 2 or 3 least significant bits of the
* address due to alignment constraints. The driver can use those bits to carry
* information about the buffer before giving it to the device.
*
* NOTE: The driver must clear the lower bits before handing the buffer to the
* device.
*
* - NFDK_TX_BUF_INFO_SOP - Start of a packet
* Mark the buffer as the start of a packet. This is used in the XDP TX process
* to stash the virtual and DMA addresses so that they can be recycled when the
* TX operation is completed.
*/
#define NFDK_TX_BUF_PTR(val) ((val) & ~(sizeof(void *) - 1))
#define NFDK_TX_BUF_INFO(val) ((val) & (sizeof(void *) - 1))
#define NFDK_TX_BUF_INFO_SOP BIT(0)
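To make the encoding above concrete, here is a small illustrative sketch (the helper names are hypothetical and not part of this patch) of packing a buffer pointer and the SOP flag into one value and recovering them with the macros above:

static inline unsigned long nfp_nfdk_buf_pack(void *frag, bool sop)
{
	/* Hypothetical helper: the low pointer bits are free due to alignment,
	 * so the SOP flag can be stashed there.
	 */
	return (unsigned long)frag | (sop ? NFDK_TX_BUF_INFO_SOP : 0);
}

static inline void *nfp_nfdk_buf_unpack(unsigned long val, bool *sop)
{
	/* Hypothetical helper: recover the flag, then mask the info bits off
	 * to get the original pointer back.
	 */
	*sop = NFDK_TX_BUF_INFO(val) & NFDK_TX_BUF_INFO_SOP;
	return (void *)NFDK_TX_BUF_PTR(val);
}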
struct nfp_nfdk_tx_buf {
union {
/* First slot */
union {
struct sk_buff *skb;
void *frag;
unsigned long val;
};
/* 1 + nr_frags next slots */
dma_addr_t dma_addr;
/* TSO (optional) */
struct {
u32 pkt_cnt;
u32 real_len;
};
u64 raw;
};
};
static inline int nfp_nfdk_headlen_to_segs(unsigned int headlen)
{
/* First descriptor fits less data, so adjust for that */
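/* e.g. headlen = 5000 with a 4K head limit and a 16K per-descriptor limit:
 * DIV_ROUND_UP(5000 + 16K - 4K, 16K) = 2 descriptors
 */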
return DIV_ROUND_UP(headlen +
NFDK_TX_MAX_DATA_PER_DESC -
NFDK_TX_MAX_DATA_PER_HEAD,
NFDK_TX_MAX_DATA_PER_DESC);
}
int nfp_nfdk_poll(struct napi_struct *napi, int budget);
netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev);
bool
nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old);
void nfp_nfdk_ctrl_poll(struct tasklet_struct *t);
void nfp_nfdk_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
#endif
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */
#include <linux/seq_file.h>
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "nfdk.h"
static void
nfp_nfdk_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct device *dev = dp->dev;
struct netdev_queue *nd_q;
while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
const skb_frag_t *frag, *fend;
unsigned int size, n_descs = 1;
struct nfp_nfdk_tx_buf *txbuf;
int nr_frags, rd_idx;
struct sk_buff *skb;
rd_idx = D_IDX(tx_ring, tx_ring->rd_p);
txbuf = &tx_ring->ktxbufs[rd_idx];
skb = txbuf->skb;
if (!skb) {
n_descs = D_BLOCK_CPL(tx_ring->rd_p);
goto next;
}
nr_frags = skb_shinfo(skb)->nr_frags;
txbuf++;
/* Unmap head */
size = skb_headlen(skb);
dma_unmap_single(dev, txbuf->dma_addr, size, DMA_TO_DEVICE);
n_descs += nfp_nfdk_headlen_to_segs(size);
txbuf++;
frag = skb_shinfo(skb)->frags;
fend = frag + nr_frags;
for (; frag < fend; frag++) {
size = skb_frag_size(frag);
dma_unmap_page(dev, txbuf->dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
n_descs += DIV_ROUND_UP(size,
NFDK_TX_MAX_DATA_PER_DESC);
txbuf++;
}
if (skb_is_gso(skb))
n_descs++;
dev_kfree_skb_any(skb);
next:
tx_ring->rd_p += n_descs;
}
memset(tx_ring->txds, 0, tx_ring->size);
tx_ring->data_pending = 0;
tx_ring->wr_p = 0;
tx_ring->rd_p = 0;
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
if (tx_ring->is_xdp || !dp->netdev)
return;
nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
static void nfp_nfdk_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kvfree(tx_ring->ktxbufs);
if (tx_ring->ktxds)
dma_free_coherent(dp->dev, tx_ring->size,
tx_ring->ktxds, tx_ring->dma);
tx_ring->cnt = 0;
tx_ring->txbufs = NULL;
tx_ring->txds = NULL;
tx_ring->dma = 0;
tx_ring->size = 0;
}
static int
nfp_nfdk_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
tx_ring->cnt = dp->txd_cnt * NFDK_TX_DESC_PER_SIMPLE_PKT;
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->ktxds));
tx_ring->ktxds = dma_alloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma,
GFP_KERNEL | __GFP_NOWARN);
if (!tx_ring->ktxds) {
netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
tx_ring->cnt);
goto err_alloc;
}
tx_ring->ktxbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->ktxbufs),
GFP_KERNEL);
if (!tx_ring->ktxbufs)
goto err_alloc;
if (!tx_ring->is_xdp && dp->netdev)
netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
return 0;
err_alloc:
nfp_nfdk_tx_ring_free(tx_ring);
return -ENOMEM;
}
static void
nfp_nfdk_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
}
static int
nfp_nfdk_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
return 0;
}
static void
nfp_nfdk_print_tx_descs(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p)
{
struct nfp_nfdk_tx_desc *txd;
u32 txd_cnt = tx_ring->cnt;
int i;
for (i = 0; i < txd_cnt; i++) {
txd = &tx_ring->ktxds[i];
seq_printf(file, "%04d: 0x%08x 0x%08x 0x%016llx", i,
txd->vals[0], txd->vals[1], tx_ring->ktxbufs[i].raw);
if (i == tx_ring->rd_p % txd_cnt)
seq_puts(file, " H_RD");
if (i == tx_ring->wr_p % txd_cnt)
seq_puts(file, " H_WR");
if (i == d_rd_p % txd_cnt)
seq_puts(file, " D_RD");
if (i == d_wr_p % txd_cnt)
seq_puts(file, " D_WR");
seq_putc(file, '\n');
}
}
#define NFP_NFDK_CFG_CTRL_SUPPORTED \
(NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_CTRL_PROMISC | \
NFP_NET_CFG_CTRL_L2BC | NFP_NET_CFG_CTRL_L2MC | \
NFP_NET_CFG_CTRL_RXCSUM | NFP_NET_CFG_CTRL_TXCSUM | \
NFP_NET_CFG_CTRL_RXVLAN | \
NFP_NET_CFG_CTRL_GATHER | NFP_NET_CFG_CTRL_LSO | \
NFP_NET_CFG_CTRL_CTAG_FILTER | NFP_NET_CFG_CTRL_CMSG_DATA | \
NFP_NET_CFG_CTRL_RINGCFG | NFP_NET_CFG_CTRL_IRQMOD | \
NFP_NET_CFG_CTRL_TXRWB | \
NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE | \
NFP_NET_CFG_CTRL_BPF | NFP_NET_CFG_CTRL_LSO2 | \
NFP_NET_CFG_CTRL_RSS2 | NFP_NET_CFG_CTRL_CSUM_COMPLETE | \
NFP_NET_CFG_CTRL_LIVE_ADDR)
const struct nfp_dp_ops nfp_nfdk_ops = {
.version = NFP_NFD_VER_NFDK,
.tx_min_desc_per_pkt = NFDK_TX_DESC_PER_SIMPLE_PKT,
.cap_mask = NFP_NFDK_CFG_CTRL_SUPPORTED,
.poll = nfp_nfdk_poll,
.ctrl_poll = nfp_nfdk_ctrl_poll,
.xmit = nfp_nfdk_tx,
.ctrl_tx_one = nfp_nfdk_ctrl_tx_one,
.rx_ring_fill_freelist = nfp_nfdk_rx_ring_fill_freelist,
.tx_ring_alloc = nfp_nfdk_tx_ring_alloc,
.tx_ring_reset = nfp_nfdk_tx_ring_reset,
.tx_ring_free = nfp_nfdk_tx_ring_free,
.tx_ring_bufs_alloc = nfp_nfdk_tx_ring_bufs_alloc,
.tx_ring_bufs_free = nfp_nfdk_tx_ring_bufs_free,
.print_tx_descs = nfp_nfdk_print_tx_descs
};
@@ -98,12 +98,19 @@
/* Forward declarations */
struct nfp_cpp;
struct nfp_dev_info;
struct nfp_dp_ops;
struct nfp_eth_table_port;
struct nfp_net;
struct nfp_net_r_vector;
struct nfp_port;
struct xsk_buff_pool;
struct nfp_nfd3_tx_desc;
struct nfp_nfd3_tx_buf;
struct nfp_nfdk_tx_desc;
struct nfp_nfdk_tx_buf;
/* Convenience macro for wrapping descriptor index on ring size */
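/* The mask form only wraps correctly when the ring size is a power of two;
 * e.g. with cnt == 256, D_IDX maps index 260 to 4.
 */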
#define D_IDX(ring, idx) ((idx) & ((ring)->cnt - 1))
@@ -117,97 +124,25 @@ struct xsk_buff_pool;
__d->dma_addr_hi = upper_32_bits(__addr) & 0xff; \
} while (0)
/* TX descriptor format */
#define PCIE_DESC_TX_EOP BIT(7)
#define PCIE_DESC_TX_OFFSET_MASK GENMASK(6, 0)
#define PCIE_DESC_TX_MSS_MASK GENMASK(13, 0)
/* Flags in the host TX descriptor */
#define PCIE_DESC_TX_CSUM BIT(7)
#define PCIE_DESC_TX_IP4_CSUM BIT(6)
#define PCIE_DESC_TX_TCP_CSUM BIT(5)
#define PCIE_DESC_TX_UDP_CSUM BIT(4)
#define PCIE_DESC_TX_VLAN BIT(3)
#define PCIE_DESC_TX_LSO BIT(2)
#define PCIE_DESC_TX_ENCAP BIT(1)
#define PCIE_DESC_TX_O_IP4_CSUM BIT(0)
struct nfp_net_tx_desc {
union {
struct {
u8 dma_addr_hi; /* High bits of host buf address */
__le16 dma_len; /* Length to DMA for this desc */
u8 offset_eop; /* Offset in buf where pkt starts +
* highest bit is eop flag.
*/
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
__le16 mss; /* MSS to be used for LSO */
u8 lso_hdrlen; /* LSO, TCP payload offset */
u8 flags; /* TX Flags, see @PCIE_DESC_TX_* */
union {
struct {
u8 l3_offset; /* L3 header offset */
u8 l4_offset; /* L4 header offset */
};
__le16 vlan; /* VLAN tag to add if indicated */
};
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
__le64 vals8[2];
};
};
/**
* struct nfp_net_tx_buf - software TX buffer descriptor
* @skb: normal ring, sk_buff associated with this buffer
* @frag: XDP ring, page frag associated with this buffer
* @xdp: XSK buffer pool handle (for AF_XDP)
* @dma_addr: DMA mapping address of the buffer
* @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
* @pkt_cnt: Number of packets to be produced out of the skb associated
* with this buffer (valid only on the head's buffer).
* Will be 1 for all non-TSO packets.
* @is_xsk_tx: Flag if buffer is a RX buffer after a XDP_TX action and not a
* buffer from the TX queue (for AF_XDP).
* @real_len: Number of bytes which to be produced out of the skb (valid only
* on the head's buffer). Equal to skb->len for non-TSO packets.
*/
struct nfp_net_tx_buf {
union {
struct sk_buff *skb;
void *frag;
struct xdp_buff *xdp;
};
dma_addr_t dma_addr;
union {
struct {
short int fidx;
u16 pkt_cnt;
};
struct {
bool is_xsk_tx;
};
};
u32 real_len;
};
/**
* struct nfp_net_tx_ring - TX ring structure
* @r_vec: Back pointer to ring vector structure
* @idx: Ring index from Linux's perspective
* @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
* @data_pending: number of bytes added to current block (NFDK only)
* @qcp_q: Pointer to base of the QCP TX queue
* @txrwb: TX pointer write back area
* @cnt: Size of the queue in number of descriptors
* @wr_p: TX ring write pointer (free running)
* @rd_p: TX ring read pointer (free running)
* @qcp_rd_p: Local copy of QCP TX queue read pointer
* @wr_ptr_add: Accumulated number of buffers to add to QCP write pointer
* (used for .xmit_more delayed kick)
* @txbufs: Array of transmitted TX buffers, to free on transmit
* @txds: Virtual address of TX ring in host memory
* @txbufs: Array of transmitted TX buffers, to free on transmit (NFD3)
* @ktxbufs: Array of transmitted TX buffers, to free on transmit (NFDK)
* @txds: Virtual address of TX ring in host memory (NFD3)
* @ktxds: Virtual address of TX ring in host memory (NFDK)
*
* @qcidx: Queue Controller Peripheral (QCP) queue index for the TX queue
* @dma: DMA address of the TX ring
* @size: Size, in bytes, of the TX ring (needed to free)
* @is_xdp: Is this a XDP TX ring?
@@ -215,9 +150,10 @@ struct nfp_net_tx_buf {
struct nfp_net_tx_ring {
struct nfp_net_r_vector *r_vec;
u32 idx;
int qcidx;
u16 idx;
u16 data_pending;
u8 __iomem *qcp_q;
u64 *txrwb;
u32 cnt;
u32 wr_p;
@@ -226,8 +162,17 @@ struct nfp_net_tx_ring {
u32 wr_ptr_add;
struct nfp_net_tx_buf *txbufs;
struct nfp_net_tx_desc *txds;
union {
struct nfp_nfd3_tx_buf *txbufs;
struct nfp_nfdk_tx_buf *ktxbufs;
};
union {
struct nfp_nfd3_tx_desc *txds;
struct nfp_nfdk_tx_desc *ktxds;
};
/* Cold data follows */
int qcidx;
dma_addr_t dma;
size_t size;
@@ -479,13 +424,17 @@ struct nfp_net_fw_version {
u8 minor;
u8 major;
u8 class;
u8 resv;
/* This byte is available for further use. Currently,
* BIT0: dp type, BIT[7:1]: reserved
*/
u8 extend;
} __packed;
static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
u8 resv, u8 class, u8 major, u8 minor)
u8 extend, u8 class, u8 major, u8 minor)
{
return fw_ver->resv == resv &&
return fw_ver->extend == extend &&
fw_ver->class == class &&
fw_ver->major == major &&
fw_ver->minor == minor;
@@ -513,8 +462,11 @@ struct nfp_stat_pair {
* @rx_rings: Array of pre-allocated RX ring structures
* @ctrl_bar: Pointer to mapped control BAR
*
* @txd_cnt: Size of the TX ring in number of descriptors
* @rxd_cnt: Size of the RX ring in number of descriptors
* @ops: Callbacks and parameters for this vNIC's NFD version
* @txrwb: TX pointer write back area (indexed by queue id)
* @txrwb_dma: TX pointer write back area DMA address
* @txd_cnt: Size of the TX ring in number of min size packets
* @rxd_cnt: Size of the RX ring in number of min size packets
* @num_r_vecs: Number of used ring vectors
* @num_tx_rings: Currently configured number of TX rings
* @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
@@ -547,6 +499,11 @@ struct nfp_net_dp {
/* Cold data follows */
const struct nfp_dp_ops *ops;
u64 *txrwb;
dma_addr_t txrwb_dma;
unsigned int txd_cnt;
unsigned int rxd_cnt;
@@ -915,11 +872,13 @@ static inline void nn_ctrl_bar_unlock(struct nfp_net *nn)
/* Globals */
extern const char nfp_driver_version[];
extern const struct net_device_ops nfp_net_netdev_ops;
extern const struct net_device_ops nfp_nfd3_netdev_ops;
extern const struct net_device_ops nfp_nfdk_netdev_ops;
static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev)
{
return netdev->netdev_ops == &nfp_net_netdev_ops;
return netdev->netdev_ops == &nfp_nfd3_netdev_ops ||
netdev->netdev_ops == &nfp_nfdk_netdev_ops;
}
static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts)
@@ -960,7 +919,6 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr);
unsigned int
nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
unsigned int min_irqs, unsigned int want_irqs);
@@ -968,19 +926,10 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
void
nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
unsigned int n);
void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring);
void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
bool
nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len);
void nfp_net_rx_csum(const struct nfp_net_dp *dp,
struct nfp_net_r_vector *r_vec,
const struct nfp_net_rx_desc *rxd,
const struct nfp_meta_parsed *meta,
struct sk_buff *skb);
struct sk_buff *
nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);
struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
......
@@ -149,7 +149,10 @@
* - define more STS bits
*/
#define NFP_NET_CFG_VERSION 0x0030
#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24)
#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xfe << 24)
#define NFP_NET_CFG_VERSION_DP_NFD3 0
#define NFP_NET_CFG_VERSION_DP_NFDK 1
#define NFP_NET_CFG_VERSION_DP_MASK 1
#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16)
#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16)
#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0
......
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include "nfp_net.h"
#include "nfp_net_dp.h"
static struct dentry *nfp_dir;
@@ -80,10 +81,8 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_tx_desc *txd;
int d_rd_p, d_wr_p, txd_cnt;
struct nfp_net *nn;
int i;
int d_rd_p, d_wr_p;
rtnl_lock();
@@ -97,52 +96,20 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
if (!nfp_net_running(nn))
goto out;
txd_cnt = tx_ring->cnt;
d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u\n",
seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u",
tx_ring->idx, tx_ring->qcidx,
tx_ring == r_vec->tx_ring ? "" : "xdp",
tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
for (i = 0; i < txd_cnt; i++) {
struct xdp_buff *xdp;
struct sk_buff *skb;
txd = &tx_ring->txds[i];
seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
txd->vals[0], txd->vals[1],
txd->vals[2], txd->vals[3]);
if (!tx_ring->is_xdp) {
skb = READ_ONCE(tx_ring->txbufs[i].skb);
if (skb)
seq_printf(file, " skb->head=%p skb->data=%p",
skb->head, skb->data);
} else {
xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
if (xdp)
seq_printf(file, " xdp->data=%p", xdp->data);
}
if (tx_ring->txbufs[i].dma_addr)
seq_printf(file, " dma_addr=%pad",
&tx_ring->txbufs[i].dma_addr);
if (i == tx_ring->rd_p % txd_cnt)
seq_puts(file, " H_RD");
if (i == tx_ring->wr_p % txd_cnt)
seq_puts(file, " H_WR");
if (i == d_rd_p % txd_cnt)
seq_puts(file, " D_RD");
if (i == d_wr_p % txd_cnt)
seq_puts(file, " D_WR");
if (tx_ring->txrwb)
seq_printf(file, " TXRWB=%llu", *tx_ring->txrwb);
seq_putc(file, '\n');
}
nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring,
d_rd_p, d_wr_p);
out:
rtnl_unlock();
return 0;
......
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2019 Netronome Systems, Inc. */
#ifndef _NFP_NET_DP_
#define _NFP_NET_DP_
#include "nfp_net.h"
static inline dma_addr_t nfp_net_dma_map_rx(struct nfp_net_dp *dp, void *frag)
{
return dma_map_single_attrs(dp->dev, frag + NFP_NET_RX_BUF_HEADROOM,
dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}
static inline void
nfp_net_dma_sync_dev_rx(const struct nfp_net_dp *dp, dma_addr_t dma_addr)
{
dma_sync_single_for_device(dp->dev, dma_addr,
dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
dp->rx_dma_dir);
}
static inline void nfp_net_dma_unmap_rx(struct nfp_net_dp *dp,
dma_addr_t dma_addr)
{
dma_unmap_single_attrs(dp->dev, dma_addr,
dp->fl_bufsz - NFP_NET_RX_BUF_NON_DATA,
dp->rx_dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
}
static inline void nfp_net_dma_sync_cpu_rx(struct nfp_net_dp *dp,
dma_addr_t dma_addr,
unsigned int len)
{
dma_sync_single_for_cpu(dp->dev, dma_addr - NFP_NET_RX_BUF_HEADROOM,
len, dp->rx_dma_dir);
}
/**
* nfp_net_tx_full() - check if the TX ring is full
* @tx_ring: TX ring to check
* @dcnt: Number of descriptors that need to be enqueued (must be >= 1)
*
* This function checks, based on the *host copy* of the read/write
* pointers, whether a given TX ring is full. The real TX queue may have
* some newly made available slots.
*
* Return: True if the ring is full.
*/
static inline int nfp_net_tx_full(struct nfp_net_tx_ring *tx_ring, int dcnt)
{
return (tx_ring->wr_p - tx_ring->rd_p) >= (tx_ring->cnt - dcnt);
}
static inline void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
wmb(); /* drain writebuffer */
nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
tx_ring->wr_ptr_add = 0;
}
static inline u32
nfp_net_read_tx_cmpl(struct nfp_net_tx_ring *tx_ring, struct nfp_net_dp *dp)
{
if (tx_ring->txrwb)
return *tx_ring->txrwb;
return nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
}
static inline void nfp_net_free_frag(void *frag, bool xdp)
{
if (!xdp)
skb_free_frag(frag);
else
__free_page(virt_to_page(frag));
}
/**
* nfp_net_irq_unmask() - Unmask automasked interrupt
* @nn: NFP Network structure
* @entry_nr: MSI-X table entry
*
* Clear the ICR for the IRQ entry.
*/
static inline void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr)
{
nn_writeb(nn, NFP_NET_CFG_ICR(entry_nr), NFP_NET_CFG_ICR_UNMASKED);
nn_pci_flush(nn);
}
struct seq_file;
/* Common */
void
nfp_net_rx_ring_hw_cfg_write(struct nfp_net *nn,
struct nfp_net_rx_ring *rx_ring, unsigned int idx);
void
nfp_net_tx_ring_hw_cfg_write(struct nfp_net *nn,
struct nfp_net_tx_ring *tx_ring, unsigned int idx);
void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx);
void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr);
int nfp_net_rx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
int nfp_net_tx_rings_prepare(struct nfp_net *nn, struct nfp_net_dp *dp);
void nfp_net_rx_rings_free(struct nfp_net_dp *dp);
void nfp_net_tx_rings_free(struct nfp_net_dp *dp);
void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring);
enum nfp_nfd_version {
NFP_NFD_VER_NFD3,
NFP_NFD_VER_NFDK,
};
/**
* struct nfp_dp_ops - Hooks to wrap the different data path implementations
* @version: Indicate dp type
* @tx_min_desc_per_pkt: Minimal TX descs needed for each packet
* @cap_mask: Mask of supported features
* @poll: Napi poll for normal rx/tx
* @xsk_poll: Napi poll when xsk is enabled
* @ctrl_poll: Tasklet poll for ctrl rx/tx
* @xmit: Xmit for normal path
* @ctrl_tx_one: Xmit for ctrl path
* @rx_ring_fill_freelist: Give buffers from the ring to FW
* @tx_ring_alloc: Allocate resource for a TX ring
* @tx_ring_reset: Free any untransmitted buffers and reset pointers
* @tx_ring_free: Free resources allocated to a TX ring
* @tx_ring_bufs_alloc: Allocate resource for each TX buffer
* @tx_ring_bufs_free: Free resources allocated to each TX buffer
* @print_tx_descs: Show TX ring's info for debug purpose
*/
struct nfp_dp_ops {
enum nfp_nfd_version version;
unsigned int tx_min_desc_per_pkt;
u32 cap_mask;
int (*poll)(struct napi_struct *napi, int budget);
int (*xsk_poll)(struct napi_struct *napi, int budget);
void (*ctrl_poll)(struct tasklet_struct *t);
netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *netdev);
bool (*ctrl_tx_one)(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct sk_buff *skb, bool old);
void (*rx_ring_fill_freelist)(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
int (*tx_ring_alloc)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*tx_ring_reset)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*tx_ring_free)(struct nfp_net_tx_ring *tx_ring);
int (*tx_ring_bufs_alloc)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*tx_ring_bufs_free)(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void (*print_tx_descs)(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p);
};
static inline void
nfp_net_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
return dp->ops->tx_ring_reset(dp, tx_ring);
}
static inline void
nfp_net_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring)
{
dp->ops->rx_ring_fill_freelist(dp, rx_ring);
}
static inline int
nfp_net_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
return dp->ops->tx_ring_alloc(dp, tx_ring);
}
static inline void
nfp_net_tx_ring_free(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
dp->ops->tx_ring_free(tx_ring);
}
static inline int
nfp_net_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
return dp->ops->tx_ring_bufs_alloc(dp, tx_ring);
}
static inline void
nfp_net_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
dp->ops->tx_ring_bufs_free(dp, tx_ring);
}
static inline void
nfp_net_debugfs_print_tx_descs(struct seq_file *file, struct nfp_net_dp *dp,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p)
{
dp->ops->print_tx_descs(file, r_vec, tx_ring, d_rd_p, d_wr_p);
}
extern const struct nfp_dp_ops nfp_nfd3_ops;
extern const struct nfp_dp_ops nfp_nfdk_ops;
netdev_tx_t nfp_net_tx(struct sk_buff *skb, struct net_device *netdev);
#endif /* _NFP_NET_DP_ */