Commit d70a2a45 authored by David S. Miller

Merge branch 'ionic-xdp-support'

Shannon Nelson says:

====================
ionic: add XDP support

This patchset adds new support in ionic for XDP processing,
including basic XDP on Rx packets, TX and REDIRECT actions, and frags
for jumbo frames.

Since ionic has not yet been converted to use the page_pool APIs,
this uses the simple MEM_TYPE_PAGE_ORDER0 buffering.  There are plans
to convert the driver in the near future.

v4:
 - removed "inline" from short utility functions
 - changed to use "goto err_out" in ionic_xdp_register_rxq_info()
 - added "continue" to reduce nesting in ionic_xdp_queues_config()
 - used xdp_prog in ionic_rx_clean() to flag whether or not to sync
   the rx buffer after calling ionic_xdp_run()
 - swapped order of XDP_TX and XDP_REDIRECT cases in ionic_xdp_run()
   to make patch 6 a little cleaner

v3:
https://lore.kernel.org/netdev/20240210004827.53814-1-shannon.nelson@amd.com/
 - removed budget==0 patch, sent it separately to net

v2:
https://lore.kernel.org/netdev/20240208005725.65134-1-shannon.nelson@amd.com/
 - added calls to txq_trans_cond_update()
 - added a new patch to catch NAPI budget==0

v1:
https://lore.kernel.org/netdev/20240130013042.11586-1-shannon.nelson@amd.com/

RFC:
https://lore.kernel.org/netdev/20240118192500.58665-1-shannon.nelson@amd.com/
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2210c548 5377805d
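For orientation before the diff: the series lets the driver run an attached XDP program on every received frame. A minimal sketch of such a program is shown below; it is not part of this patchset, and the file and function names are purely illustrative.

    // xdp_pass.bpf.c - illustrative only, not from this series
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_pass_prog(struct xdp_md *ctx)
    {
    	/* let every frame continue up the normal networking stack */
    	return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";

A program like this would typically be compiled with clang's BPF target and attached in native driver mode, e.g. with: ip link set dev <ionic netdev> xdpdrv obj xdp_pass.bpf.o sec xdp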
@@ -8,6 +8,7 @@
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
 #include <linux/skbuff.h>
+#include <linux/bpf_trace.h>

 #include "ionic_if.h"
 #include "ionic_regs.h"
@@ -195,6 +196,11 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
 #define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
			     __GFP_COMP | __GFP_MEMALLOC)

+#define IONIC_XDP_MAX_LINEAR_MTU (IONIC_PAGE_SIZE - \
+				  (VLAN_ETH_HLEN + \
+				   XDP_PACKET_HEADROOM + \
+				   SKB_DATA_ALIGN(sizeof(struct skb_shared_info))))
+
 struct ionic_buf_info {
	struct page *page;
	dma_addr_t dma_addr;
@@ -222,6 +228,8 @@ struct ionic_desc_info {
	struct ionic_buf_info bufs[MAX_SKB_FRAGS + 1];
	ionic_desc_cb cb;
	void *cb_arg;
+	struct xdp_frame *xdpf;
+	enum xdp_action act;
 };

 #define IONIC_QUEUE_NAME_MAX_SZ 16
@@ -256,6 +264,9 @@ struct ionic_queue {
		struct ionic_txq_sg_desc *txq_sgl;
		struct ionic_rxq_sg_desc *rxq_sgl;
	};
+	struct xdp_rxq_info *xdp_rxq_info;
+	struct ionic_queue *partner;
+	bool xdp_flush;
	dma_addr_t base_pa;
	dma_addr_t cmb_base_pa;
	dma_addr_t sg_base_pa;
......
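A rough worked example of the IONIC_XDP_MAX_LINEAR_MTU bound added above, assuming a 4 KiB IONIC_PAGE_SIZE and typical x86_64 values (VLAN_ETH_HLEN = 18, XDP_PACKET_HEADROOM = 256, SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) = 320):

    4096 - (18 + 256 + 320) = 3502 bytes

So an MTU up to roughly 3.5 KB fits in a single order-0 page buffer; larger MTUs are only accepted when the attached program declares frags support (xdp_has_frags), as enforced later in ionic_xdp_is_valid_mtu() and ionic_xdp_config().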
@@ -721,6 +721,11 @@ static int ionic_set_channels(struct net_device *netdev,
	ionic_init_queue_params(lif, &qparam);

+	if ((ch->rx_count || ch->tx_count) && lif->xdp_prog) {
+		netdev_info(lif->netdev, "Split Tx/Rx interrupts not available when using XDP\n");
+		return -EOPNOTSUPP;
+	}
+
	if (ch->rx_count != ch->tx_count) {
		netdev_info(netdev, "The rx and tx count must be equal\n");
		return -EINVAL;
......
@@ -46,6 +46,9 @@ static int ionic_start_queues(struct ionic_lif *lif);
 static void ionic_stop_queues(struct ionic_lif *lif);
 static void ionic_lif_queue_identify(struct ionic_lif *lif);

+static int ionic_xdp_queues_config(struct ionic_lif *lif);
+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q);
+
 static void ionic_dim_work(struct work_struct *work)
 {
	struct dim *dim = container_of(work, struct dim, work);
@@ -422,6 +425,7 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
		qcq->sg_base_pa = 0;
	}

+	ionic_xdp_unregister_rxq_info(&qcq->q);
	ionic_qcq_intr_free(lif, qcq);

	vfree(qcq->cq.info);
@@ -862,8 +866,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
-			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
-					     IONIC_QINIT_F_SG),
+			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ),
			.intr_index = cpu_to_le16(cq->bound_intr->index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
@@ -875,6 +878,13 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
	};
	int err;

+	q->partner = &lif->txqcqs[q->index]->q;
+	q->partner->partner = q;
+
+	if (!lif->xdp_prog ||
+	    (lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
+		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
+
	if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
		ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_CMB);
		ctx.cmd.q_init.ring_base = cpu_to_le64(qcq->cmb_q_base_pa);
@@ -1640,6 +1650,12 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

+	netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
+			       NETDEV_XDP_ACT_REDIRECT |
+			       NETDEV_XDP_ACT_RX_SG |
+			       NETDEV_XDP_ACT_NDO_XMIT |
+			       NETDEV_XDP_ACT_NDO_XMIT_SG;
+
	return 0;
 }
@@ -1777,6 +1793,21 @@ static int ionic_start_queues_reconfig(struct ionic_lif *lif)
	return err;
 }

+static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
+				   struct bpf_prog *xdp_prog)
+{
+	if (!xdp_prog)
+		return true;
+
+	if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
+		return true;
+
+	if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
+		return true;
+
+	return false;
+}
+
 static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
 {
	struct ionic_lif *lif = netdev_priv(netdev);
@@ -1789,8 +1820,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
			.mtu = cpu_to_le32(new_mtu),
		},
	};
+	struct bpf_prog *xdp_prog;
	int err;

+	xdp_prog = READ_ONCE(lif->xdp_prog);
+	if (!ionic_xdp_is_valid_mtu(lif, new_mtu, xdp_prog))
+		return -EINVAL;
+
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
@@ -2166,6 +2202,10 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
	int derr = 0;
	int i, err;

+	err = ionic_xdp_queues_config(lif);
+	if (err)
+		return err;
+
	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
@@ -2211,6 +2251,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
			derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
	}

+	ionic_xdp_queues_config(lif);
+
	return err;
 }
@@ -2668,11 +2710,151 @@ static void ionic_vf_attr_replay(struct ionic_lif *lif)
	ionic_vf_start(ionic);
 }

+static void ionic_xdp_unregister_rxq_info(struct ionic_queue *q)
+{
+	struct xdp_rxq_info *xi;
+
+	if (!q->xdp_rxq_info)
+		return;
+
+	xi = q->xdp_rxq_info;
+	q->xdp_rxq_info = NULL;
+
+	xdp_rxq_info_unreg(xi);
+	kfree(xi);
+}
+
+static int ionic_xdp_register_rxq_info(struct ionic_queue *q, unsigned int napi_id)
+{
+	struct xdp_rxq_info *rxq_info;
+	int err;
+
+	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+	if (!rxq_info)
+		return -ENOMEM;
+
+	err = xdp_rxq_info_reg(rxq_info, q->lif->netdev, q->index, napi_id);
+	if (err) {
+		dev_err(q->dev, "Queue %d xdp_rxq_info_reg failed, err %d\n",
+			q->index, err);
+		goto err_out;
+	}
+
+	err = xdp_rxq_info_reg_mem_model(rxq_info, MEM_TYPE_PAGE_ORDER0, NULL);
+	if (err) {
+		dev_err(q->dev, "Queue %d xdp_rxq_info_reg_mem_model failed, err %d\n",
+			q->index, err);
+		xdp_rxq_info_unreg(rxq_info);
+		goto err_out;
+	}
+
+	q->xdp_rxq_info = rxq_info;
+
+	return 0;
+
+err_out:
+	kfree(rxq_info);
+	return err;
+}
+
+static int ionic_xdp_queues_config(struct ionic_lif *lif)
+{
+	unsigned int i;
+	int err;
+
+	if (!lif->rxqcqs)
+		return 0;
+
+	/* There's no need to rework memory if not going to/from NULL program.
+	 * If there is no lif->xdp_prog, there should also be no q.xdp_rxq_info
+	 * This way we don't need to keep an *xdp_prog in every queue struct.
+	 */
+	if (!lif->xdp_prog == !lif->rxqcqs[0]->q.xdp_rxq_info)
+		return 0;
+
+	for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+		struct ionic_queue *q = &lif->rxqcqs[i]->q;
+
+		if (q->xdp_rxq_info) {
+			ionic_xdp_unregister_rxq_info(q);
+			continue;
+		}
+
+		err = ionic_xdp_register_rxq_info(q, lif->rxqcqs[i]->napi.napi_id);
+		if (err) {
+			dev_err(lif->ionic->dev, "failed to register RX queue %d info for XDP, err %d\n",
+				i, err);
+			goto err_out;
+		}
+	}
+
+	return 0;
+
+err_out:
+	for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++)
+		ionic_xdp_unregister_rxq_info(&lif->rxqcqs[i]->q);
+
+	return err;
+}
+
+static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+	struct ionic_lif *lif = netdev_priv(netdev);
+	struct bpf_prog *old_prog;
+	u32 maxfs;
+
+	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+#define XDP_ERR_SPLIT "XDP not available with split Tx/Rx interrupts"
+		NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_SPLIT);
+		netdev_info(lif->netdev, XDP_ERR_SPLIT);
+		return -EOPNOTSUPP;
+	}
+
+	if (!ionic_xdp_is_valid_mtu(lif, netdev->mtu, bpf->prog)) {
+#define XDP_ERR_MTU "MTU is too large for XDP without frags support"
+		NL_SET_ERR_MSG_MOD(bpf->extack, XDP_ERR_MTU);
+		netdev_info(lif->netdev, XDP_ERR_MTU);
+		return -EINVAL;
+	}
+
+	maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
+	if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags))
+		maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
+	netdev->max_mtu = maxfs;
+
+	if (!netif_running(netdev)) {
+		old_prog = xchg(&lif->xdp_prog, bpf->prog);
+	} else {
+		mutex_lock(&lif->queue_lock);
+		ionic_stop_queues_reconfig(lif);
+		old_prog = xchg(&lif->xdp_prog, bpf->prog);
+		ionic_start_queues_reconfig(lif);
+		mutex_unlock(&lif->queue_lock);
+	}
+
+	if (old_prog)
+		bpf_prog_put(old_prog);
+
+	return 0;
+}
+
+static int ionic_xdp(struct net_device *netdev, struct netdev_bpf *bpf)
+{
+	switch (bpf->command) {
+	case XDP_SETUP_PROG:
+		return ionic_xdp_config(netdev, bpf);
+	default:
+		return -EINVAL;
+	}
+}
+
 static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open		= ionic_open,
	.ndo_stop		= ionic_stop,
	.ndo_eth_ioctl		= ionic_eth_ioctl,
	.ndo_start_xmit		= ionic_start_xmit,
+	.ndo_bpf		= ionic_xdp,
+	.ndo_xdp_xmit		= ionic_xdp_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
@@ -2755,6 +2937,8 @@ static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
	swap(a->q.base, b->q.base);
	swap(a->q.base_pa, b->q.base_pa);
	swap(a->q.info, b->q.info);
+	swap(a->q.xdp_rxq_info, b->q.xdp_rxq_info);
+	swap(a->q.partner, b->q.partner);
	swap(a->q_base, b->q_base);
	swap(a->q_base_pa, b->q_base_pa);
	swap(a->q_size, b->q_size);
@@ -3391,9 +3575,12 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
	napi_enable(&qcq->napi);

-	if (qcq->flags & IONIC_QCQ_F_INTR)
+	if (qcq->flags & IONIC_QCQ_F_INTR) {
+		irq_set_affinity_hint(qcq->intr.vector,
+				      &qcq->intr.affinity_mask);
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);
+	}

	qcq->flags |= IONIC_QCQ_F_INITED;
......
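The xdp_has_frags checks in the hunks above key off how the program was loaded. As a hedged illustration (not part of this series), a frags-aware program is typically declared with libbpf's xdp.frags section name, which sets BPF_F_XDP_HAS_FRAGS at load time and in turn lets ionic keep IONIC_QINIT_F_SG enabled and accept jumbo MTUs:

    // xdp_frags.bpf.c - illustrative only, not from this series
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp.frags")
    int xdp_jumbo_prog(struct xdp_md *ctx)
    {
    	/* helpers such as bpf_xdp_get_buff_len() see the whole
    	 * multi-buffer frame here, not just the linear part
    	 */
    	return XDP_PASS;
    }

    char _license[] SEC("license") = "GPL";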
@@ -37,6 +37,7 @@ struct ionic_tx_stats {
	u64 dma_map_err;
	u64 hwstamp_valid;
	u64 hwstamp_invalid;
+	u64 xdp_frames;
 };

 struct ionic_rx_stats {
@@ -51,6 +52,11 @@ struct ionic_rx_stats {
	u64 alloc_err;
	u64 hwstamp_valid;
	u64 hwstamp_invalid;
+	u64 xdp_drop;
+	u64 xdp_aborted;
+	u64 xdp_pass;
+	u64 xdp_tx;
+	u64 xdp_redirect;
 };

 #define IONIC_QCQ_F_INITED	BIT(0)
@@ -135,6 +141,12 @@ struct ionic_lif_sw_stats {
	u64 hw_rx_over_errors;
	u64 hw_rx_missed_errors;
	u64 hw_tx_aborted_errors;
+	u64 xdp_drop;
+	u64 xdp_aborted;
+	u64 xdp_pass;
+	u64 xdp_tx;
+	u64 xdp_redirect;
+	u64 xdp_frames;
 };

 enum ionic_lif_state_flags {
@@ -230,6 +242,7 @@ struct ionic_lif {
	struct ionic_phc *phc;

	struct dentry *dentry;
+	struct bpf_prog *xdp_prog;
 };

 struct ionic_phc {
......
@@ -27,6 +27,12 @@ static const struct ionic_stat_desc ionic_lif_stats_desc[] = {
	IONIC_LIF_STAT_DESC(hw_rx_over_errors),
	IONIC_LIF_STAT_DESC(hw_rx_missed_errors),
	IONIC_LIF_STAT_DESC(hw_tx_aborted_errors),
+	IONIC_LIF_STAT_DESC(xdp_drop),
+	IONIC_LIF_STAT_DESC(xdp_aborted),
+	IONIC_LIF_STAT_DESC(xdp_pass),
+	IONIC_LIF_STAT_DESC(xdp_tx),
+	IONIC_LIF_STAT_DESC(xdp_redirect),
+	IONIC_LIF_STAT_DESC(xdp_frames),
 };

 static const struct ionic_stat_desc ionic_port_stats_desc[] = {
@@ -135,6 +141,7 @@ static const struct ionic_stat_desc ionic_tx_stats_desc[] = {
	IONIC_TX_STAT_DESC(csum_none),
	IONIC_TX_STAT_DESC(csum),
	IONIC_TX_STAT_DESC(vlan_inserted),
+	IONIC_TX_STAT_DESC(xdp_frames),
 };

 static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
@@ -149,6 +156,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
	IONIC_RX_STAT_DESC(hwstamp_invalid),
	IONIC_RX_STAT_DESC(dropped),
	IONIC_RX_STAT_DESC(vlan_stripped),
+	IONIC_RX_STAT_DESC(xdp_drop),
+	IONIC_RX_STAT_DESC(xdp_aborted),
+	IONIC_RX_STAT_DESC(xdp_pass),
+	IONIC_RX_STAT_DESC(xdp_tx),
+	IONIC_RX_STAT_DESC(xdp_redirect),
 };

 #define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
@@ -171,6 +183,7 @@ static void ionic_add_lif_txq_stats(struct ionic_lif *lif, int q_num,
	stats->tx_csum += txstats->csum;
	stats->tx_hwstamp_valid += txstats->hwstamp_valid;
	stats->tx_hwstamp_invalid += txstats->hwstamp_invalid;
+	stats->xdp_frames += txstats->xdp_frames;
 }

 static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
@@ -185,6 +198,11 @@ static void ionic_add_lif_rxq_stats(struct ionic_lif *lif, int q_num,
	stats->rx_csum_error += rxstats->csum_error;
	stats->rx_hwstamp_valid += rxstats->hwstamp_valid;
	stats->rx_hwstamp_invalid += rxstats->hwstamp_invalid;
+	stats->xdp_drop += rxstats->xdp_drop;
+	stats->xdp_aborted += rxstats->xdp_aborted;
+	stats->xdp_pass += rxstats->xdp_pass;
+	stats->xdp_tx += rxstats->xdp_tx;
+	stats->xdp_redirect += rxstats->xdp_redirect;
 }

 static void ionic_get_lif_stats(struct ionic_lif *lif,
......
@@ -17,4 +17,5 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev);
 bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
 bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);
+int ionic_xdp_xmit(struct net_device *netdev, int n, struct xdp_frame **xdp, u32 flags);

 #endif /* _IONIC_TXRX_H_ */