Commit 5377805d authored by Shannon Nelson, committed by David S. Miller

ionic: implement xdp frags support

Add support for using scatter-gather / frags in XDP in both
Rx and Tx paths.
Co-developed-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Brett Creeley <brett.creeley@amd.com>
Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 26f5726a
...@@ -881,7 +881,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) ...@@ -881,7 +881,8 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
q->partner = &lif->txqcqs[q->index]->q; q->partner = &lif->txqcqs[q->index]->q;
q->partner->partner = q; q->partner->partner = q;
if (!lif->xdp_prog) if (!lif->xdp_prog ||
(lif->xdp_prog->aux && lif->xdp_prog->aux->xdp_has_frags))
ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG); ctx.cmd.q_init.flags |= cpu_to_le16(IONIC_QINIT_F_SG);
if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) { if (qcq->flags & IONIC_QCQ_F_CMB_RINGS) {
...@@ -1651,7 +1652,9 @@ static int ionic_init_nic_features(struct ionic_lif *lif) ...@@ -1651,7 +1652,9 @@ static int ionic_init_nic_features(struct ionic_lif *lif)
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | netdev->xdp_features = NETDEV_XDP_ACT_BASIC |
NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_NDO_XMIT; NETDEV_XDP_ACT_RX_SG |
NETDEV_XDP_ACT_NDO_XMIT |
NETDEV_XDP_ACT_NDO_XMIT_SG;
return 0; return 0;
} }
...@@ -1799,6 +1802,9 @@ static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu, ...@@ -1799,6 +1802,9 @@ static bool ionic_xdp_is_valid_mtu(struct ionic_lif *lif, u32 mtu,
if (mtu <= IONIC_XDP_MAX_LINEAR_MTU) if (mtu <= IONIC_XDP_MAX_LINEAR_MTU)
return true; return true;
if (xdp_prog->aux && xdp_prog->aux->xdp_has_frags)
return true;
return false; return false;
} }
...@@ -2812,7 +2818,7 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf) ...@@ -2812,7 +2818,7 @@ static int ionic_xdp_config(struct net_device *netdev, struct netdev_bpf *bpf)
} }
maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN; maxfs = __le32_to_cpu(lif->identity->eth.max_frame_size) - VLAN_ETH_HLEN;
if (bpf->prog) if (bpf->prog && !(bpf->prog->aux && bpf->prog->aux->xdp_has_frags))
maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU); maxfs = min_t(u32, maxfs, IONIC_XDP_MAX_LINEAR_MTU);
netdev->max_mtu = maxfs; netdev->max_mtu = maxfs;
......
...@@ -15,6 +15,13 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs); ...@@ -15,6 +15,13 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs);
static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
void *data, size_t len); void *data, size_t len);
static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
const skb_frag_t *frag,
size_t offset, size_t len);
static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
struct ionic_desc_info *desc_info);
static void ionic_tx_clean(struct ionic_queue *q, static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_desc_info *desc_info, struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, struct ionic_cq_info *cq_info,
...@@ -313,6 +320,7 @@ static void ionic_xdp_tx_desc_clean(struct ionic_queue *q, ...@@ -313,6 +320,7 @@ static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
unsigned int nbufs = desc_info->nbufs; unsigned int nbufs = desc_info->nbufs;
struct ionic_buf_info *buf_info; struct ionic_buf_info *buf_info;
struct device *dev = q->dev; struct device *dev = q->dev;
int i;
if (!nbufs) if (!nbufs)
return; return;
...@@ -324,6 +332,15 @@ static void ionic_xdp_tx_desc_clean(struct ionic_queue *q, ...@@ -324,6 +332,15 @@ static void ionic_xdp_tx_desc_clean(struct ionic_queue *q,
__free_pages(buf_info->page, 0); __free_pages(buf_info->page, 0);
buf_info->page = NULL; buf_info->page = NULL;
buf_info++;
for (i = 1; i < nbufs + 1 && buf_info->page; i++, buf_info++) {
dma_unmap_page(dev, buf_info->dma_addr,
buf_info->len, DMA_TO_DEVICE);
if (desc_info->act == XDP_TX)
__free_pages(buf_info->page, 0);
buf_info->page = NULL;
}
if (desc_info->act == XDP_REDIRECT) if (desc_info->act == XDP_REDIRECT)
xdp_return_frame(desc_info->xdpf); xdp_return_frame(desc_info->xdpf);
...@@ -364,8 +381,38 @@ static int ionic_xdp_post_frame(struct net_device *netdev, ...@@ -364,8 +381,38 @@ static int ionic_xdp_post_frame(struct net_device *netdev,
desc_info->xdpf = frame; desc_info->xdpf = frame;
desc_info->act = act; desc_info->act = act;
if (xdp_frame_has_frags(frame)) {
struct ionic_txq_sg_elem *elem;
struct skb_shared_info *sinfo;
struct ionic_buf_info *bi;
skb_frag_t *frag;
int i;
bi = &buf_info[1];
sinfo = xdp_get_shared_info_from_frame(frame);
frag = sinfo->frags;
elem = desc_info->txq_sg_desc->elems;
for (i = 0; i < sinfo->nr_frags; i++, frag++, bi++) {
dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
if (dma_mapping_error(q->dev, dma_addr)) {
stats->dma_map_err++;
ionic_tx_desc_unmap_bufs(q, desc_info);
return -EIO;
}
bi->dma_addr = dma_addr;
bi->len = skb_frag_size(frag);
bi->page = skb_frag_page(frag);
elem->addr = cpu_to_le64(bi->dma_addr);
elem->len = cpu_to_le16(bi->len);
elem++;
desc_info->nbufs++;
}
}
cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE, cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
0, 0, buf_info->dma_addr); 0, (desc_info->nbufs - 1), buf_info->dma_addr);
desc->cmd = cpu_to_le64(cmd); desc->cmd = cpu_to_le64(cmd);
desc->len = cpu_to_le16(len); desc->len = cpu_to_le16(len);
desc->csum_start = 0; desc->csum_start = 0;
...@@ -449,11 +496,14 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, ...@@ -449,11 +496,14 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct ionic_queue *txq; struct ionic_queue *txq;
struct netdev_queue *nq; struct netdev_queue *nq;
struct xdp_frame *xdpf; struct xdp_frame *xdpf;
int remain_len;
int frag_len;
int err = 0; int err = 0;
xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info); xdp_init_buff(&xdp_buf, IONIC_PAGE_SIZE, rxq->xdp_rxq_info);
frag_len = min_t(u16, len, IONIC_XDP_MAX_LINEAR_MTU + VLAN_ETH_HLEN);
xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info), xdp_prepare_buff(&xdp_buf, ionic_rx_buf_va(buf_info),
XDP_PACKET_HEADROOM, len, false); XDP_PACKET_HEADROOM, frag_len, false);
dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info), dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
XDP_PACKET_HEADROOM, len, XDP_PACKET_HEADROOM, len,
...@@ -461,6 +511,43 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, ...@@ -461,6 +511,43 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
prefetchw(&xdp_buf.data_hard_start); prefetchw(&xdp_buf.data_hard_start);
/* We limit MTU size to one buffer if !xdp_has_frags, so
* if the recv len is bigger than one buffer
* then we know we have frag info to gather
*/
remain_len = len - frag_len;
if (remain_len) {
struct skb_shared_info *sinfo;
struct ionic_buf_info *bi;
skb_frag_t *frag;
bi = buf_info;
sinfo = xdp_get_shared_info_from_buff(&xdp_buf);
sinfo->nr_frags = 0;
sinfo->xdp_frags_size = 0;
xdp_buff_set_frags_flag(&xdp_buf);
do {
if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) {
err = -ENOSPC;
goto out_xdp_abort;
}
frag = &sinfo->frags[sinfo->nr_frags];
sinfo->nr_frags++;
bi++;
frag_len = min_t(u16, remain_len, ionic_rx_buf_size(bi));
dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(bi),
0, frag_len, DMA_FROM_DEVICE);
skb_frag_fill_page_desc(frag, bi->page, 0, frag_len);
sinfo->xdp_frags_size += frag_len;
remain_len -= frag_len;
if (page_is_pfmemalloc(bi->page))
xdp_buff_set_frag_pfmemalloc(&xdp_buf);
} while (remain_len > 0);
}
xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf); xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
switch (xdp_action) { switch (xdp_action) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment