Commit b0a43db9 authored by Lorenzo Bianconi, committed by David S. Miller

net: mvneta: add XDP_TX support

Implement XDP_TX verdict and ndo_xdp_xmit net_device_ops function
pointer
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9e58c8b4
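
For context, a hedged sketch (not part of this commit): the smallest XDP program that produces the XDP_TX verdict handled by this patch. Attached to an mvneta interface, it makes the driver transmit every received frame back out of the same port through the new mvneta_xdp_xmit_back() path. The program and function names below are illustrative only.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_tx_reflect(struct xdp_md *ctx)
{
	/* XDP_TX asks the driver to bounce the frame back out of the RX
	 * interface; mvneta_run_xdp() maps it to mvneta_xdp_xmit_back(). */
	return XDP_TX;
}

char _license[] SEC("license") = "GPL";
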
@@ -1800,16 +1800,19 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
 
 		mvneta_txq_inc_get(txq);
 
-		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr) &&
+		    buf->type != MVNETA_TYPE_XDP_TX)
 			dma_unmap_single(pp->dev->dev.parent,
 					 tx_desc->buf_phys_addr,
 					 tx_desc->data_size, DMA_TO_DEVICE);
-		if (!buf->skb)
-			continue;
-
-		bytes_compl += buf->skb->len;
-		pkts_compl++;
-		dev_kfree_skb_any(buf->skb);
+		if (buf->type == MVNETA_TYPE_SKB && buf->skb) {
+			bytes_compl += buf->skb->len;
+			pkts_compl++;
+			dev_kfree_skb_any(buf->skb);
+		} else if (buf->type == MVNETA_TYPE_XDP_TX ||
+			   buf->type == MVNETA_TYPE_XDP_NDO) {
+			xdp_return_frame(buf->xdpf);
+		}
 	}
 
 	netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
@@ -1973,6 +1976,111 @@ int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq)
 	return i;
 }
 
+static int
+mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
+			struct xdp_frame *xdpf, bool dma_map)
+{
+	struct mvneta_tx_desc *tx_desc;
+	struct mvneta_tx_buf *buf;
+	dma_addr_t dma_addr;
+
+	if (txq->count >= txq->tx_stop_threshold)
+		return MVNETA_XDP_DROPPED;
+
+	tx_desc = mvneta_txq_next_desc_get(txq);
+
+	buf = &txq->buf[txq->txq_put_index];
+	if (dma_map) {
+		/* ndo_xdp_xmit */
+		dma_addr = dma_map_single(pp->dev->dev.parent, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(pp->dev->dev.parent, dma_addr)) {
+			mvneta_txq_desc_put(txq);
+			return MVNETA_XDP_DROPPED;
+		}
+		buf->type = MVNETA_TYPE_XDP_NDO;
+	} else {
+		struct page *page = virt_to_page(xdpf->data);
+
+		dma_addr = page_pool_get_dma_addr(page) +
+			   sizeof(*xdpf) + xdpf->headroom;
+		dma_sync_single_for_device(pp->dev->dev.parent, dma_addr,
+					   xdpf->len, DMA_BIDIRECTIONAL);
+		buf->type = MVNETA_TYPE_XDP_TX;
+	}
+	buf->xdpf = xdpf;
+
+	tx_desc->command = MVNETA_TXD_FLZ_DESC;
+	tx_desc->buf_phys_addr = dma_addr;
+	tx_desc->data_size = xdpf->len;
+
+	mvneta_update_stats(pp, 1, xdpf->len, true);
+	mvneta_txq_inc_put(txq);
+	txq->pending++;
+	txq->count++;
+
+	return MVNETA_XDP_TX;
+}
+
+static int
+mvneta_xdp_xmit_back(struct mvneta_port *pp, struct xdp_buff *xdp)
+{
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	struct xdp_frame *xdpf;
+	int cpu;
+	u32 ret;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return MVNETA_XDP_DROPPED;
+
+	cpu = smp_processor_id();
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	ret = mvneta_xdp_submit_frame(pp, txq, xdpf, false);
+	if (ret == MVNETA_XDP_TX)
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return ret;
+}
+
+static int
+mvneta_xdp_xmit(struct net_device *dev, int num_frame,
+		struct xdp_frame **frames, u32 flags)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int cpu = smp_processor_id();
+	struct mvneta_tx_queue *txq;
+	struct netdev_queue *nq;
+	int i, drops = 0;
+	u32 ret;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	txq = &pp->txqs[cpu % txq_number];
+	nq = netdev_get_tx_queue(pp->dev, txq->id);
+
+	__netif_tx_lock(nq, cpu);
+	for (i = 0; i < num_frame; i++) {
+		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
+		if (ret != MVNETA_XDP_TX) {
+			xdp_return_frame_rx_napi(frames[i]);
+			drops++;
+		}
+	}
+
+	if (unlikely(flags & XDP_XMIT_FLUSH))
+		mvneta_txq_pend_desc_add(pp, txq, 0);
+	__netif_tx_unlock(nq);
+
+	return num_frame - drops;
+}
+
 static int
 mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 	       struct bpf_prog *prog, struct xdp_buff *xdp)
@@ -1995,6 +2103,11 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
 		}
 		break;
 	}
+	case XDP_TX:
+		ret = mvneta_xdp_xmit_back(pp, xdp);
+		if (ret != MVNETA_XDP_TX)
+			xdp_return_buff(xdp);
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* fall through */
@@ -4534,6 +4647,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_get_stats64	= mvneta_get_stats64,
 	.ndo_do_ioctl		= mvneta_ioctl,
 	.ndo_bpf		= mvneta_xdp,
+	.ndo_xdp_xmit		= mvneta_xdp_xmit,
 };
 
 static const struct ethtool_ops mvneta_eth_tool_ops = {
...
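
For context, another hedged sketch (also not part of this commit): the new .ndo_xdp_xmit hook is normally reached when an XDP program on a different interface redirects frames to the mvneta port, for example through a devmap as below. The map and program names are illustrative and assume a libbpf-style build.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Slot 0 is assumed to hold the ifindex of the mvneta device. */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
	__uint(max_entries, 1);
} tx_port SEC(".maps");

SEC("xdp")
int xdp_redirect_to_mvneta(struct xdp_md *ctx)
{
	/* Redirected frames are batched by the core and handed to the
	 * target driver's ndo_xdp_xmit(), i.e. mvneta_xdp_xmit() here. */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";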