Commit 8b278a5b authored by Ong Boon Leong, committed by David S. Miller

net: stmmac: Add support for XDP_REDIRECT action

This patch adds support for the XDP_REDIRECT action, which lets an XDP
program redirect received frames to another remote CPU for further
processing. It also implements the ndo_xdp_xmit ops, enabling the driver
to transmit packets forwarded to it by an XDP program running on another
interface.
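
For context, the verdict handled by this new code path would typically come
from an XDP program like the minimal sketch below. This is illustrative only
and not part of the patch; the target ifindex (3) and the program name are
made-up placeholders.

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("xdp")
    int xdp_redirect_example(struct xdp_md *ctx)
    {
    	/* Hand every frame to ifindex 3; the driver owning that netdev
    	 * transmits the redirected frame through its ndo_xdp_xmit ops,
    	 * such as the stmmac_xdp_xmit() added by this patch. */
    	return bpf_redirect(3, 0);
    }

    char _license[] SEC("license") = "GPL";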

This patch has been tested using the "xdp_redirect_cpu" sample for
XDP_REDIRECT + drop testing. It has also been tested with the
"xdp_redirect" sample app, which can be used to exercise the ndo_xdp_xmit
ops. Burst traffic is generated using pktgen_sample03_burst_single_flow.sh
in the samples/pktgen directory.
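
The "xdp_redirect_cpu" test exercises CPUMAP-based redirect; conceptually its
BPF side is shaped like the hedged sketch below (not the actual sample code;
the map size and target CPU are arbitrary assumptions):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
    	__uint(type, BPF_MAP_TYPE_CPUMAP);
    	__uint(max_entries, 64);
    	__type(key, __u32);
    	__type(value, struct bpf_cpumap_val);
    } cpu_map SEC(".maps");

    SEC("xdp")
    int xdp_redirect_to_cpu(struct xdp_md *ctx)
    {
    	__u32 target_cpu = 1;	/* assumption: steer all frames to CPU 1 */

    	/* Returns XDP_REDIRECT on success; the frame is then handled on
    	 * the remote CPU, where it can be dropped or passed up the stack. */
    	return bpf_redirect_map(&cpu_map, target_cpu, 0);
    }

    char _license[] SEC("license") = "GPL";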

v4: Moved xdp_do_flush() processing into stmmac_finalize_xdp_rx() and
    combined the XDP verdict handling for XDP_TX and XDP_REDIRECT.

v3: Added 'nq->trans_start = jiffies' to avoid TX time-out, as we are
    sharing the TX queue between the slow path and XDP. Thanks to Jakub
    Kicinski for pointing this out.
Signed-off-by: Ong Boon Leong <boon.leong.ong@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent be8b38a7
@@ -39,6 +39,7 @@ struct stmmac_resources {
 enum stmmac_txbuf_type {
 	STMMAC_TXBUF_T_SKB,
 	STMMAC_TXBUF_T_XDP_TX,
+	STMMAC_TXBUF_T_XDP_NDO,
 };

 struct stmmac_tx_info {

@@ -72,6 +72,7 @@ MODULE_PARM_DESC(phyaddr, "Physical device address");
 #define STMMAC_XDP_PASS		0
 #define STMMAC_XDP_CONSUMED	BIT(0)
 #define STMMAC_XDP_TX		BIT(1)
+#define STMMAC_XDP_REDIRECT	BIT(2)

 static int flow_ctrl = FLOW_AUTO;
 module_param(flow_ctrl, int, 0644);

@@ -1458,7 +1459,8 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 	}

 	if (tx_q->xdpf[i] &&
-	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX) {
+	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
 		xdp_return_frame(tx_q->xdpf[i]);
 		tx_q->xdpf[i] = NULL;
 	}

@@ -2220,7 +2222,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 		struct dma_desc *p;
 		int status;

-		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
+		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
 			xdpf = tx_q->xdpf[entry];
 			skb = NULL;
 		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {

@@ -2292,6 +2295,12 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 			tx_q->xdpf[entry] = NULL;
 		}

+		if (xdpf &&
+		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+			xdp_return_frame(xdpf);
+			tx_q->xdpf[entry] = NULL;
+		}
+
 		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
 			if (likely(skb)) {
 				pkts_compl++;

@@ -4246,10 +4255,9 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
 }

 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
-				struct xdp_frame *xdpf)
+				struct xdp_frame *xdpf, bool dma_map)
 {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-	struct page *page = virt_to_page(xdpf->data);
 	unsigned int entry = tx_q->cur_tx;
 	struct dma_desc *tx_desc;
 	dma_addr_t dma_addr;

@@ -4265,12 +4273,23 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
 	else
 		tx_desc = tx_q->dma_tx + entry;

-	dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
-		   xdpf->headroom;
-	dma_sync_single_for_device(priv->device, dma_addr,
-				   xdpf->len, DMA_BIDIRECTIONAL);
+	if (dma_map) {
+		dma_addr = dma_map_single(priv->device, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->device, dma_addr))
+			return STMMAC_XDP_CONSUMED;

-	tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
+		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
+	} else {
+		struct page *page = virt_to_page(xdpf->data);
+
+		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
+			   xdpf->headroom;
+		dma_sync_single_for_device(priv->device, dma_addr,
+					   xdpf->len, DMA_BIDIRECTIONAL);
+
+		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
+	}

 	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
 	tx_q->tx_skbuff_dma[entry].map_as_page = false;

@@ -4340,7 +4359,7 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
 	/* Avoids TX time-out as we are sharing with slow path */
 	nq->trans_start = jiffies;

-	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf);
+	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
 	if (res == STMMAC_XDP_TX)
 		stmmac_flush_tx_descriptors(priv, queue);

@@ -4372,6 +4391,12 @@ static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
 	case XDP_TX:
 		res = stmmac_xdp_xmit_back(priv, xdp);
 		break;
+	case XDP_REDIRECT:
+		if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
+			res = STMMAC_XDP_CONSUMED;
+		else
+			res = STMMAC_XDP_REDIRECT;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		fallthrough;

@@ -4398,6 +4423,9 @@ static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
 	if (xdp_status & STMMAC_XDP_TX)
 		stmmac_tx_timer_arm(priv, queue);
+
+	if (xdp_status & STMMAC_XDP_REDIRECT)
+		xdp_do_flush();
 }

 /**

@@ -4584,7 +4612,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 				count++;
 				continue;
-			} else if (xdp_res & STMMAC_XDP_TX) {
+			} else if (xdp_res & (STMMAC_XDP_TX |
+					      STMMAC_XDP_REDIRECT)) {
 				xdp_status |= xdp_res;
 				buf->page = NULL;
 				skb = NULL;

@@ -5600,6 +5629,48 @@ static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 	}
 }

+static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
+			   struct xdp_frame **frames, u32 flags)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+	int cpu = smp_processor_id();
+	struct netdev_queue *nq;
+	int i, nxmit = 0;
+	int queue;
+
+	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
+		return -ENETDOWN;
+
+	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+		return -EINVAL;
+
+	queue = stmmac_xdp_get_tx_queue(priv, cpu);
+	nq = netdev_get_tx_queue(priv->dev, queue);
+
+	__netif_tx_lock(nq, cpu);
+	/* Avoids TX time-out as we are sharing with slow path */
+	nq->trans_start = jiffies;
+
+	for (i = 0; i < num_frames; i++) {
+		int res;
+
+		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
+		if (res == STMMAC_XDP_CONSUMED)
+			break;
+
+		nxmit++;
+	}
+
+	if (flags & XDP_XMIT_FLUSH) {
+		stmmac_flush_tx_descriptors(priv, queue);
+		stmmac_tx_timer_arm(priv, queue);
+	}
+
+	__netif_tx_unlock(nq);
+
+	return nxmit;
+}
+
 static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_open = stmmac_open,
 	.ndo_start_xmit = stmmac_xmit,

@@ -5619,6 +5690,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
 	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
 	.ndo_bpf = stmmac_bpf,
+	.ndo_xdp_xmit = stmmac_xdp_xmit,
 };

 static void stmmac_reset_subtask(struct stmmac_priv *priv)