Commit 50f82699 authored by Ioana Ciornei, committed by David S. Miller

dpaa2-eth: add rx copybreak support

DMA unmapping a buffer, allocating a new one and DMA mapping it back on
the refill path is inefficient. Proper buffer recycling (page pool, or
flipping the page and using the other half) cannot be done for DPAA2
since it is not a ring-based controller; rather, it deals with multiple
queues which all get their buffers from the same buffer pool on Rx.

To work around these limitations, add support for Rx copybreak: for
small packets, instead of building an skb around the buffer in which
the frame was received, allocate a new skb altogether, copy the frame's
contents into it, and release the initial page back into the buffer
pool.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 28d137cc
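
As an illustration of the general pattern (not the dpaa2 code, which
follows in the diff below), a minimal driver-agnostic Rx copybreak
sketch might look like this; RX_COPYBREAK, recycle_rx_buffer() and
build_skb_around_buffer() are hypothetical placeholders:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_COPYBREAK 512	/* hypothetical threshold */

/* Small frames are copied into a freshly allocated skb so the original
 * DMA buffer can be handed straight back to the hardware pool; larger
 * frames keep the zero-copy path and build the skb around the buffer.
 */
static struct sk_buff *rx_build_skb(struct napi_struct *napi,
				    void *buf, unsigned int len)
{
	struct sk_buff *skb;

	if (len <= RX_COPYBREAK) {
		skb = napi_alloc_skb(napi, len);
		if (skb) {
			skb_put_data(skb, buf, len);	/* copy the frame */
			recycle_rx_buffer(buf);		/* placeholder: refill pool */
			return skb;
		}
		/* on allocation failure, fall back to zero-copy below */
	}

	return build_skb_around_buffer(buf, len);	/* placeholder */
}
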
...
@@ -418,6 +418,34 @@ static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
	return xdp_act;
}

static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
					   const struct dpaa2_fd *fd,
					   void *fd_vaddr)
{
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);
	struct sk_buff *skb = NULL;
	unsigned int skb_len;

	/* Frames larger than the copybreak threshold keep the zero-copy path */
	if (fd_length > DPAA2_ETH_DEFAULT_COPYBREAK)
		return NULL;

	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);

	skb = napi_alloc_skb(&ch->napi, skb_len);
	if (!skb)
		return NULL;

	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
	skb_put(skb, fd_length);

	/* Copy the frame contents into the new skb ... */
	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);

	/* ... and release the original buffer back into the buffer pool */
	dpaa2_eth_recycle_buf(ch->priv, ch, dpaa2_fd_get_addr(fd));

	return skb;
}

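Note that the helper returns NULL both for frames above the copybreak
threshold and on skb allocation failure, so the Rx path below can fall
back to the original zero-copy handling in either case.
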
/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
...
@@ -459,9 +487,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
		return;
	}
		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
		if (!skb) {
			dma_unmap_page(dev, addr, priv->rx_buf_size,
				       DMA_BIDIRECTIONAL);
			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);
...
...
@@ -489,6 +489,8 @@ struct dpaa2_eth_trap_data {
	struct dpaa2_eth_priv *priv;
};

#define DPAA2_ETH_DEFAULT_COPYBREAK 512

/* Driver private data */
struct dpaa2_eth_priv {
	struct net_device *net_dev;
...