Commit ec222003 authored by Jose Abreu, committed by David S. Miller

net: stmmac: Prepare to add Split Header support

In order to add Split Header support, stmmac_rx() needs to take into
account that a packet may be split across multiple descriptors.

Refactor the logic of this function in order to support this scenario.

Changes from v2:
	- Fixup if condition detection (Jakub)
	- Don't stop NAPI with unfinished packet (Jakub)
	- Use napi_alloc_skb() (Jakub)
Signed-off-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 25e80cd0
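The core idea of the refactor can be modeled outside the driver. The sketch below is a minimal userspace simulation, not driver code; all names (rx_state, descriptor, poll_rx, the ring contents) are invented for illustration. A ring whose packets may span several descriptors is drained with a bounded budget, and a partially assembled packet is carried over to the next poll, mirroring what stmmac_rx() now does with rx_q->state:

	/* Minimal userspace sketch of the idea behind this patch: a packet
	 * may span several descriptors, and a poll may run out of budget
	 * mid-packet, so partial state must be saved per queue and restored
	 * on the next poll.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	struct descriptor {
		int len;
		bool last;	/* last segment of the packet? */
	};

	struct rx_state {
		bool saved;
		int len;	/* bytes accumulated so far */
	};

	static int poll_rx(struct rx_state *st, const struct descriptor *ring,
			   int ring_len, int *pos, int budget)
	{
		int count = 0, len = 0;

		while (count < budget) {
			/* Resume a packet left unfinished by the previous poll */
			if (!count && st->saved)
				len = st->len;
			else
				len = 0;

			st->saved = false;

			/* Consume descriptors until the last segment is seen */
			while (*pos < ring_len) {
				const struct descriptor *d = &ring[(*pos)++];

				count++;
				len += d->len;
				if (d->last) {
					printf("packet complete: %d bytes\n", len);
					len = 0;
					break;
				}
				if (count >= budget)
					break;
			}

			if (*pos >= ring_len || count >= budget)
				break;
		}

		/* Budget exhausted mid-packet: stash the partial length */
		if (len) {
			st->saved = true;
			st->len = len;
		}
		return count;
	}

	int main(void)
	{
		struct descriptor ring[] = {
			{ 1500, false }, { 1500, false }, { 800, true },
			{ 600, true },
		};
		struct rx_state st = { 0 };
		int pos = 0;

		/* Two polls of budget 2: the 3-descriptor packet is resumed */
		poll_rx(&st, ring, 4, &pos, 2);
		poll_rx(&st, ring, 4, &pos, 2);
		return 0;
	}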
@@ -74,6 +74,12 @@ struct stmmac_rx_queue {
 	u32 rx_zeroc_thresh;
 	dma_addr_t dma_rx_phy;
 	u32 rx_tail_addr;
+	unsigned int state_saved;
+	struct {
+		struct sk_buff *skb;
+		unsigned int len;
+		unsigned int error;
+	} state;
 };
 
 struct stmmac_channel {
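The new fields mirror stmmac_rx()'s loop-local variables one-for-one: the SKB being assembled, the bytes accumulated so far, and the error flag. A hypothetical pair of helpers (not part of the patch, which open-codes both sides) makes the save/restore pairing explicit:

	/* Hypothetical helpers showing how the new fields pair up with
	 * stmmac_rx()'s locals; the patch itself open-codes this.
	 */
	static void rx_q_save_state(struct stmmac_rx_queue *rx_q,
				    struct sk_buff *skb, unsigned int len,
				    unsigned int error)
	{
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.len = len;
		rx_q->state.error = error;
	}

	static bool rx_q_restore_state(struct stmmac_rx_queue *rx_q,
				       struct sk_buff **skb, unsigned int *len,
				       unsigned int *error)
	{
		if (!rx_q->state_saved)
			return false;

		*skb = rx_q->state.skb;
		*len = rx_q->state.len;
		*error = rx_q->state.error;
		return true;
	}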
@@ -3353,9 +3353,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
+	unsigned int count = 0, error = 0, len = 0;
+	int status = 0, coe = priv->hw->rx_csum;
 	unsigned int next_entry = rx_q->cur_rx;
-	int coe = priv->hw->rx_csum;
-	unsigned int count = 0;
+	struct sk_buff *skb = NULL;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
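Note the scope change here: status used to be declared inside the while loop, and is now function-scoped and initialized to 0, so that the rx_not_ls test after the loop (added in the last hunk) can see the flags of the last descriptor processed. A condensed view of the new declarations, with editorial comments added:

	unsigned int count = 0, error = 0, len = 0;	/* per-poll accumulators */
	int status = 0, coe = priv->hw->rx_csum;	/* status now outlives the loop */
	unsigned int next_entry = rx_q->cur_rx;
	struct sk_buff *skb = NULL;			/* packet being assembled */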
@@ -3369,10 +3370,28 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
 	}
 	while (count < limit) {
+		enum pkt_hash_types hash_type;
 		struct stmmac_rx_buffer *buf;
+		unsigned int prev_len = 0;
 		struct dma_desc *np, *p;
-		int entry, status;
+		int entry;
+		u32 hash;
+
+		if (!count && rx_q->state_saved) {
+			skb = rx_q->state.skb;
+			error = rx_q->state.error;
+			len = rx_q->state.len;
+		} else {
+			rx_q->state_saved = false;
+			skb = NULL;
+			error = 0;
+			len = 0;
+		}
+
+		if (count >= limit)
+			break;
 
+read_again:
 		entry = next_entry;
 		buf = &rx_q->buf_pool[entry];
 
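Restated with extra comments (the emphasis is editorial; the statements are the ones added by the patch): the saved state is consumed only on the very first iteration of a poll, and the read_again label re-enters descriptor processing without resetting the per-packet accumulators:

	if (!count && rx_q->state_saved) {
		/* First iteration of this poll, and the previous poll stopped
		 * mid-packet: resume with the saved SKB, length and error.
		 */
		skb = rx_q->state.skb;
		error = rx_q->state.error;
		len = rx_q->state.len;
	} else {
		/* Any other iteration starts a fresh packet from scratch */
		rx_q->state_saved = false;
		skb = NULL;
		error = 0;
		len = 0;
	}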
@@ -3407,29 +3426,25 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
 			priv->dev->stats.rx_errors++;
 			buf->page = NULL;
-		} else {
-			enum pkt_hash_types hash_type;
-			struct sk_buff *skb;
-			unsigned int des;
-			int frame_len;
-			u32 hash;
-
-			stmmac_get_desc_addr(priv, p, &des);
-			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
-
-			/* If frame length is greater than skb buffer size
-			 * (preallocated during init) then the packet is
-			 * ignored
-			 */
-			if (frame_len > priv->dma_buf_sz) {
-				if (net_ratelimit())
-					netdev_err(priv->dev,
-						   "len %d larger than size (%d)\n",
-						   frame_len, priv->dma_buf_sz);
-				priv->dev->stats.rx_length_errors++;
+			error = 1;
+		}
+
+		if (unlikely(error && (status & rx_not_ls)))
+			goto read_again;
+		if (unlikely(error)) {
+			if (skb)
+				dev_kfree_skb(skb);
 			continue;
 		}
 
+		/* Buffer is good. Go on. */
+
+		if (likely(status & rx_not_ls)) {
+			len += priv->dma_buf_sz;
+		} else {
+			prev_len = len;
+			len = stmmac_get_rx_frame_len(priv, p, coe);
+
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 			 * Type frames (LLC/LLC-SNAP)
 			 *
@@ -3439,37 +3454,49 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			 */
 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
 			    unlikely(status != llc_snap))
-				frame_len -= ETH_FCS_LEN;
-
-			if (netif_msg_rx_status(priv)) {
-				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
-					   p, entry, des);
-				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
-					   frame_len, status);
+				len -= ETH_FCS_LEN;
 		}
 
-		skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
-		if (unlikely(!skb)) {
+		if (!skb) {
+			skb = napi_alloc_skb(&ch->rx_napi, len);
+			if (!skb) {
 				priv->dev->stats.rx_dropped++;
 				continue;
 			}
 
-			dma_sync_single_for_cpu(priv->device, buf->addr,
-						frame_len, DMA_FROM_DEVICE);
+			dma_sync_single_for_cpu(priv->device, buf->addr, len,
+						DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, page_address(buf->page),
-						frame_len);
-			skb_put(skb, frame_len);
+						len);
+			skb_put(skb, len);
 
-			if (netif_msg_pktdata(priv)) {
-				netdev_dbg(priv->dev, "frame received (%dbytes)",
-					   frame_len);
-				print_pkt(skb->data, frame_len);
-			}
-
+			/* Data payload copied into SKB, page ready for recycle */
+			page_pool_recycle_direct(rx_q->page_pool, buf->page);
+			buf->page = NULL;
+		} else {
+			unsigned int buf_len = len - prev_len;
+
+			if (likely(status & rx_not_ls))
+				buf_len = priv->dma_buf_sz;
+
+			dma_sync_single_for_cpu(priv->device, buf->addr,
+						buf_len, DMA_FROM_DEVICE);
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					buf->page, 0, buf_len,
+					priv->dma_buf_sz);
+
+			/* Data payload appended into SKB */
+			page_pool_release_page(rx_q->page_pool, buf->page);
+			buf->page = NULL;
+		}
 
-		stmmac_get_rx_hwtstamp(priv, p, np, skb);
+		if (likely(status & rx_not_ls))
+			goto read_again;
 
-		stmmac_rx_vlan(priv->dev, skb);
+		/* Got entire packet into SKB. Finish it. */
 
+		stmmac_get_rx_hwtstamp(priv, p, np, skb);
+		stmmac_rx_vlan(priv->dev, skb);
 		skb->protocol = eth_type_trans(skb, priv->dev);
 
 		if (unlikely(!coe))
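The length bookkeeping in this hunk is worth spelling out: every non-last descriptor (rx_not_ls set) contributes one full buffer, while the last descriptor reports the total frame length, so the bytes that actually sit in the last buffer are len - prev_len. A standalone arithmetic check (the buffer size and frame length are made-up example values):

	#include <assert.h>

	int main(void)
	{
		const unsigned int dma_buf_sz = 1536;	/* bytes per descriptor buffer */
		unsigned int len = 0, prev_len, buf_len;

		/* Two non-last descriptors (rx_not_ls set): whole buffers */
		len += dma_buf_sz;		/* len = 1536 */
		len += dma_buf_sz;		/* len = 3072 */

		/* Last descriptor: hardware reports the total frame length */
		prev_len = len;
		len = 3800;			/* stmmac_get_rx_frame_len() result */

		/* Bytes that actually live in the last buffer */
		buf_len = len - prev_len;
		assert(buf_len == 728);

		return 0;
	}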
@@ -3483,13 +3510,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		skb_record_rx_queue(skb, queue);
 		napi_gro_receive(&ch->rx_napi, skb);
 
-		/* Data payload copied into SKB, page ready for recycle */
-		page_pool_recycle_direct(rx_q->page_pool, buf->page);
-		buf->page = NULL;
-
 		priv->dev->stats.rx_packets++;
-		priv->dev->stats.rx_bytes += frame_len;
-	}
+		priv->dev->stats.rx_bytes += len;
 	}
 
+	if (status & rx_not_ls) {
+		rx_q->state_saved = true;
+		rx_q->state.skb = skb;
+		rx_q->state.error = error;
+		rx_q->state.len = len;
+	}
+
 	stmmac_rx_refill(priv, queue);
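At loop exit, status still holds the flags of the last descriptor that was processed, so rx_not_ls being set means stmmac_rx() stopped mid-packet, either because the budget ran out or because the next descriptor is still owned by the DMA. Restated with editorial comments (the statements are the patch's own):

	if (status & rx_not_ls) {
		/* Stopped mid-packet: park the partial SKB and counters in
		 * the queue so the next poll's first iteration resumes them.
		 */
		rx_q->state_saved = true;
		rx_q->state.skb = skb;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}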