Commit ec222003 authored by Jose Abreu, committed by David S. Miller

net: stmmac: Prepare to add Split Header support

In order to add Split Header support, stmmac_rx() needs to take into
account that a packet may be split across multiple descriptors.

Refactor the logic of this function in order to support this scenario.

Changes from v2:
	- Fixup if condition detection (Jakub)
	- Don't stop NAPI with unfinished packet (Jakub)
	- Use napi_alloc_skb() (Jakub)
Signed-off-by: Jose Abreu <joabreu@synopsys.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 25e80cd0
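
The refactor below hinges on one idea: stmmac_rx() may exhaust its NAPI budget in the middle of a multi-descriptor packet, so the in-progress skb, accumulated length and error flag are parked in the queue (rx_q->state) and picked up on the next poll instead of being dropped. The following stand-alone C sketch only models that save/resume pattern; every name in it (struct desc, struct rx_state, rx_poll(), BUF_SZ, NOT_LAST) is hypothetical and mirrors the shape of the driver logic, not its API.

/*
 * Minimal user-space sketch (not driver code) of the save/resume
 * pattern this patch introduces. All names below are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

#define BUF_SZ   2048	/* stand-in for priv->dma_buf_sz */
#define NOT_LAST 0x1	/* stand-in for the rx_not_ls status bit */

struct desc {		/* hypothetical RX descriptor */
	unsigned int status;
	unsigned int frame_len;	/* total length, valid on last desc only */
};

struct rx_state {	/* mirrors the new rx_q->state / state_saved */
	bool saved;
	unsigned int len;
};

/* Consume up to 'limit' descriptors; resume from saved state if any. */
static int rx_poll(struct desc *ring, int n, int limit, struct rx_state *st)
{
	unsigned int len = 0;
	int count = 0, i = 0;

	if (st->saved)		/* packet left unfinished last poll */
		len = st->len;
	st->saved = false;

	while (count < limit && i < n) {
		struct desc *p = &ring[i++];

		count++;
		if (p->status & NOT_LAST) {
			len += BUF_SZ;		/* middle buffer: always full */
			if (count >= limit) {	/* budget gone mid-packet */
				st->saved = true;
				st->len = len;
				return count;
			}
			continue;		/* keep collecting buffers */
		}
		len = p->frame_len;	/* last desc carries the total */
		printf("completed packet, %u bytes\n", len);
		len = 0;
	}
	return count;
}

int main(void)
{
	/* one 5000-byte packet split across three 2048-byte buffers */
	struct desc ring[] = {
		{ NOT_LAST, 0 }, { NOT_LAST, 0 }, { 0, 5000 },
	};
	struct rx_state st = { 0 };

	rx_poll(ring, 3, 2, &st);	/* budget of 2: state is saved */
	rx_poll(ring + 2, 1, 2, &st);	/* next poll finishes the packet */
	return 0;
}

Saving the state instead of freeing the partial skb is what lets the loop honor the NAPI budget without losing frames that straddle a poll boundary.
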
@@ -74,6 +74,12 @@ struct stmmac_rx_queue {
 	u32 rx_zeroc_thresh;
 	dma_addr_t dma_rx_phy;
 	u32 rx_tail_addr;
+	unsigned int state_saved;
+	struct {
+		struct sk_buff *skb;
+		unsigned int len;
+		unsigned int error;
+	} state;
 };
 
 struct stmmac_channel {
@@ -3353,9 +3353,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct stmmac_channel *ch = &priv->channel[queue];
+	unsigned int count = 0, error = 0, len = 0;
+	int status = 0, coe = priv->hw->rx_csum;
 	unsigned int next_entry = rx_q->cur_rx;
-	int coe = priv->hw->rx_csum;
-	unsigned int count = 0;
+	struct sk_buff *skb = NULL;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
@@ -3369,10 +3370,28 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 		stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
 	}
 
 	while (count < limit) {
+		enum pkt_hash_types hash_type;
 		struct stmmac_rx_buffer *buf;
+		unsigned int prev_len = 0;
 		struct dma_desc *np, *p;
-		int entry, status;
+		int entry;
+		u32 hash;
+
+		if (!count && rx_q->state_saved) {
+			skb = rx_q->state.skb;
+			error = rx_q->state.error;
+			len = rx_q->state.len;
+		} else {
+			rx_q->state_saved = false;
+			skb = NULL;
+			error = 0;
+			len = 0;
+		}
+
+		if (count >= limit)
+			break;
+
+read_again:
 		entry = next_entry;
 		buf = &rx_q->buf_pool[entry];
@@ -3407,28 +3426,24 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			page_pool_recycle_direct(rx_q->page_pool, buf->page);
 			priv->dev->stats.rx_errors++;
 			buf->page = NULL;
+			error = 1;
+		}
+
+		if (unlikely(error && (status & rx_not_ls)))
+			goto read_again;
+		if (unlikely(error)) {
+			if (skb)
+				dev_kfree_skb(skb);
+			continue;
+		}
+
+		/* Buffer is good. Go on. */
+
+		if (likely(status & rx_not_ls)) {
+			len += priv->dma_buf_sz;
 		} else {
-			enum pkt_hash_types hash_type;
-			struct sk_buff *skb;
-			unsigned int des;
-			int frame_len;
-			u32 hash;
-
-			stmmac_get_desc_addr(priv, p, &des);
-			frame_len = stmmac_get_rx_frame_len(priv, p, coe);
-
-			/* If frame length is greater than skb buffer size
-			 * (preallocated during init) then the packet is
-			 * ignored
-			 */
-			if (frame_len > priv->dma_buf_sz) {
-				if (net_ratelimit())
-					netdev_err(priv->dev,
-						   "len %d larger than size (%d)\n",
-						   frame_len, priv->dma_buf_sz);
-				priv->dev->stats.rx_length_errors++;
-				continue;
-			}
+			prev_len = len;
+			len = stmmac_get_rx_frame_len(priv, p, coe);
 
 			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
 			 * Type frames (LLC/LLC-SNAP)
@@ -3439,57 +3454,71 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			 */
 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
 			    unlikely(status != llc_snap))
-				frame_len -= ETH_FCS_LEN;
+				len -= ETH_FCS_LEN;
+		}
 
-			if (netif_msg_rx_status(priv)) {
-				netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
-					   p, entry, des);
-				netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
-					   frame_len, status);
-			}
-
-			skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
-			if (unlikely(!skb)) {
+		if (!skb) {
+			skb = napi_alloc_skb(&ch->rx_napi, len);
+			if (!skb) {
 				priv->dev->stats.rx_dropped++;
 				continue;
 			}
 
-			dma_sync_single_for_cpu(priv->device, buf->addr,
-						frame_len, DMA_FROM_DEVICE);
+			dma_sync_single_for_cpu(priv->device, buf->addr, len,
+						DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, page_address(buf->page),
-						frame_len);
-			skb_put(skb, frame_len);
+						len);
+			skb_put(skb, len);
 
-			if (netif_msg_pktdata(priv)) {
-				netdev_dbg(priv->dev, "frame received (%dbytes)",
-					   frame_len);
-				print_pkt(skb->data, frame_len);
-			}
-
-			stmmac_get_rx_hwtstamp(priv, p, np, skb);
-
-			stmmac_rx_vlan(priv->dev, skb);
-
-			skb->protocol = eth_type_trans(skb, priv->dev);
+			/* Data payload copied into SKB, page ready for recycle */
+			page_pool_recycle_direct(rx_q->page_pool, buf->page);
+			buf->page = NULL;
+		} else {
+			unsigned int buf_len = len - prev_len;
 
-			if (unlikely(!coe))
-				skb_checksum_none_assert(skb);
-			else
-				skb->ip_summed = CHECKSUM_UNNECESSARY;
+			if (likely(status & rx_not_ls))
+				buf_len = priv->dma_buf_sz;
 
-			if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
-				skb_set_hash(skb, hash, hash_type);
+			dma_sync_single_for_cpu(priv->device, buf->addr,
						buf_len, DMA_FROM_DEVICE);
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					buf->page, 0, buf_len,
+					priv->dma_buf_sz);
 
-			skb_record_rx_queue(skb, queue);
-			napi_gro_receive(&ch->rx_napi, skb);
+			/* Data payload appended into SKB */
+			page_pool_release_page(rx_q->page_pool, buf->page);
+			buf->page = NULL;
+		}
 
-			/* Data payload copied into SKB, page ready for recycle */
-			page_pool_recycle_direct(rx_q->page_pool, buf->page);
-			buf->page = NULL;
+		if (likely(status & rx_not_ls))
+			goto read_again;
 
-			priv->dev->stats.rx_packets++;
-			priv->dev->stats.rx_bytes += frame_len;
-		}
+		/* Got entire packet into SKB. Finish it. */
+
+		stmmac_get_rx_hwtstamp(priv, p, np, skb);
+		stmmac_rx_vlan(priv->dev, skb);
+		skb->protocol = eth_type_trans(skb, priv->dev);
+
+		if (unlikely(!coe))
+			skb_checksum_none_assert(skb);
+		else
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+			skb_set_hash(skb, hash, hash_type);
+
+		skb_record_rx_queue(skb, queue);
+		napi_gro_receive(&ch->rx_napi, skb);
+
+		priv->dev->stats.rx_packets++;
+		priv->dev->stats.rx_bytes += len;
+	}
+
+	if (status & rx_not_ls) {
+		rx_q->state_saved = true;
+		rx_q->state.skb = skb;
+		rx_q->state.error = error;
+		rx_q->state.len = len;
 	}
 
 	stmmac_rx_refill(priv, queue);
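
A note on the length bookkeeping in the last hunk: every non-last descriptor contributes a full priv->dma_buf_sz to len, while the last descriptor reports the total frame length via stmmac_get_rx_frame_len(), so the size of the final fragment falls out as the difference (len - prev_len). A worked example with hypothetical numbers:

	/* Hypothetical: 2048-byte buffers, 5000-byte frame, 3 descriptors */
	unsigned int dma_buf_sz = 2048;
	unsigned int prev_len = 2 * dma_buf_sz; /* 4096 after two non-last buffers */
	unsigned int len = 5000;                /* total, from the last descriptor */
	unsigned int buf_len = len - prev_len;  /* 904 bytes live in the last buffer */

Middle fragments skip this arithmetic and always take the full buffer, which is the "if (likely(status & rx_not_ls)) buf_len = priv->dma_buf_sz;" override above.
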