Commit f8e34d24 authored by Govindarajulu Varadarajan, committed by David S. Miller

enic: support skb->xmit_more

Check skb->xmit_more and update posted_index only when xmit_more is 0 or the tx queue has been stopped (ring full).

v2:
use txq_map instead of skb_get_queue_mapping(skb)
Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3819ffdf
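For readers unfamiliar with the mechanism: skb->xmit_more is the stack's hint that another packet will follow immediately, so a driver may enqueue descriptors now and defer the doorbell MMIO write until the batch ends. A minimal sketch of that pattern follows; my_fill_descriptors() and my_ring_doorbell() are hypothetical stand-ins for driver internals, while skb_get_queue_mapping(), netdev_get_tx_queue(), netif_xmit_stopped() and skb->xmit_more are the stock kernel APIs of this era.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver internals, declared only so the sketch is
 * self-contained; they stand in for descriptor enqueue and the
 * MMIO doorbell write.
 */
static void my_fill_descriptors(struct sk_buff *skb);
static void my_ring_doorbell(void);

static netdev_tx_t my_hard_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	unsigned int txq_map = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(netdev, txq_map);

	my_fill_descriptors(skb);	/* post descriptors, no MMIO yet */

	/* Defer the doorbell while the stack promises more packets
	 * (skb->xmit_more), but never leave descriptors unflushed on
	 * a stopped queue: no later call would ring the bell for them.
	 */
	if (!skb->xmit_more || netif_xmit_stopped(txq))
		my_ring_doorbell();	/* one MMIO write per batch */

	return NETDEV_TX_OK;
}

The netif_xmit_stopped() check is the safety net: if the queue was just stopped, no later transmit will arrive to flush the pending descriptors, so the doorbell must be rung now.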
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -533,6 +533,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	struct vnic_wq *wq;
 	unsigned long flags;
 	unsigned int txq_map;
+	struct netdev_queue *txq;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb_any(skb);
@@ -541,6 +542,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
 	wq = &enic->wq[txq_map];
+	txq = netdev_get_tx_queue(netdev, txq_map);
 
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely.  In the off chance it's going to take
@@ -558,7 +560,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+		netif_tx_stop_queue(txq);
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
 		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
@@ -568,7 +570,9 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	enic_queue_wq_skb(enic, wq, skb);
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
+		netif_tx_stop_queue(txq);
+	if (!skb->xmit_more || netif_xmit_stopped(txq))
+		vnic_wq_doorbell(wq);
 
 	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -104,6 +104,17 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
 	return wq->to_use->desc;
 }
 
+static inline void vnic_wq_doorbell(struct vnic_wq *wq)
+{
+	/* Adding write memory barrier prevents compiler and/or CPU
+	 * reordering, thus avoiding descriptor posting before
+	 * descriptor is initialized. Otherwise, hardware can read
+	 * stale descriptor fields.
+	 */
+	wmb();
+	iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
+}
+
 static inline void vnic_wq_post(struct vnic_wq *wq,
 					void *os_buf, dma_addr_t dma_addr,
 					unsigned int len, int sop, int eop,
@@ -122,15 +133,6 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
 	buf->wr_id = wrid;
 
 	buf = buf->next;
-	if (eop) {
-		/* Adding write memory barrier prevents compiler and/or CPU
-		 * reordering, thus avoiding descriptor posting before
-		 * descriptor is initialized. Otherwise, hardware can read
-		 * stale descriptor fields.
-		 */
-		wmb();
-		iowrite32(buf->index, &wq->ctrl->posted_index);
-	}
 	wq->to_use = buf;
 
 	wq->ring.desc_avail -= desc_skip_cnt;
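Hoisting the posted_index write out of vnic_wq_post() and into vnic_wq_doorbell() does not weaken the ordering requirement documented in the comment; it only changes when the write happens. A sketch of that contract follows (struct my_desc and post_one() are illustrative, not vnic code):

#include <linux/io.h>		/* iowrite32() */
#include <linux/types.h>

/* Illustrative descriptor layout; the real vnic descriptors differ. */
struct my_desc {
	__le64 addr;
	__le16 len;
};

static inline void post_one(struct my_desc *desc, dma_addr_t dma, u16 len,
			    u32 index, u32 __iomem *posted_index)
{
	desc->addr = cpu_to_le64(dma);	/* 1. CPU fills descriptor fields */
	desc->len = cpu_to_le16(len);
	wmb();				/* 2. order stores before the MMIO */
	iowrite32(index, posted_index);	/* 3. only now may HW fetch it */
}

Because each iowrite32() is an expensive uncached transaction, batching with xmit_more reduces how often step 3 runs; the wmb() still guards every doorbell that is actually issued.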