Commit 869b9b19 authored by Zoltan Kiss, committed by David S. Miller

xen-netback: Stop using xenvif_tx_pending_slots_available

Since the early days, TX has stopped whenever there weren't enough free
pending slots to consume a maximum-sized (slot-wise) packet. The likely
reason for that is to avoid running out of free pending slots in the
middle of a packet. But if we make sure the pending ring has the same
size as the shared ring, that cannot really happen: the frontend can
only post packets which fit into the free space of the shared ring, and
if a packet doesn't fit, the frontend has to stop, as it may only
increase req_prod once the whole packet fits onto the ring.
This patch removes that check, makes sure the two rings have the same
size, and drops the corresponding check from the zerocopy callback. As
we no longer stop the NAPI instance on this condition, we don't have to
wake it up when pending slots are freed.
Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b8b6529f
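For clarity, the argument above can be stated as an invariant: the frontend may
only advance req_prod once a whole packet fits onto the shared ring, so it can
never have more requests outstanding than XEN_NETIF_TX_RING_SIZE; with
MAX_PENDING_REQS set to the same value, the pending ring therefore always has
room for them. A hypothetical compile-time check capturing that assumption
(illustrative only, not part of the patch; the helper name is made up) could
look like this:

/* Illustrative sketch, not in the patch: capture the assumption the
 * removed check relied on, namely that the pending ring and the shared
 * TX ring are the same size, so pending-slot exhaustion cannot occur.
 */
static inline void xenvif_check_ring_sizes(void)
{
	BUILD_BUG_ON(MAX_PENDING_REQS != XEN_NETIF_TX_RING_SIZE);
}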
@@ -81,7 +81,7 @@ struct xenvif_rx_meta {
 #define MAX_BUFFER_OFFSET PAGE_SIZE
-#define MAX_PENDING_REQS 256
+#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE
 /* It's possible for an skb to have a maximal number of frags
  * but still be less than MAX_BUFFER_OFFSET in size. Thus the
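For context on the new value: XEN_NETIF_TX_RING_SIZE is derived from the
shared-ring layout rather than hard-coded. Assuming the usual
__CONST_RING_SIZE()-based definition and 4 KiB pages, it still evaluates to
256, so the constant's value is unchanged, only its source:

/* Assumed definition (mirroring the standard Xen ring.h helper): the
 * number of request/response slots that fit in one shared page. With
 * PAGE_SIZE == 4096 this evaluates to 256, matching the old literal.
 */
#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)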
@@ -251,12 +251,6 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
 		vif->pending_prod + vif->pending_cons;
 }
-static inline bool xenvif_tx_pending_slots_available(struct xenvif *vif)
-{
-	return nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
-		< MAX_PENDING_REQS;
-}
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
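The unchanged helper just above the removed function shows how pending-slot
usage is counted; a sketch of it (reconstructed from the context lines above,
not part of the diff) for reference:

/* Slots currently in use: pending_prod - pending_cons counts the free
 * entries of the pending ring, so subtracting it from MAX_PENDING_REQS
 * gives the number of slots held by in-flight packets.
 */
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
	return MAX_PENDING_REQS -
		vif->pending_prod + vif->pending_cons;
}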
...
@@ -88,8 +88,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 		local_irq_save(flags);
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!(more_to_do &&
-		      xenvif_tx_pending_slots_available(vif)))
+		if (!more_to_do)
 			__napi_complete(napi);
 		local_irq_restore(flags);
...
@@ -1167,8 +1167,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
 	struct sk_buff *skb;
 	int ret;
-	while (xenvif_tx_pending_slots_available(vif) &&
-	       (skb_queue_len(&vif->tx_queue) < budget)) {
+	while (skb_queue_len(&vif->tx_queue) < budget) {
 		struct xen_netif_tx_request txreq;
 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
@@ -1508,13 +1507,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
 	wake_up(&vif->dealloc_wq);
 	spin_unlock_irqrestore(&vif->callback_lock, flags);
-	if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx) &&
-	    xenvif_tx_pending_slots_available(vif)) {
-		local_bh_disable();
-		napi_schedule(&vif->napi);
-		local_bh_enable();
-	}
 	if (likely(zerocopy_success))
 		vif->tx_zerocopy_success++;
 	else
@@ -1706,8 +1698,7 @@ static inline int rx_work_todo(struct xenvif *vif)
 static inline int tx_work_todo(struct xenvif *vif)
 {
-	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
-	    xenvif_tx_pending_slots_available(vif))
+	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
 		return 1;
 	return 0;