Commit 83c317d7 authored by Heiner Kallweit's avatar Heiner Kallweit Committed by Jakub Kicinski

r8169: remove nr_frags argument from rtl_tx_slots_avail

The only time when nr_frags isn't MAX_SKB_FRAGS is when entering
rtl8169_start_xmit(). However we can use MAX_SKB_FRAGS also here
because when the queue isn't stopped there should always be room for
MAX_SKB_FRAGS + 1 descriptors.
Signed-off-by: default avatarHeiner Kallweit <hkallweit1@gmail.com>
Link: https://lore.kernel.org/r/3d1f2ad7-31d5-2cac-4f4a-394f8a3cab63@gmail.com
Signed-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parent b618c327
...@@ -4141,14 +4141,13 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, ...@@ -4141,14 +4141,13 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
return true; return true;
} }
static bool rtl_tx_slots_avail(struct rtl8169_private *tp, static bool rtl_tx_slots_avail(struct rtl8169_private *tp)
unsigned int nr_frags)
{ {
unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC unsigned int slots_avail = READ_ONCE(tp->dirty_tx) + NUM_TX_DESC
- READ_ONCE(tp->cur_tx); - READ_ONCE(tp->cur_tx);
/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
return slots_avail > nr_frags; return slots_avail > MAX_SKB_FRAGS;
} }
/* Versions RTL8102e and from RTL8168c onwards support csum_v2 */ /* Versions RTL8102e and from RTL8168c onwards support csum_v2 */
...@@ -4183,7 +4182,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, ...@@ -4183,7 +4182,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry; txd_first = tp->TxDescArray + entry;
if (unlikely(!rtl_tx_slots_avail(tp, frags))) { if (unlikely(!rtl_tx_slots_avail(tp))) {
if (net_ratelimit()) if (net_ratelimit())
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop_0; goto err_stop_0;
...@@ -4228,7 +4227,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, ...@@ -4228,7 +4227,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1); WRITE_ONCE(tp->cur_tx, tp->cur_tx + frags + 1);
stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); stop_queue = !rtl_tx_slots_avail(tp);
if (unlikely(stop_queue)) { if (unlikely(stop_queue)) {
/* Avoid wrongly optimistic queue wake-up: rtl_tx thread must /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
* not miss a ring update when it notices a stopped queue. * not miss a ring update when it notices a stopped queue.
...@@ -4243,7 +4242,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, ...@@ -4243,7 +4242,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
* can't. * can't.
*/ */
smp_mb__after_atomic(); smp_mb__after_atomic();
if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) if (rtl_tx_slots_avail(tp))
netif_start_queue(dev); netif_start_queue(dev);
door_bell = true; door_bell = true;
} }
...@@ -4394,10 +4393,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, ...@@ -4394,10 +4393,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
* ring status. * ring status.
*/ */
smp_store_mb(tp->dirty_tx, dirty_tx); smp_store_mb(tp->dirty_tx, dirty_tx);
if (netif_queue_stopped(dev) && if (netif_queue_stopped(dev) && rtl_tx_slots_avail(tp))
rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
netif_wake_queue(dev); netif_wake_queue(dev);
}
/* /*
* 8168 hack: TxPoll requests are lost when the Tx packets are * 8168 hack: TxPoll requests are lost when the Tx packets are
* too close. Let's kick an extra TxPoll request when a burst * too close. Let's kick an extra TxPoll request when a burst
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment