Commit 53511453 authored by Eric Dumazet, committed by David S. Miller

net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers

Add two helpers so that drivers do not have to care whether BQL is
available or not.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jim Davis <jim.epost@gmail.com>
Fixes: 29d40c90 ("net/mlx4_en: Use prefetch in tx path")
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 709c48b3
drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -392,7 +392,8 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
 	if (!priv->port_up)
 		return true;
 
-	prefetchw(&ring->tx_queue->dql.limit);
+	netdev_txq_bql_complete_prefetchw(ring->tx_queue);
+
 	index = cons_index & size_mask;
 	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
 	last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb);
@@ -737,7 +738,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_tag = vlan_tx_tag_get(skb);
 
-	prefetchw(&ring->tx_queue->dql);
+	netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
 
 	/* Track current inflight packets for performance analysis */
 	AVG_PERF_COUNTER(priv->pstats.inflight_avg,
include/linux/netdevice.h
@@ -30,6 +30,7 @@
 #include <linux/bug.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/prefetch.h>
 #include <asm/cache.h>
 #include <asm/byteorder.h>
@@ -2480,6 +2481,34 @@ netif_xmit_frozen_or_drv_stopped(const struct netdev_queue *dev_queue)
 	return dev_queue->state & QUEUE_STATE_DRV_XOFF_OR_FROZEN;
 }
 
+/**
+ * netdev_txq_bql_enqueue_prefetchw - prefetch bql data for write
+ * @dev_queue: pointer to transmit queue
+ *
+ * BQL enabled drivers might use this helper in their ndo_start_xmit(),
+ * to give appropriate hint to the cpu.
+ */
+static inline void netdev_txq_bql_enqueue_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+	prefetchw(&dev_queue->dql.num_queued);
+#endif
+}
+
+/**
+ * netdev_txq_bql_complete_prefetchw - prefetch bql data for write
+ * @dev_queue: pointer to transmit queue
+ *
+ * BQL enabled drivers might use this helper in their TX completion path,
+ * to give appropriate hint to the cpu.
+ */
+static inline void netdev_txq_bql_complete_prefetchw(struct netdev_queue *dev_queue)
+{
+#ifdef CONFIG_BQL
+	prefetchw(&dev_queue->dql.limit);
+#endif
+}
+
 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 					unsigned int bytes)
 {
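For illustration only, a minimal sketch of how a BQL-aware driver could use the two new helpers alongside the existing netdev_tx_sent_queue()/netdev_tx_completed_queue() calls. This is not part of the commit; the "foo" names, the single-queue layout and the elided hardware steps are assumptions, modeled on the mlx4 usage above.

/* Hypothetical single-queue "foo" driver, for illustration only. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_priv {
	struct netdev_queue *tx_queue;	/* e.g. netdev_get_tx_queue(dev, 0) at open time */
	/* ... hardware descriptor ring state ... */
};

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Warm up the BQL enqueue counters early, before posting the descriptor. */
	netdev_txq_bql_enqueue_prefetchw(priv->tx_queue);

	/* ... build and post the hardware TX descriptor for skb ... */

	netdev_tx_sent_queue(priv->tx_queue, skb->len);
	return NETDEV_TX_OK;
}

static void foo_tx_complete(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	unsigned int pkts = 0, bytes = 0;

	/* The completion path will write dql.limit; prefetch it for write. */
	netdev_txq_bql_complete_prefetchw(priv->tx_queue);

	/* ... walk completed descriptors, accumulating pkts and bytes ... */

	netdev_tx_completed_queue(priv->tx_queue, pkts, bytes);
}

With CONFIG_BQL disabled both helpers compile to nothing, so callers such as mlx4_en_xmit() no longer need their own #ifdef or knowledge of struct dql internals.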