Commit d64b5e85 authored by Eric Dumazet, committed by David S. Miller

net: add netif_tx_napi_add()

netif_tx_napi_add() is a variant of netif_napi_add().

It should be used by drivers that use a napi structure to exclusively poll TX.

We do not want to add this kind of napi to napi_hash[] in the following
patches, which add generic busy polling to all NAPI drivers.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 93f93a44
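Before the per-driver hunks, here is a minimal sketch of the conversion pattern this patch applies: the RX context keeps netif_napi_add() (and so stays eligible for busy polling via napi_hash[]), while a TX-only context switches to netif_tx_napi_add(). The foo_* names are hypothetical; only the two registration calls and NAPI_POLL_WEIGHT come from the kernel API.

#include <linux/netdevice.h>

/* Hypothetical driver with one RX and one TX NAPI context. */
struct foo_priv {
        struct napi_struct napi_rx;
        struct napi_struct napi_tx;
};

static int foo_rx_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... process up to @budget received packets, counting work_done ... */
        if (work_done < budget)
                napi_complete(napi);
        return work_done;
}

static int foo_tx_poll(struct napi_struct *napi, int budget)
{
        /* ... reclaim completed TX descriptors, then complete ... */
        napi_complete(napi);
        return 0;
}

static void foo_register_napi(struct net_device *ndev, struct foo_priv *priv)
{
        /* RX poller: normal registration, may be added to napi_hash[]. */
        netif_napi_add(ndev, &priv->napi_rx, foo_rx_poll, NAPI_POLL_WEIGHT);

        /* TX-only poller: sets NAPI_STATE_NO_BUSY_POLL up front, so
         * napi_hash_add() skips it and busy polling never selects it.
         */
        netif_tx_napi_add(ndev, &priv->napi_tx, foo_tx_poll, NAPI_POLL_WEIGHT);
}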
@@ -1216,7 +1216,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	/* Initialize SW view of the ring */
 	spin_lock_init(&ring->lock);
 	ring->priv = priv;
-	netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+	netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
 	ring->index = index;
 	ring->size = size;
 	ring->alloc_size = ring->size;
@@ -2041,11 +2041,11 @@ static void bcmgenet_init_tx_napi(struct bcmgenet_priv *priv)
 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
 		ring = &priv->tx_rings[i];
-		netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+		netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
 	}

 	ring = &priv->tx_rings[DESC_INDEX];
-	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
 }

 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
@@ -1050,7 +1050,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
 	ndev->netdev_ops = &fs_enet_netdev_ops;
 	ndev->watchdog_timeo = 2 * HZ;
 	netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi, fpi->napi_weight);
-	netif_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
+	netif_tx_napi_add(ndev, &fep->napi_tx, fs_enet_tx_napi, 2);
 	ndev->ethtool_ops = &fs_ethtool_ops;
@@ -1347,12 +1347,12 @@ static int gfar_probe(struct platform_device *ofdev)
 		if (priv->poll_mode == GFAR_SQ_POLLING) {
 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
 				       gfar_poll_rx_sq, GFAR_DEV_WEIGHT);
-			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
-				       gfar_poll_tx_sq, 2);
+			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
+					  gfar_poll_tx_sq, 2);
 		} else {
 			netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
 				       gfar_poll_rx, GFAR_DEV_WEIGHT);
-			netif_napi_add(dev, &priv->gfargrp[i].napi_tx,
-				       gfar_poll_tx, 2);
+			netif_tx_napi_add(dev, &priv->gfargrp[i].napi_tx,
+					  gfar_poll_tx, 2);
 		}
 	}
@@ -156,7 +156,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	cq->mcq.event = mlx4_en_cq_event;

 	if (cq->is_tx) {
-		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
-			       NAPI_POLL_WEIGHT);
+		netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
+				  NAPI_POLL_WEIGHT);
 	} else {
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
@@ -1604,7 +1604,7 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
 	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
-			netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
-				       NAPI_POLL_WEIGHT);
+			netif_tx_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+					  NAPI_POLL_WEIGHT);
 		}
 	}

@@ -2135,7 +2135,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
 	    !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
 		for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 			tx_ring = &adapter->tx_ring[ring];
-			netif_napi_add(netdev, &tx_ring->napi,
-				       qlcnic_83xx_msix_tx_poll,
-				       NAPI_POLL_WEIGHT);
+			netif_tx_napi_add(netdev, &tx_ring->napi,
+					  qlcnic_83xx_msix_tx_poll,
+					  NAPI_POLL_WEIGHT);
 		}
@@ -4998,7 +4998,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
 	dev->netdev_ops = &rocker_port_netdev_ops;
 	dev->ethtool_ops = &rocker_port_ethtool_ops;
 	dev->switchdev_ops = &rocker_port_switchdev_ops;
-	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
-		       NAPI_POLL_WEIGHT);
+	netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
+			  NAPI_POLL_WEIGHT);
 	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
 		       NAPI_POLL_WEIGHT);
@@ -2469,7 +2469,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	ndev->netdev_ops = &cpsw_netdev_ops;
 	ndev->ethtool_ops = &cpsw_ethtool_ops;
 	netif_napi_add(ndev, &priv->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
-	netif_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+	netif_tx_napi_add(ndev, &priv->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);

 	/* register the network device */
 	SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1990,7 +1990,7 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
 	/* NAPI register */
 	netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll, NETCP_NAPI_WEIGHT);
-	netif_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);
+	netif_tx_napi_add(ndev, &netcp->tx_napi, netcp_tx_poll, NETCP_NAPI_WEIGHT);

 	/* Register the network device */
 	ndev->dev_id = 0;
@@ -183,7 +183,7 @@ void *wil_if_alloc(struct device *dev)
 	netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx,
 		       WIL6210_NAPI_BUDGET);
-	netif_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
-		       WIL6210_NAPI_BUDGET);
+	netif_tx_napi_add(ndev, &wil->napi_tx, wil6210_netdev_poll_tx,
+			  WIL6210_NAPI_BUDGET);

 	netif_tx_stop_all_queues(ndev);
@@ -326,7 +326,8 @@ enum {
 	NAPI_STATE_SCHED,	/* Poll is scheduled */
 	NAPI_STATE_DISABLE,	/* Disable pending */
 	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
-	NAPI_STATE_HASHED,	/* In NAPI hash */
+	NAPI_STATE_HASHED,	/* In NAPI hash (busy polling possible) */
+	NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
 };

 enum gro_result {
@@ -1938,6 +1939,26 @@ static inline void *netdev_priv(const struct net_device *dev)
 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 		    int (*poll)(struct napi_struct *, int), int weight);

+/**
+ *	netif_tx_napi_add - initialize a napi context
+ *	@dev: network device
+ *	@napi: napi context
+ *	@poll: polling function
+ *	@weight: default weight
+ *
+ * This variant of netif_napi_add() should be used from drivers using NAPI
+ * to exclusively poll a TX queue.
+ * This will avoid adding it into napi_hash[], thus not polluting this
+ * hash table.
+ */
+static inline void netif_tx_napi_add(struct net_device *dev,
+				     struct napi_struct *napi,
+				     int (*poll)(struct napi_struct *, int),
+				     int weight)
+{
+	set_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state);
+	netif_napi_add(dev, napi, poll, weight);
+}
+
 /**
  *	netif_napi_del - remove a napi context
  *	@napi: napi context
@@ -4737,7 +4737,8 @@ EXPORT_SYMBOL(sk_busy_loop);

 void napi_hash_add(struct napi_struct *napi)
 {
-	if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
+	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
 		return;

 	spin_lock(&napi_hash_lock);
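For context on why staying out of napi_hash[] disables busy polling: the busy-poll path resolves a socket's napi_id through this hash table, so a context that was never hashed is simply never found. Below is a simplified, hypothetical sketch of that lookup, modeled on the in-tree napi_by_id() helper; the foo_* names and the stand-in table are assumptions, not part of this patch.

#include <linux/hashtable.h>
#include <linux/netdevice.h>

/* Stand-in for the static napi_hash table kept in net/core/dev.c. */
static DEFINE_HASHTABLE(foo_napi_hash, 8);

static struct napi_struct *foo_napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(foo_napi_hash);
	struct napi_struct *napi;

	/* Walk the hash bucket under RCU, matching on napi_id. */
	hlist_for_each_entry_rcu(napi, &foo_napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	/* Never hashed (e.g. NAPI_STATE_NO_BUSY_POLL set) => not busy-pollable. */
	return NULL;
}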