Commit e5ef1de1 authored by Lennert Buytenhek, committed by Lennert Buytenhek

mv643xx_eth: transmit multiqueue support

As all the infrastructure for multiple transmit queues already exists
in the driver, this patch is entirely trivial.

The individual transmit queues are still serialised by the driver's
per-port private spinlock, but that will disappear (i.e. be replaced
by the per-subqueue ->_xmit_lock) in a subsequent patch.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
parent befefe21
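
The change boils down to switching from the single-queue netif_{stop,wake}_queue() calls to the generic per-subqueue TX API. A minimal sketch of that pattern, for context (the my_priv/my_txq/my_xmit names and ring fields are illustrative placeholders, not mv643xx_eth code; only the skb_get_queue_mapping(), netdev_get_tx_queue() and netif_tx_stop_queue() calls mirror what the patch does):

/*
 * Sketch of the per-subqueue TX pattern this patch adopts.
 * "my_priv", "my_txq" and "my_xmit" are made-up placeholders;
 * only the netdev_* and skb_* calls reflect the real API.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_txq {
	int ring_size;		/* descriptors in this ring */
	int desc_count;		/* descriptors currently in use */
};

struct my_priv {
	struct my_txq txq[8];	/* one software queue per hardware queue */
};

static int my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *mp = netdev_priv(dev);
	int queue = skb_get_queue_mapping(skb);	/* picked by the core */
	struct my_txq *txq = &mp->txq[queue];
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

	/* ... hand the skb to this queue's descriptor ring ... */

	/* Flow-control only the subqueue that is about to run out of room. */
	if (txq->ring_size - txq->desc_count < MAX_SKB_FRAGS + 1)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}

The reclaim side is symmetric: once enough descriptors of a given ring have been freed, only that ring's subqueue is woken with netif_tx_wake_queue(), which is what __txq_maybe_wake() does in the first hunk below.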
@@ -449,15 +449,10 @@ static void txq_disable(struct tx_queue *txq)
 static void __txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
-
-	/*
-	 * netif_{stop,wake}_queue() flow control only applies to
-	 * the primary queue.
-	 */
-	BUG_ON(txq->index != 0);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_queue(nq);
 }
@@ -827,8 +822,11 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	struct net_device_stats *stats = &dev->stats;
+	int queue;
 	struct tx_queue *txq;
+	struct netdev_queue *nq;
 	unsigned long flags;
+	int entries_left;
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
 		stats->tx_dropped++;
@@ -838,15 +836,16 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&mp->lock, flags);
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
 
-	txq = mp->txq;
+	spin_lock_irqsave(&mp->lock, flags);
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 		spin_unlock_irqrestore(&mp->lock, flags);
-		if (txq->index == 0 && net_ratelimit())
-			dev_printk(KERN_ERR, &dev->dev,
-				   "primary tx queue full?!\n");
+		if (net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
@@ -856,13 +855,9 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
-	if (txq->index == 0) {
-		int entries_left;
-
-		entries_left = txq->tx_ring_size - txq->tx_desc_count;
-		if (entries_left < MAX_SKB_FRAGS + 1)
-			netif_stop_queue(dev);
-	}
+	entries_left = txq->tx_ring_size - txq->tx_desc_count;
+	if (entries_left < MAX_SKB_FRAGS + 1)
+		netif_tx_stop_queue(nq);
 
 	spin_unlock_irqrestore(&mp->lock, flags);
@@ -2169,10 +2164,10 @@ static void tx_timeout_task(struct work_struct *ugly)
 	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
 
 	if (netif_running(mp->dev)) {
-		netif_stop_queue(mp->dev);
+		netif_tx_stop_all_queues(mp->dev);
 		port_reset(mp);
 		port_start(mp);
-		netif_wake_queue(mp->dev);
+		netif_tx_wake_all_queues(mp->dev);
 	}
 }
@@ -2546,7 +2541,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	dev = alloc_etherdev(sizeof(struct mv643xx_eth_private));
+	dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
 	if (!dev)
 		return -ENOMEM;
@@ -2559,6 +2554,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	mp->dev = dev;
 
 	set_params(mp, pd);
+	dev->real_num_tx_queues = mp->txq_count;
 
 	spin_lock_init(&mp->lock);
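
For completeness, the probe-time half of the change follows the usual alloc_etherdev_mq() pattern: allocate the netdev with room for the maximum number of TX subqueues, then trim real_num_tx_queues down to what the port actually uses. A rough sketch, with a made-up my_probe() helper (only alloc_etherdev_mq() and the real_num_tx_queues assignment come from the patch itself):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Illustrative only: how the allocation and the real_num_tx_queues
 * assignment in the last two hunks fit together. */
static struct net_device *my_probe(unsigned int txq_count, size_t priv_size)
{
	struct net_device *dev;

	/* Reserve space for the maximum number of TX subqueues (8 here)... */
	dev = alloc_etherdev_mq(priv_size, 8);
	if (!dev)
		return NULL;

	/* ...but only expose as many as this port was configured with. */
	dev->real_num_tx_queues = txq_count;

	return dev;
}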