Commit 1cc3bd87 authored by Alexander Duyck, committed by Jeff Kirsher

igb: Remove multi_tx_table and simplify igb_xmit_frame

Instead of using the multi_tx_table to map possible Tx queues to Tx rings,
we can simply fold the queue index back into range with a modulo for the
unlikely event that the Tx queue provided exceeds the number of Tx rings.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 60136906
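For readers skimming the diff below: the patch drops the pre-built multi_tx_table lookup (one slot per IGB_ABS_MAX_TX_QUEUES) and instead adds a small helper, igb_tx_queue_mapping(), that folds an out-of-range skb->queue_mapping back into range with a modulo. The standalone C sketch below is not driver code; the demo_* names are invented purely to show the two strategies side by side (with 4 rings and an 8-entry table they pick the same ring for every queue index):

#include <stdio.h>

#define DEMO_ABS_MAX_TX_QUEUES 8        /* stand-in for IGB_ABS_MAX_TX_QUEUES */

/* Old scheme: a pre-computed table maps every possible queue to a ring. */
static unsigned int demo_table[DEMO_ABS_MAX_TX_QUEUES];

static void demo_build_table(unsigned int num_tx_queues)
{
        unsigned int i;

        for (i = 0; i < DEMO_ABS_MAX_TX_QUEUES; i++)
                demo_table[i] = i % num_tx_queues;
}

static unsigned int demo_map_old(unsigned int queue_mapping)
{
        /* mirrors: skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1) */
        return demo_table[queue_mapping & (DEMO_ABS_MAX_TX_QUEUES - 1)];
}

/* New scheme: no table; fold an out-of-range index back with a modulo. */
static unsigned int demo_map_new(unsigned int queue_mapping,
                                 unsigned int num_tx_queues)
{
        if (queue_mapping >= num_tx_queues)
                queue_mapping %= num_tx_queues;
        return queue_mapping;
}

int main(void)
{
        unsigned int num_tx_queues = 4; /* e.g. a 4-ring configuration */
        unsigned int q;

        demo_build_table(num_tx_queues);
        for (q = 0; q < DEMO_ABS_MAX_TX_QUEUES; q++)
                printf("queue %u -> old ring %u, new ring %u\n",
                       q, demo_map_old(q), demo_map_new(q, num_tx_queues));
        return 0;
}

The practical gain, visible in the hunks below, is that the adapter no longer carries a ring-pointer table that has to be rebuilt in igb_setup_all_tx_resources(); the common case of an in-range queue index costs a single compare.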
@@ -63,8 +63,7 @@ struct igb_adapter;
 /* Transmit and receive queues */
 #define IGB_MAX_RX_QUEUES                  (adapter->vfs_allocated_count ? 2 : \
                                            (hw->mac.type > e1000_82575 ? 8 : 4))
-#define IGB_ABS_MAX_TX_QUEUES              8
-#define IGB_MAX_TX_QUEUES                  IGB_MAX_RX_QUEUES
+#define IGB_MAX_TX_QUEUES                  16
 #define IGB_MAX_VF_MC_ENTRIES              30
 #define IGB_MAX_VF_FUNCTIONS               8
@@ -324,7 +323,6 @@ struct igb_adapter {
         /* to not mess up cache alignment, always add to the bottom */
         u32 eeprom_wol;

-        struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
         u16 tx_ring_count;
         u16 rx_ring_count;
         unsigned int vfs_allocated_count;
@@ -1875,7 +1875,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
         err = -ENOMEM;
         netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
-                                   IGB_ABS_MAX_TX_QUEUES);
+                                   IGB_MAX_TX_QUEUES);
         if (!netdev)
                 goto err_alloc_etherdev;
@@ -2620,10 +2620,6 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
                 }
         }

-        for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
-                int r_idx = i % adapter->num_tx_queues;
-                adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
-        }
         return err;
 }
@@ -4363,12 +4359,21 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
         return NETDEV_TX_OK;
 }

+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+                                                    struct sk_buff *skb)
+{
+        unsigned int r_idx = skb->queue_mapping;
+
+        if (r_idx >= adapter->num_tx_queues)
+                r_idx = r_idx % adapter->num_tx_queues;
+
+        return adapter->tx_ring[r_idx];
+}
+
 static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
                                   struct net_device *netdev)
 {
         struct igb_adapter *adapter = netdev_priv(netdev);
-        struct igb_ring *tx_ring;
-        int r_idx = 0;

         if (test_bit(__IGB_DOWN, &adapter->state)) {
                 dev_kfree_skb_any(skb);
@@ -4380,14 +4385,17 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
                 return NETDEV_TX_OK;
         }

-        r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
-        tx_ring = adapter->multi_tx_table[r_idx];
+        /*
+         * The minimum packet size with TCTL.PSP set is 17 so pad the skb
+         * in order to meet this minimum size requirement.
+         */
+        if (skb->len < 17) {
+                if (skb_padto(skb, 17))
+                        return NETDEV_TX_OK;
+                skb->len = 17;
+        }

-        /* This goes back to the question of how to logically map a tx queue
-         * to a flow.  Right now, performance is impacted slightly negatively
-         * if using multiple tx queues.  If the stack breaks away from a
-         * single qdisc implementation, we can look at this again. */
-        return igb_xmit_frame_ring(skb, tx_ring);
+        return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
 }

 /**
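A side note on the new padding block in igb_xmit_frame(): as the in-diff comment says, the minimum packet size with TCTL.PSP set is 17 bytes, so shorter frames are padded before being handed to igb_xmit_frame_ring(). skb_padto() zero-fills the tail and frees the skb on failure, which is why that error path simply returns NETDEV_TX_OK. Below is a minimal userspace sketch of the same rule, assuming a plain byte buffer instead of an skb; demo_pad_min_len() is a made-up name, not a driver function:

#include <stdio.h>
#include <string.h>

#define DEMO_MIN_TX_LEN 17      /* minimum frame length cited for TCTL.PSP */

/*
 * Zero-fill the tail of a raw frame buffer so it is at least
 * DEMO_MIN_TX_LEN bytes long -- roughly what skb_padto() plus the
 * skb->len update accomplish in the patch.  Returns the new length,
 * or 0 if the buffer is too small and the frame must be dropped.
 */
static size_t demo_pad_min_len(unsigned char *buf, size_t len, size_t bufsize)
{
        if (len >= DEMO_MIN_TX_LEN)
                return len;
        if (bufsize < DEMO_MIN_TX_LEN)
                return 0;
        memset(buf + len, 0, DEMO_MIN_TX_LEN - len);
        return DEMO_MIN_TX_LEN;
}

int main(void)
{
        unsigned char frame[64] = { 0xde, 0xad, 0xbe, 0xef };

        printf("padded length: %zu\n",
               demo_pad_min_len(frame, 4, sizeof(frame)));
        return 0;
}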