Commit 9f65e15b authored by Alexander Duyck, committed by Jeff Kirsher

i40e: Move rings from pointer to array to array of pointers

Allocate the queue pairs individually instead of as a group.  This
allows for much easier queue management as it is possible to dynamically
resize the queues without having to free and allocate the entire block.

Ease statistic collection by treating Tx/Rx queue pairs as a single
unit.  Each pair is allocated together and starts with a Tx queue and
ends with an Rx queue.  By ordering them this way it is possible to know
the Rx offset based on a pointer to the Tx queue.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Kavindya Deegala <kavindya.s.deegala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent cd0b6fa6
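
The layout the patch adopts can be sketched in isolation. Below is a minimal
userspace sketch under stated assumptions: the struct ring, alloc_queue_pair()
and free_queue_pair() names are illustrative only, and plain calloc()/free()
stand in for the driver's kzalloc() and kfree_rcu(). The point is the pairing:
one allocation holds both rings, Tx at index 0 and Rx at index 1, so the Rx
ring is always reachable as &tx_ring[1] from the Tx pointer.

#include <stdlib.h>

struct ring {
	int queue_index;
	int is_rx;	/* stand-in for the real ring fields */
};

/* Allocate one Tx/Rx pair as a single block: Tx at [0], Rx at [1]. */
static int alloc_queue_pair(struct ring **tx_slot, struct ring **rx_slot,
			    int idx)
{
	struct ring *tx_ring = calloc(2, sizeof(struct ring));

	if (!tx_ring)
		return -1;

	tx_ring->queue_index = idx;
	*tx_slot = tx_ring;

	/* The Rx ring lives directly behind its Tx partner. */
	tx_ring[1].queue_index = idx;
	tx_ring[1].is_rx = 1;
	*rx_slot = &tx_ring[1];
	return 0;
}

/* Freeing the Tx pointer releases the whole pair; the Rx pointer was
 * never a separate allocation.
 */
static void free_queue_pair(struct ring **tx_slot, struct ring **rx_slot)
{
	free(*tx_slot);
	*tx_slot = NULL;
	*rx_slot = NULL;
}

int main(void)
{
	struct ring *tx, *rx;

	if (alloc_queue_pair(&tx, &rx, 0))
		return 1;
	free_queue_pair(&tx, &rx);
	return 0;
}

In the driver code below, the same pattern appears as
kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL) in i40e_alloc_rings() and
kfree_rcu() on the Tx pointer in i40e_vsi_clear_rings(), with RCU protecting
readers such as the debugfs dump.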
@@ -347,9 +347,9 @@ struct i40e_vsi {
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
 
-	/* These are arrays of rings, allocated at run-time */
-	struct i40e_ring *rx_rings;
-	struct i40e_ring *tx_rings;
+	/* These are containers of ring pointers, allocated at run-time */
+	struct i40e_ring **rx_rings;
+	struct i40e_ring **tx_rings;
 
 	u16 work_limit;
 	/* high bit set means dynamic, use accessor routines to read/write.
...
@@ -258,12 +258,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			len = sizeof(struct i40e_tx_buffer);
-			memcpy(p, vsi->tx_rings[i].tx_bi, len);
+			memcpy(p, vsi->tx_rings[i]->tx_bi, len);
 			p += len;
 		}
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			len = sizeof(struct i40e_rx_buffer);
-			memcpy(p, vsi->rx_rings[i].rx_bi, len);
+			memcpy(p, vsi->rx_rings[i]->rx_bi, len);
 			p += len;
 		}
@@ -484,99 +484,104 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
 		 " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
 		 vsi->tx_restart, vsi->tx_busy,
 		 vsi->rx_buf_failed, vsi->rx_page_failed);
-	if (vsi->rx_rings) {
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: desc = %p\n",
-				 i, vsi->rx_rings[i].desc);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
-				 i, vsi->rx_rings[i].dev,
-				 vsi->rx_rings[i].netdev,
-				 vsi->rx_rings[i].rx_bi);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-				 i, vsi->rx_rings[i].state,
-				 vsi->rx_rings[i].queue_index,
-				 vsi->rx_rings[i].reg_idx);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
-				 i, vsi->rx_rings[i].rx_hdr_len,
-				 vsi->rx_rings[i].rx_buf_len,
-				 vsi->rx_rings[i].dtype);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-				 i, vsi->rx_rings[i].hsplit,
-				 vsi->rx_rings[i].next_to_use,
-				 vsi->rx_rings[i].next_to_clean,
-				 vsi->rx_rings[i].ring_active);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
-				 i, vsi->rx_rings[i].stats.packets,
-				 vsi->rx_rings[i].stats.bytes,
-				 vsi->rx_rings[i].rx_stats.non_eop_descs);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
-				 i,
-				 vsi->rx_rings[i].rx_stats.alloc_rx_page_failed,
-				 vsi->rx_rings[i].rx_stats.alloc_rx_buff_failed);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
-				 i, vsi->rx_rings[i].size,
-				 (long unsigned int)vsi->rx_rings[i].dma);
-			dev_info(&pf->pdev->dev,
-				 " rx_rings[%i]: vsi = %p, q_vector = %p\n",
-				 i, vsi->rx_rings[i].vsi,
-				 vsi->rx_rings[i].q_vector);
-		}
-	}
-	if (vsi->tx_rings) {
-		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: desc = %p\n",
-				 i, vsi->tx_rings[i].desc);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
-				 i, vsi->tx_rings[i].dev,
-				 vsi->tx_rings[i].netdev,
-				 vsi->tx_rings[i].tx_bi);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
-				 i, vsi->tx_rings[i].state,
-				 vsi->tx_rings[i].queue_index,
-				 vsi->tx_rings[i].reg_idx);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: dtype = %d\n",
-				 i, vsi->tx_rings[i].dtype);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-				 i, vsi->tx_rings[i].hsplit,
-				 vsi->tx_rings[i].next_to_use,
-				 vsi->tx_rings[i].next_to_clean,
-				 vsi->tx_rings[i].ring_active);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
-				 i, vsi->tx_rings[i].stats.packets,
-				 vsi->tx_rings[i].stats.bytes,
-				 vsi->tx_rings[i].tx_stats.restart_queue);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
-				 i,
-				 vsi->tx_rings[i].tx_stats.tx_busy,
-				 vsi->tx_rings[i].tx_stats.tx_done_old);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
-				 i, vsi->tx_rings[i].size,
-				 (long unsigned int)vsi->tx_rings[i].dma);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: vsi = %p, q_vector = %p\n",
-				 i, vsi->tx_rings[i].vsi,
-				 vsi->tx_rings[i].q_vector);
-			dev_info(&pf->pdev->dev,
-				 " tx_rings[%i]: DCB tc = %d\n",
-				 i, vsi->tx_rings[i].dcb_tc);
-		}
-	}
+	rcu_read_lock();
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
+		if (!rx_ring)
+			continue;
+
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: desc = %p\n",
+			 i, rx_ring->desc);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
+			 i, rx_ring->dev,
+			 rx_ring->netdev,
+			 rx_ring->rx_bi);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+			 i, rx_ring->state,
+			 rx_ring->queue_index,
+			 rx_ring->reg_idx);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
+			 i, rx_ring->rx_hdr_len,
+			 rx_ring->rx_buf_len,
+			 rx_ring->dtype);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+			 i, rx_ring->hsplit,
+			 rx_ring->next_to_use,
+			 rx_ring->next_to_clean,
+			 rx_ring->ring_active);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: rx_stats: packets = %lld, bytes = %lld, non_eop_descs = %lld\n",
+			 i, rx_ring->stats.packets,
+			 rx_ring->stats.bytes,
+			 rx_ring->rx_stats.non_eop_descs);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: rx_stats: alloc_rx_page_failed = %lld, alloc_rx_buff_failed = %lld\n",
+			 i,
+			 rx_ring->rx_stats.alloc_rx_page_failed,
+			 rx_ring->rx_stats.alloc_rx_buff_failed);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
+			 i, rx_ring->size,
+			 (long unsigned int)rx_ring->dma);
+		dev_info(&pf->pdev->dev,
+			 " rx_rings[%i]: vsi = %p, q_vector = %p\n",
+			 i, rx_ring->vsi,
+			 rx_ring->q_vector);
+	}
+	for (i = 0; i < vsi->num_queue_pairs; i++) {
+		struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
+		if (!tx_ring)
+			continue;
+
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: desc = %p\n",
+			 i, tx_ring->desc);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
+			 i, tx_ring->dev,
+			 tx_ring->netdev,
+			 tx_ring->tx_bi);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: state = %li, queue_index = %d, reg_idx = %d\n",
+			 i, tx_ring->state,
+			 tx_ring->queue_index,
+			 tx_ring->reg_idx);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: dtype = %d\n",
+			 i, tx_ring->dtype);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+			 i, tx_ring->hsplit,
+			 tx_ring->next_to_use,
+			 tx_ring->next_to_clean,
+			 tx_ring->ring_active);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: tx_stats: packets = %lld, bytes = %lld, restart_queue = %lld\n",
+			 i, tx_ring->stats.packets,
+			 tx_ring->stats.bytes,
+			 tx_ring->tx_stats.restart_queue);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: tx_stats: tx_busy = %lld, tx_done_old = %lld\n",
+			 i,
+			 tx_ring->tx_stats.tx_busy,
+			 tx_ring->tx_stats.tx_done_old);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
+			 i, tx_ring->size,
+			 (long unsigned int)tx_ring->dma);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: vsi = %p, q_vector = %p\n",
+			 i, tx_ring->vsi,
+			 tx_ring->q_vector);
+		dev_info(&pf->pdev->dev,
+			 " tx_rings[%i]: DCB tc = %d\n",
+			 i, tx_ring->dcb_tc);
+	}
+	rcu_read_unlock();
 	dev_info(&pf->pdev->dev,
 		 " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n",
 		 vsi->work_limit, vsi->rx_itr_setting,
@@ -782,9 +787,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
 		return;
 	}
 	if (is_rx_ring)
-		ring = vsi->rx_rings[ring_id];
+		ring = *vsi->rx_rings[ring_id];
 	else
-		ring = vsi->tx_rings[ring_id];
+		ring = *vsi->tx_rings[ring_id];
 	if (cnt == 2) {
 		dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n",
 			 vsi_seid, is_rx_ring ? "rx" : "tx", ring_id);
...
@@ -399,8 +399,8 @@ static void i40e_get_ringparam(struct net_device *netdev,
 	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
 	ring->rx_mini_max_pending = 0;
 	ring->rx_jumbo_max_pending = 0;
-	ring->rx_pending = vsi->rx_rings[0].count;
-	ring->tx_pending = vsi->tx_rings[0].count;
+	ring->rx_pending = vsi->rx_rings[0]->count;
+	ring->tx_pending = vsi->tx_rings[0]->count;
 	ring->rx_mini_pending = 0;
 	ring->rx_jumbo_pending = 0;
 }
@@ -429,8 +429,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	new_rx_count = ALIGN(new_rx_count, I40E_REQ_DESCRIPTOR_MULTIPLE);
 
 	/* if nothing to do return success */
-	if ((new_tx_count == vsi->tx_rings[0].count) &&
-	    (new_rx_count == vsi->rx_rings[0].count))
+	if ((new_tx_count == vsi->tx_rings[0]->count) &&
+	    (new_rx_count == vsi->rx_rings[0]->count))
 		return 0;
 
 	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
@@ -439,8 +439,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if (!netif_running(vsi->netdev)) {
 		/* simple case - set for the next time the netdev is started */
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			vsi->tx_rings[i].count = new_tx_count;
-			vsi->rx_rings[i].count = new_rx_count;
+			vsi->tx_rings[i]->count = new_tx_count;
+			vsi->rx_rings[i]->count = new_rx_count;
 		}
 		goto done;
 	}
@@ -451,10 +451,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	 */
 
 	/* alloc updated Tx resources */
-	if (new_tx_count != vsi->tx_rings[0].count) {
+	if (new_tx_count != vsi->tx_rings[0]->count) {
 		netdev_info(netdev,
 			    "Changing Tx descriptor count from %d to %d.\n",
-			    vsi->tx_rings[0].count, new_tx_count);
+			    vsi->tx_rings[0]->count, new_tx_count);
 		tx_rings = kcalloc(vsi->alloc_queue_pairs,
 				   sizeof(struct i40e_ring), GFP_KERNEL);
 		if (!tx_rings) {
@@ -464,7 +464,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			/* clone ring and setup updated count */
-			tx_rings[i] = vsi->tx_rings[i];
+			tx_rings[i] = *vsi->tx_rings[i];
 			tx_rings[i].count = new_tx_count;
 			err = i40e_setup_tx_descriptors(&tx_rings[i]);
 			if (err) {
@@ -481,10 +481,10 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	}
 
 	/* alloc updated Rx resources */
-	if (new_rx_count != vsi->rx_rings[0].count) {
+	if (new_rx_count != vsi->rx_rings[0]->count) {
 		netdev_info(netdev,
 			    "Changing Rx descriptor count from %d to %d\n",
-			    vsi->rx_rings[0].count, new_rx_count);
+			    vsi->rx_rings[0]->count, new_rx_count);
 		rx_rings = kcalloc(vsi->alloc_queue_pairs,
 				   sizeof(struct i40e_ring), GFP_KERNEL);
 		if (!rx_rings) {
@@ -494,7 +494,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
 			/* clone ring and setup updated count */
-			rx_rings[i] = vsi->rx_rings[i];
+			rx_rings[i] = *vsi->rx_rings[i];
 			rx_rings[i].count = new_rx_count;
 			err = i40e_setup_rx_descriptors(&rx_rings[i]);
 			if (err) {
@@ -517,8 +517,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if (tx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			i40e_free_tx_resources(&vsi->tx_rings[i]);
-			vsi->tx_rings[i] = tx_rings[i];
+			i40e_free_tx_resources(vsi->tx_rings[i]);
+			*vsi->tx_rings[i] = tx_rings[i];
 		}
 		kfree(tx_rings);
 		tx_rings = NULL;
@@ -526,8 +526,8 @@ static int i40e_set_ringparam(struct net_device *netdev,
 	if (rx_rings) {
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			i40e_free_rx_resources(&vsi->rx_rings[i]);
-			vsi->rx_rings[i] = rx_rings[i];
+			i40e_free_rx_resources(vsi->rx_rings[i]);
+			*vsi->rx_rings[i] = rx_rings[i];
 		}
 		kfree(rx_rings);
 		rx_rings = NULL;
@@ -588,10 +588,10 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
-		data[i] = vsi->tx_rings[j].stats.packets;
-		data[i + 1] = vsi->tx_rings[j].stats.bytes;
-		data[i + 2] = vsi->rx_rings[j].stats.packets;
-		data[i + 3] = vsi->rx_rings[j].stats.bytes;
+		data[i] = vsi->tx_rings[j]->stats.packets;
+		data[i + 1] = vsi->tx_rings[j]->stats.bytes;
+		data[i + 2] = vsi->rx_rings[j]->stats.packets;
+		data[i + 3] = vsi->rx_rings[j]->stats.bytes;
 	}
 	if (vsi == pf->vsi[pf->lan_vsi]) {
 		for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
...
@@ -376,14 +376,14 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
 	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
 	if (vsi->rx_rings)
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			memset(&vsi->rx_rings[i].stats, 0 ,
-			       sizeof(vsi->rx_rings[i].stats));
-			memset(&vsi->rx_rings[i].rx_stats, 0 ,
-			       sizeof(vsi->rx_rings[i].rx_stats));
-			memset(&vsi->tx_rings[i].stats, 0 ,
-			       sizeof(vsi->tx_rings[i].stats));
-			memset(&vsi->tx_rings[i].tx_stats, 0,
-			       sizeof(vsi->tx_rings[i].tx_stats));
+			memset(&vsi->rx_rings[i]->stats, 0 ,
+			       sizeof(vsi->rx_rings[i]->stats));
+			memset(&vsi->rx_rings[i]->rx_stats, 0 ,
+			       sizeof(vsi->rx_rings[i]->rx_stats));
+			memset(&vsi->tx_rings[i]->stats, 0 ,
+			       sizeof(vsi->tx_rings[i]->stats));
+			memset(&vsi->tx_rings[i]->tx_stats, 0,
+			       sizeof(vsi->tx_rings[i]->tx_stats));
 		}
 	vsi->stat_offsets_loaded = false;
 }
@@ -602,7 +602,7 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = &vsi->tx_rings[i];
+			struct i40e_ring *ring = vsi->tx_rings[i];
 			clear_bit(__I40E_HANG_CHECK_ARMED, &ring->state);
 		}
 	}
@@ -656,7 +656,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			struct i40e_ring *ring = &vsi->tx_rings[i];
+			struct i40e_ring *ring = vsi->tx_rings[i];
 
 			tc = ring->dcb_tc;
 			if (xoff[tc])
@@ -711,13 +711,13 @@ void i40e_update_stats(struct i40e_vsi *vsi)
 	for (q = 0; q < vsi->num_queue_pairs; q++) {
 		struct i40e_ring *p;
 
-		p = &vsi->rx_rings[q];
+		p = vsi->rx_rings[q];
 		rx_b += p->stats.bytes;
 		rx_p += p->stats.packets;
 		rx_buf += p->rx_stats.alloc_rx_buff_failed;
 		rx_page += p->rx_stats.alloc_rx_page_failed;
 
-		p = &vsi->tx_rings[q];
+		p = vsi->tx_rings[q];
 		tx_b += p->stats.bytes;
 		tx_p += p->stats.packets;
 		tx_restart += p->tx_stats.restart_queue;
@@ -1992,7 +1992,7 @@ static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
 	int i, err = 0;
 
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_setup_tx_descriptors(&vsi->tx_rings[i]);
+		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
 
 	return err;
 }
@@ -2008,8 +2008,8 @@ static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->tx_rings[i].desc)
-			i40e_free_tx_resources(&vsi->tx_rings[i]);
+		if (vsi->tx_rings[i]->desc)
+			i40e_free_tx_resources(vsi->tx_rings[i]);
 }
 
 /**
@@ -2027,7 +2027,7 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
 	int i, err = 0;
 
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_setup_rx_descriptors(&vsi->rx_rings[i]);
+		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
 
 	return err;
 }
@@ -2042,8 +2042,8 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++)
-		if (vsi->rx_rings[i].desc)
-			i40e_free_rx_resources(&vsi->rx_rings[i]);
+		if (vsi->rx_rings[i]->desc)
+			i40e_free_rx_resources(vsi->rx_rings[i]);
 }
 
 /**
@@ -2227,8 +2227,8 @@ static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
 	int err = 0;
 	u16 i;
 
-	for (i = 0; (i < vsi->num_queue_pairs) && (!err); i++)
-		err = i40e_configure_tx_ring(&vsi->tx_rings[i]);
+	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
+		err = i40e_configure_tx_ring(vsi->tx_rings[i]);
 
 	return err;
 }
@@ -2278,7 +2278,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
 
 	/* set up individual rings */
 	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
-		err = i40e_configure_rx_ring(&vsi->rx_rings[i]);
+		err = i40e_configure_rx_ring(vsi->rx_rings[i]);
 
 	return err;
 }
@@ -2302,8 +2302,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
 		qoffset = vsi->tc_config.tc_info[n].qoffset;
 		qcount = vsi->tc_config.tc_info[n].qcount;
 		for (i = qoffset; i < (qoffset + qcount); i++) {
-			struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-			struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+			struct i40e_ring *rx_ring = vsi->rx_rings[i];
+			struct i40e_ring *tx_ring = vsi->tx_rings[i];
 			rx_ring->dcb_tc = n;
 			tx_ring->dcb_tc = n;
 		}
@@ -2615,8 +2615,8 @@ static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
 	int i;
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i].reg_idx), 0);
-		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i].reg_idx), 0);
+		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
+		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
 	}
 
 	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
@@ -2786,8 +2786,8 @@ static irqreturn_t i40e_intr(int irq, void *data)
 static void map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
 {
 	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
-	struct i40e_ring *tx_ring = &(vsi->tx_rings[qp_idx]);
-	struct i40e_ring *rx_ring = &(vsi->rx_rings[qp_idx]);
+	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
+	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
 
 	tx_ring->q_vector = q_vector;
 	tx_ring->next = q_vector->tx.ring;
@@ -3792,8 +3792,8 @@ void i40e_down(struct i40e_vsi *vsi)
 	i40e_napi_disable_all(vsi);
 
 	for (i = 0; i < vsi->num_queue_pairs; i++) {
-		i40e_clean_tx_ring(&vsi->tx_rings[i]);
-		i40e_clean_rx_ring(&vsi->rx_rings[i]);
+		i40e_clean_tx_ring(vsi->tx_rings[i]);
+		i40e_clean_rx_ring(vsi->rx_rings[i]);
 	}
 }
@@ -4220,9 +4220,9 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
 			continue;
 
 		for (i = 0; i < vsi->num_queue_pairs; i++) {
-			set_check_for_tx_hang(&vsi->tx_rings[i]);
+			set_check_for_tx_hang(vsi->tx_rings[i]);
 			if (test_bit(__I40E_HANG_CHECK_ARMED,
-				     &vsi->tx_rings[i].state))
+				     &vsi->tx_rings[i]->state))
 				armed++;
 		}
@@ -4959,6 +4959,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	int ret = -ENODEV;
 	struct i40e_vsi *vsi;
 	int sz_vectors;
+	int sz_rings;
 	int vsi_idx;
 	int i;
@@ -5004,7 +5005,18 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
 	INIT_LIST_HEAD(&vsi->mac_filter_list);
 
-	i40e_set_num_rings_in_vsi(vsi);
+	ret = i40e_set_num_rings_in_vsi(vsi);
+	if (ret)
+		goto err_rings;
+
+	/* allocate memory for ring pointers */
+	sz_rings = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
+	vsi->tx_rings = kzalloc(sz_rings, GFP_KERNEL);
+	if (!vsi->tx_rings) {
+		ret = -ENOMEM;
+		goto err_rings;
+	}
+	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
 
 	/* allocate memory for q_vector pointers */
 	sz_vectors = sizeof(struct i40e_q_vectors *) * vsi->num_q_vectors;
@@ -5022,6 +5034,8 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
 	goto unlock_pf;
 
 err_vectors:
+	kfree(vsi->tx_rings);
+err_rings:
 	pf->next_vsi = i - 1;
 	kfree(vsi);
 unlock_pf:
@@ -5067,6 +5081,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
 
 	/* free the ring and vector containers */
 	kfree(vsi->q_vectors);
+	kfree(vsi->tx_rings);
 
 	pf->vsi[vsi->idx] = NULL;
 	if (vsi->idx < pf->next_vsi)
@@ -5080,6 +5095,23 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
 	return 0;
 }
 
+/**
+ * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
+ * @vsi: the VSI being cleaned
+ **/
+static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
+{
+	int i;
+
+	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
+		kfree_rcu(vsi->tx_rings[i], rcu);
+		vsi->tx_rings[i] = NULL;
+		vsi->rx_rings[i] = NULL;
+	}
+
+	return 0;
+}
+
 /**
  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
  * @vsi: the VSI being configured
@@ -5087,28 +5119,16 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
 static int i40e_alloc_rings(struct i40e_vsi *vsi)
 {
 	struct i40e_pf *pf = vsi->back;
-	int ret = 0;
 	int i;
 
-	vsi->rx_rings = kcalloc(vsi->alloc_queue_pairs,
-				sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!vsi->rx_rings) {
-		ret = -ENOMEM;
-		goto err_alloc_rings;
-	}
-
-	vsi->tx_rings = kcalloc(vsi->alloc_queue_pairs,
-				sizeof(struct i40e_ring), GFP_KERNEL);
-	if (!vsi->tx_rings) {
-		ret = -ENOMEM;
-		kfree(vsi->rx_rings);
-		goto err_alloc_rings;
-	}
-
 	/* Set basic values in the rings to be used later during open() */
 	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
-		struct i40e_ring *rx_ring = &vsi->rx_rings[i];
-		struct i40e_ring *tx_ring = &vsi->tx_rings[i];
+		struct i40e_ring *tx_ring;
+		struct i40e_ring *rx_ring;
+
+		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
+		if (!tx_ring)
+			goto err_out;
 
 		tx_ring->queue_index = i;
 		tx_ring->reg_idx = vsi->base_queue + i;
@@ -5119,7 +5139,9 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 		tx_ring->count = vsi->num_desc;
 		tx_ring->size = 0;
 		tx_ring->dcb_tc = 0;
+		vsi->tx_rings[i] = tx_ring;
 
+		rx_ring = &tx_ring[1];
 		rx_ring->queue_index = i;
 		rx_ring->reg_idx = vsi->base_queue + i;
 		rx_ring->ring_active = false;
@@ -5133,24 +5155,14 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
 			set_ring_16byte_desc_enabled(rx_ring);
 		else
 			clear_ring_16byte_desc_enabled(rx_ring);
+		vsi->rx_rings[i] = rx_ring;
 	}
 
-err_alloc_rings:
-	return ret;
-}
-
-/**
- * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
- * @vsi: the VSI being cleaned
- **/
-static int i40e_vsi_clear_rings(struct i40e_vsi *vsi)
-{
-	if (vsi) {
-		kfree(vsi->rx_rings);
-		kfree(vsi->tx_rings);
-	}
-
 	return 0;
+
+err_out:
+	i40e_vsi_clear_rings(vsi);
+	return -ENOMEM;
 }
 
 /**
...
@@ -64,7 +64,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
 	if (!vsi)
 		return -ENOENT;
 
-	tx_ring = &vsi->tx_rings[0];
+	tx_ring = vsi->tx_rings[0];
 	dev = tx_ring->dev;
 
 	dma = dma_map_single(dev, fdir_data->raw_packet,
@@ -1823,7 +1823,7 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
-	struct i40e_ring *tx_ring = &vsi->tx_rings[skb->queue_mapping];
+	struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
 
 	/* hardware can't handle really short frames, hardware padding works
 	 * beyond this point
...
@@ -228,6 +228,8 @@ struct i40e_ring {
 
 	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
 	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */
+
+	struct rcu_head rcu;		/* to avoid race on free */
 } ____cacheline_internodealigned_in_smp;
 
 enum i40e_latency_range {
...