Commit c506cc5b authored by David S. Miller

Merge branch 'ibmvnic-next'

Sukadev Bhattiprolu says:

====================
ibmvnic: Reuse ltb, rx, tx pools

It can take a long time to free and reallocate rx and tx pools and the
long term buffer (LTB) during each reset of the VNIC. This is especially
true when the partition (LPAR) is heavily loaded and going through a
Logical Partition Migration (LPM). The drawn-out reset causes the LPAR
to lose connectivity for extended periods of time, resulting in "RMC
connection" errors and the LPM failing.

What is worse, during the LPM we could get a failover because of the
lost connectivity. At that point, the vnic driver releases even the
resources it has already allocated and starts over.

As long as the resources we have already allocated are valid/applicable,
we might as well hold on to them while trying to allocate the remaining
resources. This patch set attempts to reuse previously allocated
resources as long as they are valid. It vastly improves the time taken
for a vnic reset and significantly reduces the chances of getting the
RMC connection errors. We still get them occasionally, but they appear
to be for reasons other than memory allocation delays, and those are
still being investigated.

If the backing devices for a vnic adapter are not "matched" (see "pool
parameters" in patches 8 and 9), it is possible that we will still free
all the resources and reallocate them. If that becomes a common problem,
we will have to address it separately.

Thanks to Brian King, Cris Forno, Dany Madden, and Rick Lindsley for
their input and extensive testing.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 923990f6 bbd80930
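The whole series applies one pattern, to the LTBs and to the rx/tx pools
alike: keep a resource whose parameters are unchanged, scrub it, and
free/reallocate only on a real change. A minimal, self-contained
userspace sketch of that reuse test follows; struct buf, buf_reusable()
and buf_prepare() are illustrative names rather than the driver's, and
plain calloc() stands in for dma_alloc_coherent() and the VIOS map
request.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for the driver's long term buffer (LTB). */
struct buf {
	void *data;
	size_t size;
};

/* Reusable only if already allocated and the size is unchanged
 * (mirrors reuse_ltb() in the diff below).
 */
static bool buf_reusable(const struct buf *b, size_t size)
{
	return b->data && b->size == size;
}

/* Keep the old allocation when possible; otherwise free and
 * reallocate. Either way the caller gets zeroed memory.
 */
static int buf_prepare(struct buf *b, size_t size)
{
	if (!buf_reusable(b, size)) {
		free(b->data);
		b->data = calloc(1, size);
		if (!b->data)
			return -1;
		b->size = size;
	}
	/* scrub stale contents - cheap compared to realloc + remap */
	memset(b->data, 0, b->size);
	return 0;
}

int main(void)
{
	struct buf b = { 0 };

	buf_prepare(&b, 4096);	/* first "open": allocates */
	buf_prepare(&b, 4096);	/* reset, same size: reused */
	buf_prepare(&b, 8192);	/* size changed: freed and reallocated */
	printf("final size: %zu\n", b.size);
	free(b.data);
	return 0;
}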
@@ -108,6 +108,8 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter);
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
 static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 					 struct ibmvnic_sub_crq_queue *tx_scrq);
+static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+				struct ibmvnic_long_term_buff *ltb);
 
 struct ibmvnic_stat {
 	char name[ETH_GSTRING_LEN];
@@ -214,22 +216,77 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
 	return -ETIMEDOUT;
 }
 
+/**
+ * reuse_ltb() - Check if a long term buffer can be reused
+ * @ltb:  The long term buffer to be checked
+ * @size: The size of the long term buffer.
+ *
+ * An LTB can be reused unless its size has changed.
+ *
+ * Return: Return true if the LTB can be reused, false otherwise.
+ */
+static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
+{
+	return (ltb->buff && ltb->size == size);
+}
+
+/**
+ * alloc_long_term_buff() - Allocate a long term buffer (LTB)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb:     container object for the LTB
+ * @size:    size of the LTB
+ *
+ * Allocate an LTB of the specified size and notify VIOS.
+ *
+ * If the given @ltb already has the correct size, reuse it. Otherwise if
+ * it's non-NULL, free it. Then allocate a new one of the correct size.
+ * Notify the VIOS either way since we may now be working with a new VIOS.
+ *
+ * Allocating larger chunks of memory during resets, especially LPM or under
+ * low memory situations, can cause resets to fail/timeout and the LPAR to
+ * lose connectivity. So hold onto the LTB even if we fail to communicate
+ * with the VIOS and reuse it on the next open. Free the LTB when the
+ * adapter is closed.
+ *
+ * Return: 0 if we were able to allocate the LTB and notify the VIOS and
+ *	   a negative value otherwise.
+ */
 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 				struct ibmvnic_long_term_buff *ltb, int size)
 {
 	struct device *dev = &adapter->vdev->dev;
 	int rc;
 
-	ltb->size = size;
-	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
-				       GFP_KERNEL);
+	if (!reuse_ltb(ltb, size)) {
+		dev_dbg(dev,
+			"LTB size changed from 0x%llx to 0x%x, reallocating\n",
+			ltb->size, size);
+		free_long_term_buff(adapter, ltb);
+	}
 
-	if (!ltb->buff) {
-		dev_err(dev, "Couldn't alloc long term buffer\n");
-		return -ENOMEM;
+	if (ltb->buff) {
+		dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
+			ltb->map_id, ltb->size);
+	} else {
+		ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
+					       GFP_KERNEL);
+		if (!ltb->buff) {
+			dev_err(dev, "Couldn't alloc long term buffer\n");
+			return -ENOMEM;
+		}
+		ltb->size = size;
+
+		ltb->map_id = find_first_zero_bit(adapter->map_ids,
+						  MAX_MAP_ID);
+		bitmap_set(adapter->map_ids, ltb->map_id, 1);
+
+		dev_dbg(dev,
+			"Allocated new LTB [map %d, size 0x%llx]\n",
+			ltb->map_id, ltb->size);
 	}
-	ltb->map_id = adapter->map_id;
-	adapter->map_id++;
+
+	/* Ensure ltb is zeroed - especially when reusing it. */
+	memset(ltb->buff, 0, ltb->size);
 
 	mutex_lock(&adapter->fw_lock);
 	adapter->fw_done_rc = 0;
@@ -243,24 +300,20 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
 	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
 	if (rc) {
-		dev_err(dev,
-			"Long term map request aborted or timed out,rc = %d\n",
+		dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
 			rc);
 		goto out;
 	}
 
 	if (adapter->fw_done_rc) {
-		dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+		dev_err(dev, "Couldn't map LTB, rc = %d\n",
 			adapter->fw_done_rc);
 		rc = -1;
 		goto out;
 	}
 	rc = 0;
 out:
-	if (rc) {
-		dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-		ltb->buff = NULL;
-	}
+	/* don't free LTB on communication error - see function header */
 	mutex_unlock(&adapter->fw_lock);
 	return rc;
 }
@@ -281,48 +334,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
 	    adapter->reset_reason != VNIC_RESET_MOBILITY &&
 	    adapter->reset_reason != VNIC_RESET_TIMEOUT)
 		send_request_unmap(adapter, ltb->map_id);
+
 	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+
 	ltb->buff = NULL;
+	/* mark this map_id free */
+	bitmap_clear(adapter->map_ids, ltb->map_id, 1);
 	ltb->map_id = 0;
 }
 
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
-				struct ibmvnic_long_term_buff *ltb)
-{
-	struct device *dev = &adapter->vdev->dev;
-	int rc;
-
-	memset(ltb->buff, 0, ltb->size);
-
-	mutex_lock(&adapter->fw_lock);
-	adapter->fw_done_rc = 0;
-
-	reinit_completion(&adapter->fw_done);
-	rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
-	if (rc) {
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
-	}
-
-	rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
-	if (rc) {
-		dev_info(dev,
-			 "Reset failed, long term map request timed out or aborted\n");
-		mutex_unlock(&adapter->fw_lock);
-		return rc;
-	}
-
-	if (adapter->fw_done_rc) {
-		dev_info(dev,
-			 "Reset failed, attempting to free and reallocate buffer\n");
-		free_long_term_buff(adapter, ltb);
-		mutex_unlock(&adapter->fw_lock);
-		return alloc_long_term_buff(adapter, ltb, ltb->size);
-	}
-	mutex_unlock(&adapter->fw_lock);
-	return 0;
-}
-
 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	int i;
@@ -363,31 +383,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 	 * be 0.
 	 */
 	for (i = ind_bufp->index; i < count; ++i) {
-		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+		index = pool->free_map[pool->next_free];
+
+		/* We may be reusing the skb from earlier resets. Allocate
+		 * only if necessary. But since the LTB may have changed
+		 * during reset (see init_rx_pools()), update LTB below
+		 * even if reusing the skb.
+		 */
+		skb = pool->rx_buff[index].skb;
 		if (!skb) {
-			dev_err(dev, "Couldn't replenish rx buff\n");
-			adapter->replenish_no_mem++;
-			break;
+			skb = netdev_alloc_skb(adapter->netdev,
+					       pool->buff_size);
+			if (!skb) {
+				dev_err(dev, "Couldn't replenish rx buff\n");
+				adapter->replenish_no_mem++;
+				break;
+			}
 		}
 
-		index = pool->free_map[pool->next_free];
-
-		if (pool->rx_buff[index].skb)
-			dev_err(dev, "Inconsistent free_map!\n");
+		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+		pool->next_free = (pool->next_free + 1) % pool->size;
 
 		/* Copy the skb to the long term mapped DMA buffer */
 		offset = index * pool->buff_size;
 		dst = pool->long_term_buff.buff + offset;
 		memset(dst, 0, pool->buff_size);
 		dma_addr = pool->long_term_buff.addr + offset;
-		pool->rx_buff[index].data = dst;
 
-		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+		/* add the skb to an rx_buff in the pool */
+		pool->rx_buff[index].data = dst;
 		pool->rx_buff[index].dma = dma_addr;
 		pool->rx_buff[index].skb = skb;
 		pool->rx_buff[index].pool_index = pool->index;
 		pool->rx_buff[index].size = pool->buff_size;
 
+		/* queue the rx_buff for the next send_subcrq_indirect */
 		sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 		memset(sub_crq, 0, sizeof(*sub_crq));
 		sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
@@ -405,7 +435,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
 		shift = 8;
 #endif
 		sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
-		pool->next_free = (pool->next_free + 1) % pool->size;
 
+		/* if send_subcrq_indirect queue is full, flush to VIOS */
 		if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
 		    i == count - 1) {
 			lpar_rc =
@@ -523,53 +554,12 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
 	return 0;
 }
 
-static int reset_rx_pools(struct ibmvnic_adapter *adapter)
-{
-	struct ibmvnic_rx_pool *rx_pool;
-	u64 buff_size;
-	int rx_scrqs;
-	int i, j, rc;
-
-	if (!adapter->rx_pool)
-		return -1;
-
-	buff_size = adapter->cur_rx_buf_sz;
-	rx_scrqs = adapter->num_active_rx_pools;
-	for (i = 0; i < rx_scrqs; i++) {
-		rx_pool = &adapter->rx_pool[i];
-
-		netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
-
-		if (rx_pool->buff_size != buff_size) {
-			free_long_term_buff(adapter, &rx_pool->long_term_buff);
-			rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
-			rc = alloc_long_term_buff(adapter,
-						  &rx_pool->long_term_buff,
-						  rx_pool->size *
-						  rx_pool->buff_size);
-		} else {
-			rc = reset_long_term_buff(adapter,
-						  &rx_pool->long_term_buff);
-		}
-
-		if (rc)
-			return rc;
-
-		for (j = 0; j < rx_pool->size; j++)
-			rx_pool->free_map[j] = j;
-
-		memset(rx_pool->rx_buff, 0,
-		       rx_pool->size * sizeof(struct ibmvnic_rx_buff));
-
-		atomic_set(&rx_pool->available, 0);
-		rx_pool->next_alloc = 0;
-		rx_pool->next_free = 0;
-		rx_pool->active = 1;
-	}
-	return 0;
-}
-
+/**
+ * release_rx_pools() - Release any rx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
 static void release_rx_pools(struct ibmvnic_adapter *adapter)
 {
 	struct ibmvnic_rx_pool *rx_pool;
@@ -584,6 +574,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 		netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
 
 		kfree(rx_pool->free_map);
+
 		free_long_term_buff(adapter, &rx_pool->long_term_buff);
 
 		if (!rx_pool->rx_buff)
@@ -602,21 +593,91 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
 	kfree(adapter->rx_pool);
 	adapter->rx_pool = NULL;
 	adapter->num_active_rx_pools = 0;
+	adapter->prev_rx_pool_size = 0;
 }
 
+/**
+ * reuse_rx_pools() - Check if the existing rx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing rx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and size of each buffer) have not
+ * changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ *       which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the rx pools can be reused, false otherwise.
+ */
+static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
+{
+	u64 old_num_pools, new_num_pools;
+	u64 old_pool_size, new_pool_size;
+	u64 old_buff_size, new_buff_size;
+
+	if (!adapter->rx_pool)
+		return false;
+
+	old_num_pools = adapter->num_active_rx_pools;
+	new_num_pools = adapter->req_rx_queues;
+
+	old_pool_size = adapter->prev_rx_pool_size;
+	new_pool_size = adapter->req_rx_add_entries_per_subcrq;
+
+	old_buff_size = adapter->prev_rx_buf_sz;
+	new_buff_size = adapter->cur_rx_buf_sz;
+
+	/* Require buff size to be exactly same for now */
+	if (old_buff_size != new_buff_size)
+		return false;
+
+	if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+		return true;
+
+	if (old_num_pools < adapter->min_rx_queues ||
+	    old_num_pools > adapter->max_rx_queues ||
+	    old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
+	    old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+		return false;
+
+	return true;
+}
+
+/**
+ * init_rx_pools(): Initialize the set of receiver pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of receiver pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing rx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
 static int init_rx_pools(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	struct device *dev = &adapter->vdev->dev;
 	struct ibmvnic_rx_pool *rx_pool;
-	int rxadd_subcrqs;
+	u64 num_pools;
+	u64 pool_size;		/* # of buffers in one pool */
 	u64 buff_size;
 	int i, j;
 
-	rxadd_subcrqs = adapter->num_active_rx_scrqs;
+	pool_size = adapter->req_rx_add_entries_per_subcrq;
+	num_pools = adapter->req_rx_queues;
 	buff_size = adapter->cur_rx_buf_sz;
 
-	adapter->rx_pool = kcalloc(rxadd_subcrqs,
+	if (reuse_rx_pools(adapter)) {
+		dev_dbg(dev, "Reusing rx pools\n");
+		goto update_ltb;
+	}
+
+	/* Allocate/populate the pools. */
+	release_rx_pools(adapter);
+
+	adapter->rx_pool = kcalloc(num_pools,
 				   sizeof(struct ibmvnic_rx_pool),
 				   GFP_KERNEL);
@@ -624,26 +685,27 @@ static int init_rx_pools(struct net_device *netdev)
 		return -1;
 	}
 
-	adapter->num_active_rx_pools = rxadd_subcrqs;
+	/* Set num_active_rx_pools early. If we fail below after partial
+	 * allocation, release_rx_pools() will know how many to look for.
+	 */
+	adapter->num_active_rx_pools = num_pools;
 
-	for (i = 0; i < rxadd_subcrqs; i++) {
+	for (i = 0; i < num_pools; i++) {
 		rx_pool = &adapter->rx_pool[i];
 
 		netdev_dbg(adapter->netdev,
 			   "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
-			   i, adapter->req_rx_add_entries_per_subcrq,
-			   buff_size);
+			   i, pool_size, buff_size);
 
-		rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+		rx_pool->size = pool_size;
 		rx_pool->index = i;
 		rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
-		rx_pool->active = 1;
 
 		rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
 					    GFP_KERNEL);
 		if (!rx_pool->free_map) {
-			release_rx_pools(adapter);
-			return -1;
+			dev_err(dev, "Couldn't alloc free_map %d\n", i);
+			goto out_release;
 		}
 
 		rx_pool->rx_buff = kcalloc(rx_pool->size,
@@ -651,69 +713,58 @@ static int init_rx_pools(struct net_device *netdev)
 					   GFP_KERNEL);
 		if (!rx_pool->rx_buff) {
 			dev_err(dev, "Couldn't alloc rx buffers\n");
-			release_rx_pools(adapter);
-			return -1;
+			goto out_release;
 		}
+	}
+
+	adapter->prev_rx_pool_size = pool_size;
+	adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
 
-		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
-					 rx_pool->size * rx_pool->buff_size)) {
-			release_rx_pools(adapter);
-			return -1;
-		}
-
-		for (j = 0; j < rx_pool->size; ++j)
-			rx_pool->free_map[j] = j;
-
-		atomic_set(&rx_pool->available, 0);
-		rx_pool->next_alloc = 0;
-		rx_pool->next_free = 0;
-	}
-
-	return 0;
-}
-
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
-			     struct ibmvnic_tx_pool *tx_pool)
-{
-	int rc, i;
-
-	rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
-	if (rc)
-		return rc;
-
-	memset(tx_pool->tx_buff, 0,
-	       tx_pool->num_buffers *
-	       sizeof(struct ibmvnic_tx_buff));
-
-	for (i = 0; i < tx_pool->num_buffers; i++)
-		tx_pool->free_map[i] = i;
-
-	tx_pool->consumer_index = 0;
-	tx_pool->producer_index = 0;
-
-	return 0;
-}
-
-static int reset_tx_pools(struct ibmvnic_adapter *adapter)
-{
-	int tx_scrqs;
-	int i, rc;
-
-	if (!adapter->tx_pool)
-		return -1;
-
-	tx_scrqs = adapter->num_active_tx_pools;
-	for (i = 0; i < tx_scrqs; i++) {
-		ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
-		rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
-		if (rc)
-			return rc;
-		rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
-		if (rc)
-			return rc;
+update_ltb:
+	for (i = 0; i < num_pools; i++) {
+		rx_pool = &adapter->rx_pool[i];
+		dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
+			i, rx_pool->size, rx_pool->buff_size);
+
+		if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+					 rx_pool->size * rx_pool->buff_size))
+			goto out;
+
+		for (j = 0; j < rx_pool->size; ++j) {
+			struct ibmvnic_rx_buff *rx_buff;
+
+			rx_pool->free_map[j] = j;
+
+			/* NOTE: Don't clear rx_buff->skb here - it would leak
+			 * memory! replenish_rx_pool() will reuse the skbs or
+			 * allocate as necessary.
+			 */
+			rx_buff = &rx_pool->rx_buff[j];
+			rx_buff->dma = 0;
+			rx_buff->data = 0;
+			rx_buff->size = 0;
+			rx_buff->pool_index = 0;
+		}
+
+		/* Mark pool "empty" so replenish_rx_pool() will
+		 * update the LTB info for each buffer
+		 */
+		atomic_set(&rx_pool->available, 0);
+		rx_pool->next_alloc = 0;
+		rx_pool->next_free = 0;
+		/* replenish_rx_pool() may have called deactivate_rx_pools()
+		 * on failover. Ensure pool is active now.
+		 */
+		rx_pool->active = 1;
 	}
 	return 0;
+out_release:
+	release_rx_pools(adapter);
+out:
+	/* We failed to allocate one or more LTBs or map them on the VIOS.
+	 * Hold onto the pools and any LTBs that we did allocate/map.
+	 */
+	return -1;
 }
 
 static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -735,10 +786,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
 	free_long_term_buff(adapter, &tx_pool->long_term_buff);
 }
 
+/**
+ * release_tx_pools() - Release any tx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
 static void release_tx_pools(struct ibmvnic_adapter *adapter)
 {
 	int i;
 
+	/* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
+	 * both NULL or both non-NULL. So we only need to check one.
+	 */
 	if (!adapter->tx_pool)
 		return;
@@ -752,84 +812,218 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
 	kfree(adapter->tso_pool);
 	adapter->tso_pool = NULL;
 	adapter->num_active_tx_pools = 0;
+	adapter->prev_tx_pool_size = 0;
 }
 
 static int init_one_tx_pool(struct net_device *netdev,
 			    struct ibmvnic_tx_pool *tx_pool,
-			    int num_entries, int buf_size)
+			    int pool_size, int buf_size)
 {
-	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 	int i;
 
-	tx_pool->tx_buff = kcalloc(num_entries,
+	tx_pool->tx_buff = kcalloc(pool_size,
 				   sizeof(struct ibmvnic_tx_buff),
 				   GFP_KERNEL);
 	if (!tx_pool->tx_buff)
 		return -1;
 
-	if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
-				 num_entries * buf_size))
-		return -1;
+	tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
+	if (!tx_pool->free_map) {
+		kfree(tx_pool->tx_buff);
+		tx_pool->tx_buff = NULL;
+		return -1;
+	}
 
-	tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
-	if (!tx_pool->free_map)
-		return -1;
-
-	for (i = 0; i < num_entries; i++)
+	for (i = 0; i < pool_size; i++)
 		tx_pool->free_map[i] = i;
 
 	tx_pool->consumer_index = 0;
 	tx_pool->producer_index = 0;
-	tx_pool->num_buffers = num_entries;
+	tx_pool->num_buffers = pool_size;
 	tx_pool->buf_size = buf_size;
 
 	return 0;
 }
 
+/**
+ * reuse_tx_pools() - Check if the existing tx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing tx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and MTU) have not changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ *       which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the tx pools can be reused, false otherwise.
+ */
+static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
+{
+	u64 old_num_pools, new_num_pools;
+	u64 old_pool_size, new_pool_size;
+	u64 old_mtu, new_mtu;
+
+	if (!adapter->tx_pool)
+		return false;
+
+	old_num_pools = adapter->num_active_tx_pools;
+	new_num_pools = adapter->num_active_tx_scrqs;
+	old_pool_size = adapter->prev_tx_pool_size;
+	new_pool_size = adapter->req_tx_entries_per_subcrq;
+	old_mtu = adapter->prev_mtu;
+	new_mtu = adapter->req_mtu;
+
+	/* Require the MTU to be exactly the same to reuse pools for now */
+	if (old_mtu != new_mtu)
+		return false;
+
+	if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+		return true;
+
+	if (old_num_pools < adapter->min_tx_queues ||
+	    old_num_pools > adapter->max_tx_queues ||
+	    old_pool_size < adapter->min_tx_entries_per_subcrq ||
+	    old_pool_size > adapter->max_tx_entries_per_subcrq)
+		return false;
+
+	return true;
+}
+
+/**
+ * init_tx_pools(): Initialize the set of transmit pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of transmit pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing tx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
 static int init_tx_pools(struct net_device *netdev)
 {
 	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
-	int tx_subcrqs;
+	struct device *dev = &adapter->vdev->dev;
+	int num_pools;
+	u64 pool_size;		/* # of buffers in pool */
 	u64 buff_size;
-	int i, rc;
+	int i, j, rc;
+
+	num_pools = adapter->req_tx_queues;
 
-	tx_subcrqs = adapter->num_active_tx_scrqs;
-	adapter->tx_pool = kcalloc(tx_subcrqs,
+	/* We must notify the VIOS about the LTB on all resets - but we only
+	 * need to alloc/populate pools if either the number of buffers or
+	 * the size of each buffer in the pool has changed.
+	 */
+	if (reuse_tx_pools(adapter)) {
+		netdev_dbg(netdev, "Reusing tx pools\n");
+		goto update_ltb;
+	}
+
+	/* Allocate/populate the pools. */
+	release_tx_pools(adapter);
+
+	pool_size = adapter->req_tx_entries_per_subcrq;
+	num_pools = adapter->num_active_tx_scrqs;
+
+	adapter->tx_pool = kcalloc(num_pools,
 				   sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
 	if (!adapter->tx_pool)
 		return -1;
 
-	adapter->tso_pool = kcalloc(tx_subcrqs,
+	adapter->tso_pool = kcalloc(num_pools,
 				    sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+	/* To simplify release_tx_pools() ensure that ->tx_pool and
+	 * ->tso_pool are either both NULL or both non-NULL.
+	 */
 	if (!adapter->tso_pool) {
 		kfree(adapter->tx_pool);
 		adapter->tx_pool = NULL;
 		return -1;
 	}
 
-	adapter->num_active_tx_pools = tx_subcrqs;
+	/* Set num_active_tx_pools early. If we fail below after partial
+	 * allocation, release_tx_pools() will know how many to look for.
+	 */
+	adapter->num_active_tx_pools = num_pools;
+
+	buff_size = adapter->req_mtu + VLAN_HLEN;
+	buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
+
+	for (i = 0; i < num_pools; i++) {
+		dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
+			i, adapter->req_tx_entries_per_subcrq, buff_size);
 
-	for (i = 0; i < tx_subcrqs; i++) {
-		buff_size = adapter->req_mtu + VLAN_HLEN;
-		buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
 		rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
-				      adapter->req_tx_entries_per_subcrq,
-				      buff_size);
-		if (rc) {
-			release_tx_pools(adapter);
-			return rc;
-		}
+				      pool_size, buff_size);
+		if (rc)
+			goto out_release;
 
 		rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
 				      IBMVNIC_TSO_BUFS,
 				      IBMVNIC_TSO_BUF_SZ);
-		if (rc) {
-			release_tx_pools(adapter);
-			return rc;
-		}
+		if (rc)
+			goto out_release;
+	}
+
+	adapter->prev_tx_pool_size = pool_size;
+	adapter->prev_mtu = adapter->req_mtu;
+
+update_ltb:
+	/* NOTE: All tx_pools have the same number of buffers (which is
+	 *       the same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
+	 *       buffers (see the calls to init_one_tx_pool() for these).
+	 *       For consistency, we use tx_pool->num_buffers and
+	 *       tso_pool->num_buffers below.
+	 */
+	rc = -1;
+	for (i = 0; i < num_pools; i++) {
+		struct ibmvnic_tx_pool *tso_pool;
+		struct ibmvnic_tx_pool *tx_pool;
+		u32 ltb_size;
+
+		tx_pool = &adapter->tx_pool[i];
+		ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
+		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
+					 ltb_size))
+			goto out;
+
+		dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
+			i, tx_pool->long_term_buff.buff,
+			tx_pool->num_buffers, tx_pool->buf_size);
+
+		tx_pool->consumer_index = 0;
+		tx_pool->producer_index = 0;
+
+		for (j = 0; j < tx_pool->num_buffers; j++)
+			tx_pool->free_map[j] = j;
+
+		tso_pool = &adapter->tso_pool[i];
+		ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
+		if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
+					 ltb_size))
+			goto out;
+
+		dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
+			i, tso_pool->long_term_buff.buff,
+			tso_pool->num_buffers, tso_pool->buf_size);
+
+		tso_pool->consumer_index = 0;
+		tso_pool->producer_index = 0;
+
+		for (j = 0; j < tso_pool->num_buffers; j++)
+			tso_pool->free_map[j] = j;
 	}
 
 	return 0;
+out_release:
+	release_tx_pools(adapter);
+out:
+	/* We failed to allocate one or more LTBs or map them on the VIOS.
+	 * Hold onto the pools and any LTBs that we did allocate/map.
+	 */
+	return rc;
 }
 
 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
@@ -1020,9 +1214,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
 {
 	release_vpd_data(adapter);
 
-	release_tx_pools(adapter);
-	release_rx_pools(adapter);
-
 	release_napi(adapter);
 	release_login_buffer(adapter);
 	release_login_rsp_buffer(adapter);
@@ -1198,8 +1389,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
 		return rc;
 	}
 
-	adapter->map_id = 1;
-
 	rc = init_napi(adapter);
 	if (rc)
 		return rc;
@@ -1296,6 +1485,8 @@ static int ibmvnic_open(struct net_device *netdev)
 		if (rc) {
 			netdev_err(netdev, "failed to initialize resources\n");
 			release_resources(adapter);
+			release_rx_pools(adapter);
+			release_tx_pools(adapter);
 			goto out;
 		}
 	}
@@ -1424,9 +1615,6 @@ static void ibmvnic_cleanup(struct net_device *netdev)
 	ibmvnic_napi_disable(adapter);
 	ibmvnic_disable_irqs(adapter);
-
-	clean_rx_pools(adapter);
-	clean_tx_pools(adapter);
 }
 
 static int __ibmvnic_close(struct net_device *netdev)
@@ -1460,6 +1648,8 @@ static int ibmvnic_close(struct net_device *netdev)
 	rc = __ibmvnic_close(netdev);
 	ibmvnic_cleanup(netdev);
+	clean_rx_pools(adapter);
+	clean_tx_pools(adapter);
 
 	return rc;
 }
@@ -2036,9 +2226,9 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
 static int do_reset(struct ibmvnic_adapter *adapter,
 		    struct ibmvnic_rwi *rwi, u32 reset_state)
 {
+	struct net_device *netdev = adapter->netdev;
 	u64 old_num_rx_queues, old_num_tx_queues;
 	u64 old_num_rx_slots, old_num_tx_slots;
-	struct net_device *netdev = adapter->netdev;
 	int rc;
 
 	netdev_dbg(adapter->netdev,
@@ -2188,8 +2378,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 		    !adapter->rx_pool ||
 		    !adapter->tso_pool ||
 		    !adapter->tx_pool) {
-			release_rx_pools(adapter);
-			release_tx_pools(adapter);
 			release_napi(adapter);
 			release_vpd_data(adapter);
@@ -2198,16 +2386,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
 				goto out;
 		} else {
-			rc = reset_tx_pools(adapter);
+			rc = init_tx_pools(netdev);
 			if (rc) {
-				netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+				netdev_dbg(netdev,
+					   "init tx pools failed (%d)\n",
 					   rc);
 				goto out;
 			}
 
-			rc = reset_rx_pools(adapter);
+			rc = init_rx_pools(netdev);
 			if (rc) {
-				netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+				netdev_dbg(netdev,
+					   "init rx pools failed (%d)\n",
 					   rc);
 				goto out;
 			}
@@ -4770,9 +4960,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq,
 		dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
 		return;
 	}
-	netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
-		   crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
-		   crq->query_map_rsp.free_pages);
+	netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
+		   crq->query_map_rsp.page_size,
+		   __be32_to_cpu(crq->query_map_rsp.tot_pages),
+		   __be32_to_cpu(crq->query_map_rsp.free_pages));
 }
 
 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
@@ -5519,6 +5710,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
 	adapter->login_pending = false;
+	memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
+	/* map_ids start at 1, so ensure map_id 0 is always "in-use" */
+	bitmap_set(adapter->map_ids, 0, 1);
 
 	ether_addr_copy(adapter->mac_addr, mac_addr_p);
 	ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
@@ -5539,6 +5733,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
 	init_completion(&adapter->reset_done);
 	init_completion(&adapter->stats_done);
 	clear_bit(0, &adapter->resetting);
+	adapter->prev_rx_buf_sz = 0;
+	adapter->prev_mtu = 0;
 
 	init_success = false;
 	do {
@@ -5639,6 +5835,8 @@ static void ibmvnic_remove(struct vio_dev *dev)
 	unregister_netdevice(netdev);
 
 	release_resources(adapter);
+	release_rx_pools(adapter);
+	release_tx_pools(adapter);
 	release_sub_crqs(adapter, 1);
 	release_crq_queue(adapter);
...
@@ -827,7 +827,7 @@ struct ibmvnic_rx_buff {
 struct ibmvnic_rx_pool {
 	struct ibmvnic_rx_buff *rx_buff;
-	int size;
+	int size;			/* # of buffers in the pool */
 	int index;
 	int buff_size;
 	atomic_t available;
@@ -967,6 +967,7 @@ struct ibmvnic_adapter {
 	u64 min_mtu;
 	u64 max_mtu;
 	u64 req_mtu;
+	u64 prev_mtu;
 	u64 max_multicast_filters;
 	u64 vlan_header_insertion;
 	u64 rx_vlan_header_insertion;
@@ -979,13 +980,18 @@ struct ibmvnic_adapter {
 	u64 opt_tx_entries_per_subcrq;
 	u64 opt_rxba_entries_per_subcrq;
 	__be64 tx_rx_desc_req;
-	u8 map_id;
+#define MAX_MAP_ID	255
+	DECLARE_BITMAP(map_ids, MAX_MAP_ID);
 	u32 num_active_rx_scrqs;
 	u32 num_active_rx_pools;
 	u32 num_active_rx_napi;
 	u32 num_active_tx_scrqs;
 	u32 num_active_tx_pools;
+	u32 prev_rx_pool_size;
+	u32 prev_tx_pool_size;
 	u32 cur_rx_buf_sz;
+	u32 prev_rx_buf_sz;
 
 	struct tasklet_struct tasklet;
 	enum vnic_state state;
...
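The map_id handling above replaces a monotonically increasing u8 counter
with a bitmap so that ids can be recycled as LTBs are freed and
reallocated across resets, with id 0 kept permanently "in use". A rough
userspace approximation of that allocator follows; the kernel's
find_first_zero_bit()/bitmap_set()/bitmap_clear() helpers are emulated
here with a plain bool array purely for illustration.

#include <stdbool.h>
#include <stdio.h>

#define MAX_MAP_ID 255

static bool map_ids[MAX_MAP_ID];

/* Return the lowest free id and mark it in use, or -1 if none left
 * (mimics find_first_zero_bit() + bitmap_set()).
 */
static int alloc_map_id(void)
{
	for (int i = 0; i < MAX_MAP_ID; i++) {
		if (!map_ids[i]) {
			map_ids[i] = true;
			return i;
		}
	}
	return -1;		/* no free ids */
}

/* Release an id so it can be handed out again (mimics bitmap_clear()). */
static void free_map_id(int id)
{
	map_ids[id] = false;
}

int main(void)
{
	map_ids[0] = true;	/* map ids start at 1; keep 0 "in use" */

	int a = alloc_map_id();	/* 1 */
	int b = alloc_map_id();	/* 2 */
	free_map_id(a);		/* 1 becomes reusable */
	printf("%d %d %d\n", a, b, alloc_map_id());	/* prints: 1 2 1 */
	return 0;
}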