Commit 116ca924 authored by Vishal Kulkarni, committed by David S. Miller

cxgb4: fix checks for max queues to allocate

Hardware can support more than the 8 queues currently imposed as a limit by
netif_get_num_default_rss_queues(). So, rework and fix the checks for the
maximum number of queues to allocate: base the checks on how many queue sets
the hardware actually supports, or on the number of online CPUs, whichever
is lower.

Fixes: 5952dde7 ("cxgb4: set maximal number of default RSS queues")
Signed-off-by: Vishal Kulkarni <vishal@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 20d8bb0d
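
In essence, the non-DCB path in the diff below gives each non-10G port one
queue set, splits the remaining Ethernet queue sets across the 10G-capable
ports, and caps the result at the number of online CPUs instead of the fixed
limit of 8 from netif_get_num_default_rss_queues(). The following is a
minimal standalone C sketch of that sizing rule, not driver code; the values
(including the MAX_ETH_QSETS stand-in) are hypothetical placeholders for
adapter state that the real driver reads from hardware and from
num_online_cpus().

#include <stdio.h>

#define MAX_ETH_QSETS 32	/* assumed stand-in for the driver constant */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical adapter state: two 10G-capable ports, no 1G ports. */
	unsigned int avail_eth_qsets = 28;	/* qsets left for Ethernet use */
	unsigned int nports = 2, n10g = 2;
	unsigned int ncpus = 16;		/* stand-in for num_online_cpus() */
	unsigned int n1g = nports - n10g;
	unsigned int q10g = 0, q1g = 1;		/* 1 qset per non-10G port */

	avail_eth_qsets = min_u32(avail_eth_qsets, MAX_ETH_QSETS);

	/* One qset per non-10G port; split the rest across 10G ports. */
	if (n10g)
		q10g = (avail_eth_qsets - n1g) / n10g;

	/* New behavior: cap at the online CPU count, not at a fixed 8. */
	q10g = min_u32(q10g, ncpus);

	/* Prints "q10g=14 q1g=1 total=28" for these inputs. */
	printf("q10g=%u q1g=%u total=%u\n",
	       q10g, q1g, q10g * n10g + q1g * n1g);
	return 0;
}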
@@ -5381,12 +5381,11 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
 	u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+	u32 i, n10g = 0, qidx = 0, n1g = 0;
+	u32 ncpus = num_online_cpus();
 	u32 niqflint, neq, num_ulds;
 	struct sge *s = &adap->sge;
-	u32 i, n10g = 0, qidx = 0;
-#ifndef CONFIG_CHELSIO_T4_DCB
-	int q10g = 0;
-#endif
+	u32 q10g = 0, q1g;
 
 	/* Reduce memory usage in kdump environment, disable all offload. */
 	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
@@ -5424,44 +5423,50 @@ static int cfg_queues(struct adapter *adap)
 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 
 	avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
+
+	/* We default to 1 queue per non-10G port and up to # of cores queues
+	 * per 10G port.
+	 */
+	if (n10g)
+		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
+
+	n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
 	/* For Data Center Bridging support we need to be able to support up
 	 * to 8 Traffic Priorities; each of which will be assigned to its
 	 * own TX Queue in order to prevent Head-Of-Line Blocking.
 	 */
+	q1g = 8;
 	if (adap->params.nports * 8 > avail_eth_qsets) {
 		dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
 			avail_eth_qsets, adap->params.nports * 8);
 		return -ENOMEM;
 	}
 
-	for_each_port(adap, i) {
-		struct port_info *pi = adap2pinfo(adap, i);
-
-		pi->first_qset = qidx;
-		pi->nqsets = is_kdump_kernel() ? 1 : 8;
-		qidx += pi->nqsets;
-	}
-#else /* !CONFIG_CHELSIO_T4_DCB */
-	/* We default to 1 queue per non-10G port and up to # of cores queues
-	 * per 10G port.
-	 */
-	if (n10g)
-		q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
-	if (q10g > netif_get_num_default_rss_queues())
-		q10g = netif_get_num_default_rss_queues();
+	if (adap->params.nports * ncpus < avail_eth_qsets)
+		q10g = max(8U, ncpus);
+	else
+		q10g = max(8U, q10g);
 
-	if (is_kdump_kernel())
+	while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+		q10g--;
+#else /* !CONFIG_CHELSIO_T4_DCB */
+	q1g = 1;
+	q10g = min(q10g, ncpus);
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+
+	if (is_kdump_kernel()) {
 		q10g = 1;
+		q1g = 1;
+	}
 
 	for_each_port(adap, i) {
 		struct port_info *pi = adap2pinfo(adap, i);
 
 		pi->first_qset = qidx;
-		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
 		qidx += pi->nqsets;
 	}
-#endif /* !CONFIG_CHELSIO_T4_DCB */
 
 	s->ethqsets = qidx;
 	s->max_ethqsets = qidx;	/* MSI-X may lower it later */
@@ -5473,7 +5478,7 @@ static int cfg_queues(struct adapter *adap)
 	 * capped by the number of available cores.
 	 */
 	num_ulds = adap->num_uld + adap->num_ofld_uld;
-	i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+	i = min_t(u32, MAX_OFLD_QSETS, ncpus);
 	avail_uld_qsets = roundup(i, adap->params.nports);
 	if (avail_qsets < num_ulds * adap->params.nports) {
 		adap->params.offload = 0;
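
When CONFIG_CHELSIO_T4_DCB is enabled, the diff above instead reserves 8
queue sets per port (one per Traffic Priority, to prevent Head-Of-Line
Blocking), starts q10g at no less than 8, and then decrements it until the
10G ports' queues fit in whatever remains after the per-port reservation. A
short standalone sketch of just that trimming loop, again with hypothetical
numbers rather than real adapter state:

#include <stdio.h>

int main(void)
{
	/* Hypothetical: 4 ports, 2 of them 10G-capable, DCB enabled. */
	unsigned int avail_eth_qsets = 40;
	unsigned int n10g = 2, n1g = 2;
	unsigned int q1g = 8;		/* 8 Traffic Priorities per port */
	unsigned int q10g = 16;		/* e.g. max(8U, ncpus) with 16 CPUs */

	/* Shrink q10g until the 10G queues fit the remaining budget. */
	while (q10g * n10g > avail_eth_qsets - n1g * q1g)
		q10g--;

	/* Prints "q10g=12 (24 <= 24)" for these inputs. */
	printf("q10g=%u (%u <= %u)\n", q10g,
	       q10g * n10g, avail_eth_qsets - n1g * q1g);
	return 0;
}

With these inputs q10g shrinks from 16 to 12, so the two 10G ports consume
exactly the 24 queue sets left after reserving 8 for each non-10G port.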