Commit c66fa19c authored by Matan Barak, committed by David S. Miller

net/mlx4: Add EQ pool

Previously, mlx4_en allocated EQs and used them exclusively.
This hurt RoCE performance, since event-sensitive applications
were limited to the legacy EQs.

Change that by introducing an EQ pool managed by mlx4_core.
EQs are assigned to ports (when there is only a limited number
of EQs, multiple ports may be assigned to the same EQ).

An exception to this rule is the ASYNC EQ, which handles various events.

Legacy EQs are removed entirely, as all EQs can now be shared.

When a consumer (mlx4_ib/mlx4_en) requests an EQ, it asks for
an EQ serving a specific port. The core driver calculates which
EQ should be assigned to that request.

Because IRQs are shared between the IB and Ethernet modules, their
names include only the PCI device's BDF address.
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 48564135
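For orientation before the diff: a consumer no longer passes an IRQ name and rmap to mlx4_assign_eq(); it names a port and receives a vector. The sketch below is not part of the patch — the helper names example_request_vector/example_release_vector are invented for illustration — but the prototypes it calls are exactly the ones this patch adds to include/linux/mlx4/device.h:

        #include <linux/mlx4/device.h>

        /* Illustrative only: obtain a completion vector serving `port`.
         * *vector may carry a preferred vector; if it is not valid for
         * this port, ask mlx4_core to assign one from the pool.
         */
        static int example_request_vector(struct mlx4_dev *dev, u8 port, int *vector)
        {
                if (mlx4_is_eq_vector_valid(dev, port, *vector))
                        return 0;       /* already usable as-is */

                return mlx4_assign_eq(dev, port, vector);
        }

        /* Release is symmetric; the ref_count field this patch adds to
         * struct mlx4_eq suggests shared EQs are reference-counted.
         */
        static void example_release_vector(struct mlx4_dev *dev, int vector)
        {
                mlx4_release_eq(dev, vector);
        }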
@@ -2041,77 +2041,52 @@ static void init_pkeys(struct mlx4_ib_dev *ibdev)
 static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
-        char name[80];
-        int eq_per_port = 0;
-        int added_eqs = 0;
-        int total_eqs = 0;
-        int i, j, eq;
-
-        /* Legacy mode or comp_pool is not large enough */
-        if (dev->caps.comp_pool == 0 ||
-            dev->caps.num_ports > dev->caps.comp_pool)
-                return;
-
-        eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
-
-        /* Init eq table */
-        added_eqs = 0;
-        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
-                added_eqs += eq_per_port;
-
-        total_eqs = dev->caps.num_comp_vectors + added_eqs;
+        int i, j, eq = 0, total_eqs = 0;
 
-        ibdev->eq_table = kzalloc(total_eqs * sizeof(int), GFP_KERNEL);
+        ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors,
+                                  sizeof(ibdev->eq_table[0]), GFP_KERNEL);
         if (!ibdev->eq_table)
                 return;
 
-        ibdev->eq_added = added_eqs;
-
-        eq = 0;
-        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) {
-                for (j = 0; j < eq_per_port; j++) {
-                        snprintf(name, sizeof(name), "mlx4-ib-%d-%d@%s",
-                                 i, j, dev->persist->pdev->bus->name);
-                        /* Set IRQ for specific name (per ring) */
-                        if (mlx4_assign_eq(dev, name, NULL,
-                                           &ibdev->eq_table[eq])) {
-                                /* Use legacy (same as mlx4_en driver) */
-                                pr_warn("Can't allocate EQ %d; reverting to legacy\n", eq);
-                                ibdev->eq_table[eq] =
-                                        (eq % dev->caps.num_comp_vectors);
-                        }
-                        eq++;
+        for (i = 1; i <= dev->caps.num_ports; i++) {
+                for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
+                     j++, total_eqs++) {
+                        if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
+                                continue;
+
+                        ibdev->eq_table[eq] = total_eqs;
+                        if (!mlx4_assign_eq(dev, i,
+                                            &ibdev->eq_table[eq]))
+                                eq++;
+                        else
+                                ibdev->eq_table[eq] = -1;
                 }
         }
 
-        /* Fill the reset of the vector with legacy EQ */
-        for (i = 0, eq = added_eqs; i < dev->caps.num_comp_vectors; i++)
-                ibdev->eq_table[eq++] = i;
+        for (i = eq; i < dev->caps.num_comp_vectors;
+             ibdev->eq_table[i++] = -1)
+                ;
 
         /* Advertise the new number of EQs to clients */
-        ibdev->ib_dev.num_comp_vectors = total_eqs;
+        ibdev->ib_dev.num_comp_vectors = eq;
 }
 
 static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
 {
         int i;
+        int total_eqs = ibdev->ib_dev.num_comp_vectors;
 
-        /* no additional eqs were added */
+        /* no eqs were allocated */
         if (!ibdev->eq_table)
                 return;
 
         /* Reset the advertised EQ number */
-        ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
+        ibdev->ib_dev.num_comp_vectors = 0;
 
-        /* Free only the added eqs */
-        for (i = 0; i < ibdev->eq_added; i++) {
-                /* Don't free legacy eqs if used */
-                if (ibdev->eq_table[i] <= dev->caps.num_comp_vectors)
-                        continue;
+        for (i = 0; i < total_eqs; i++)
                 mlx4_release_eq(dev, ibdev->eq_table[i]);
-        }
 
         kfree(ibdev->eq_table);
+        ibdev->eq_table = NULL;
 }
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
...
@@ -523,7 +523,6 @@ struct mlx4_ib_dev {
         struct mlx4_ib_iboe     iboe;
         int                     counters[MLX4_MAX_PORTS];
         int                    *eq_table;
-        int                     eq_added;
         struct kobject         *iov_parent;
         struct kobject         *ports_parent;
         struct kobject         *dev_ports_parent[MLX4_MFUNC_MAX];
...
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
         u64 mtt_addr;
         int err;
 
-        if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+        if (vector >= dev->caps.num_comp_vectors)
                 return -EINVAL;
 
         cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
         cq_context->flags |= cpu_to_be32(1 << 19);
 
         cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
-        cq_context->comp_eqn        = priv->eq_table.eq[vector].eqn;
+        cq_context->comp_eqn        = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
         cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
 
         mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
         init_completion(&cq->free);
         cq->comp = mlx4_add_cq_to_tasklet;
         cq->tasklet_ctx.priv =
-                &priv->eq_table.eq[cq->vector].tasklet_ctx;
+                &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
         INIT_LIST_HEAD(&cq->tasklet_ctx.list);
 
-        cq->irq = priv->eq_table.eq[cq->vector].irq;
+        cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
         return 0;
 
 err_radix:
@@ -368,7 +368,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
         if (err)
                 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
-        synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+        synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
 
         spin_lock_irq(&cq_table->lock);
         radix_tree_delete(&cq_table->tree, cq->cqn);
...
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
 
         cq->ring = ring;
         cq->is_tx = mode;
+        cq->vector = mdev->dev->caps.num_comp_vectors;
 
         /* Allocate HW buffers on provided NUMA node.
          * dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
         int err = 0;
         char name[25];
         int timestamp_en = 0;
-        struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
-                priv->dev->rx_cpu_rmap;
-#else
-                NULL;
-#endif
+        bool assigned_eq = false;
 
         cq->dev = mdev->pndev[priv->port];
         cq->mcq.set_ci_db  = cq->wqres.db.db;
@@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
         memset(cq->buf, 0, cq->buf_size);
 
         if (cq->is_tx == RX) {
-                if (mdev->dev->caps.comp_pool) {
-                        if (!cq->vector) {
-                                sprintf(name, "%s-%d", priv->dev->name,
-                                        cq->ring);
-                                /* Set IRQ for specific name (per ring) */
-                                if (mlx4_assign_eq(mdev->dev, name, rmap,
-                                                   &cq->vector)) {
-                                        cq->vector = (cq->ring + 1 + priv->port)
-                                            % mdev->dev->caps.num_comp_vectors;
-                                        mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
-                                                  name);
-                                }
-                        }
-                } else {
-                        cq->vector = (cq->ring + 1 + priv->port) %
-                                mdev->dev->caps.num_comp_vectors;
+                if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+                                             cq->vector)) {
+                        cq->vector = cq_idx;
+
+                        err = mlx4_assign_eq(mdev->dev, priv->port,
+                                             &cq->vector);
+                        if (err) {
+                                mlx4_err(mdev, "Failed assigning an EQ to %s\n",
+                                         name);
+                                goto free_eq;
+                        }
+
+                        assigned_eq = true;
                 }
 
         cq->irq_desc =
@@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                             &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
                             cq->vector, 0, timestamp_en);
         if (err)
-                return err;
+                goto free_eq;
 
         cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
         cq->mcq.event = mlx4_en_cq_event;
@@ -182,6 +174,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
         napi_enable(&cq->napi);
 
         return 0;
+
+free_eq:
+        if (assigned_eq)
+                mlx4_release_eq(mdev->dev, cq->vector);
+
+        cq->vector = mdev->dev->caps.num_comp_vectors;
+        return err;
 }
 
 void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +189,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 
         mlx4_en_unmap_buffer(&cq->wqres.buf);
         mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-        if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+        if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+            cq->is_tx == RX)
                 mlx4_release_eq(priv->mdev->dev, cq->vector);
-        }
         cq->vector = 0;
         cq->buf_size = 0;
         cq->buf = NULL;
...
@@ -1958,7 +1958,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
         int i;
 
#ifdef CONFIG_RFS_ACCEL
-        free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
         priv->dev->rx_cpu_rmap = NULL;
#endif
 
@@ -2016,11 +2015,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
         }
 
#ifdef CONFIG_RFS_ACCEL
-        if (priv->mdev->dev->caps.comp_pool) {
-                priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
-                if (!priv->dev->rx_cpu_rmap)
-                        goto err;
-        }
+        priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
 
         return 0;
...
@@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
         struct mlx4_dev *dev = mdev->dev;
 
         mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
-                if (!dev->caps.comp_pool)
-                        num_of_eqs = max_t(int, MIN_RX_RINGS,
-                                           min_t(int,
-                                                 dev->caps.num_comp_vectors,
-                                                 DEF_RX_RINGS));
-                else
-                        num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
-                                           dev->caps.comp_pool/
-                                           dev->caps.num_ports) - 1;
+                num_of_eqs = max_t(int, MIN_RX_RINGS,
+                                   min_t(int,
+                                         mlx4_get_eqs_per_port(mdev->dev, i),
+                                         DEF_RX_RINGS));
 
                 num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
                         min_t(int, num_of_eqs,
...
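To make the new RX-ring computation concrete (a hedged example — assuming the driver's usual values of MIN_RX_RINGS = 4 and DEF_RX_RINGS = 16): a port for which mlx4_get_eqs_per_port() returns 8 gets num_of_eqs = max(4, min(8, 16)) = 8, while a port whose share of a small pool is only 2 EQs is still rounded up to the 4-ring minimum.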
This diff is collapsed.
@@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
         if (err) {
                 if (dev->flags & MLX4_FLAG_MSI_X) {
                         mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
-                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                                  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
                         mlx4_warn(dev, "Trying again without MSI-X\n");
                 } else {
                         mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
-                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+                                 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
                         mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                 }
@@ -2486,9 +2486,10 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
         struct mlx4_priv *priv = mlx4_priv(dev);
         struct msix_entry *entries;
         int i;
+        int port = 0;
 
         if (msi_x) {
-                int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+                int nreq = dev->caps.num_ports * num_online_cpus() + 1;
 
                 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
                              nreq);
@@ -2503,20 +2504,49 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
                                              nreq);
 
-                if (nreq < 0) {
+                if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
                         kfree(entries);
                         goto no_msi;
-                } else if (nreq < MSIX_LEGACY_SZ +
-                           dev->caps.num_ports * MIN_MSIX_P_PORT) {
-                        /*Working in legacy mode , all EQ's shared*/
-                        dev->caps.comp_pool        = 0;
-                        dev->caps.num_comp_vectors = nreq - 1;
-                } else {
-                        dev->caps.comp_pool        = nreq - MSIX_LEGACY_SZ;
-                        dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
                 }
+                /* 1 is reserved for events (asyncrounous EQ) */
+                dev->caps.num_comp_vectors = nreq - 1;
 
-                for (i = 0; i < nreq; ++i)
-                        priv->eq_table.eq[i].irq = entries[i].vector;
+                priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+                bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+                            dev->caps.num_ports);
+
+                for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+                        if (i == MLX4_EQ_ASYNC)
+                                continue;
+
+                        priv->eq_table.eq[i].irq =
+                                entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+                        if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+                                bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+                                            dev->caps.num_ports);
+                        } else {
+                                set_bit(port,
+                                        priv->eq_table.eq[i].actv_ports.ports);
+                        }
+
+                        /* We divide the Eqs evenly between the two ports.
+                         * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+                         * refers to the number of Eqs per port
+                         * (i.e eqs_per_port). Theoretically, we would like to
+                         * write something like (i + 1) % eqs_per_port == 0.
+                         * However, since there's an asynchronous Eq, we have
+                         * to skip over it by comparing this condition to
+                         * !!((i + 1) > MLX4_EQ_ASYNC).
+                         */
+                        if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+                            ((i + 1) %
+                             (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+                            !!((i + 1) > MLX4_EQ_ASYNC))
+                                /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+                                 * everything is shared anyway.
+                                 */
+                                port++;
+                }
 
                 dev->flags |= MLX4_FLAG_MSI_X;
@@ -2526,10 +2556,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 
 no_msi:
         dev->caps.num_comp_vectors = 1;
-        dev->caps.comp_pool        = 0;
 
-        for (i = 0; i < 2; ++i)
+        BUG_ON(MLX4_EQ_ASYNC >= 2);
+        for (i = 0; i < 2; ++i) {
                 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+                if (i != MLX4_EQ_ASYNC) {
+                        bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+                                    dev->caps.num_ports);
+                }
+        }
 }
 
 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2629,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
         device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
         device_remove_file(&info->dev->persist->pdev->dev,
                            &info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+        free_irq_cpu_rmap(info->rmap);
+        info->rmap = NULL;
+#endif
 }
 
 static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -3024,7 +3063,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
         if (err)
                 goto err_master_mfunc;
 
-        priv->msix_ctl.pool_bm = 0;
+        bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
         mutex_init(&priv->msix_ctl.pool_lock);
 
         mlx4_enable_msi_x(dev);
@@ -3046,7 +3085,6 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
             !mlx4_is_mfunc(dev)) {
                 dev->flags &= ~MLX4_FLAG_MSI_X;
                 dev->caps.num_comp_vectors = 1;
-                dev->caps.comp_pool        = 0;
                 pci_disable_msix(pdev);
                 err = mlx4_setup_hca(dev);
         }
...
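A worked example of the assignment loop in mlx4_enable_msi_x() above: suppose a two-port HCA receives nreq = 9 MSI-X vectors, so num_comp_vectors = 8 and eqs_per_port = 8 / 2 = 4. Entry 0 (MLX4_EQ_ASYNC) is skipped; since MLX4_EQ_ASYNC is 0, the port-advance condition reduces to (i + 1) % 4 == 1, which first fires at i = 4. Entries 1-4 therefore get only port 1's bit set in actv_ports, and entries 5-8 only port 2's. Had nreq been small enough for MLX4_IS_LEGACY_EQ_MODE() to hold, every completion EQ would instead have all port bits set, i.e. be fully shared.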
@@ -287,6 +287,12 @@ struct mlx4_icm_table {
#define MLX4_CQE_SIZE_MASK_STRIDE       0x3
#define MLX4_EQE_SIZE_MASK_STRIDE       0x30
 
+#define MLX4_EQ_ASYNC                   0
+#define MLX4_EQ_TO_CQ_VECTOR(vector)    ((vector) - \
+                                         !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector)    ((vector) + \
+                                         !!((int)(vector) >= MLX4_EQ_ASYNC))
+
 /*
  * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
  */
@@ -391,6 +397,8 @@ struct mlx4_eq {
         struct mlx4_buf_list   *page_list;
         struct mlx4_mtt         mtt;
         struct mlx4_eq_tasklet  tasklet_ctx;
+        struct mlx4_active_ports actv_ports;
+        u32                     ref_count;
 };
 
 struct mlx4_slave_eqe {
@@ -808,6 +816,7 @@ struct mlx4_port_info {
         struct mlx4_vlan_table  vlan_table;
         struct mlx4_roce_gid_table gid_table;
         int                     base_qpn;
+        struct cpu_rmap        *rmap;
 };
 
 struct mlx4_sense {
@@ -818,7 +827,7 @@ struct mlx4_sense {
 };
 
 struct mlx4_msix_ctl {
-        u64             pool_bm;
+        DECLARE_BITMAP(pool_bm, MAX_MSIX);
         struct mutex    pool_lock;
 };
...
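Note the arithmetic behind the two translation macros added above: because MLX4_EQ_ASYNC is 0 and completion vectors are non-negative, the !! term is always 1, so MLX4_CQ_TO_EQ_VECTOR(v) = v + 1 and MLX4_EQ_TO_CQ_VECTOR(v) = v - 1. The async EQ permanently occupies eq_table[0], completion vector v is serviced by eq_table[v + 1], and that offset is why cq.c now indexes the EQ table through MLX4_CQ_TO_EQ_VECTOR().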
@@ -338,7 +338,7 @@ struct mlx4_en_cq {
         struct napi_struct      napi;
         int                     size;
         int                     buf_size;
-        unsigned                vector;
+        int                     vector;
         enum cq_type            is_tx;
         u16                     moder_time;
         u16                     moder_cnt;
...
@@ -46,8 +46,9 @@
 
#define MAX_MSIX_P_PORT         17
#define MAX_MSIX                64
-#define MSIX_LEGACY_SZ          4
#define MIN_MSIX_P_PORT         5
+#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
+                                         (dev_cap).num_ports * MIN_MSIX_P_PORT)
 
#define MLX4_MAX_100M_UNITS_VAL         255     /*
                                                 * work around: can't set values
@@ -528,7 +529,6 @@ struct mlx4_caps {
         int                     num_eqs;
         int                     reserved_eqs;
         int                     num_comp_vectors;
-        int                     comp_pool;
         int                     num_mpts;
         int                     max_fmr_maps;
         int                     num_mtts;
@@ -1332,10 +1332,13 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
 int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr);
 int mlx4_SYNC_TPT(struct mlx4_dev *dev);
 int mlx4_test_interrupts(struct mlx4_dev *dev);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
-                   int *vector);
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port);
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector);
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector);
 int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
...
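As a concrete reading of MLX4_IS_LEGACY_EQ_MODE(): with MIN_MSIX_P_PORT = 5, a two-port device left with fewer than 10 completion vectors is considered legacy, and mlx4_enable_msi_x() then marks every completion EQ active on all ports (fully shared) rather than splitting the vectors per port.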