Commit de161803 authored by Ido Shamay, committed by David S. Miller

net/mlx4_core: Move affinity hints to mlx4_core ownership

Now that EQ management is the sole responsibility of mlx4_core,
the IRQ affinity hint configuration should be in its hands as well.
request_irq() is called only once, by the first consumer (which may
be mlx4_ib), so mlx4_en passes the affinity mask too late. We also
need to request vectors according to the cores we want to run on.

mlx4_core's distribution of IRQs to cores is straightforward:
EQ(i)'s IRQ gets an affinity hint for core i.
Consumers need to request EQ vectors according to their own core
considerations (e.g. NUMA locality).
Signed-off-by: Ido Shamay <idos@mellanox.com>
Signed-off-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent c66fa19c
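For illustration, the consumer-side calling pattern this commit expects (seed the vector with a CPU from a NUMA-aware mask, then let mlx4_core both assign the EQ and set the affinity hint) can be sketched as below. mlx4_is_eq_vector_valid(), mlx4_assign_eq() and cpumask_first() all appear in the diff; the wrapper function and its parameters are hypothetical.

#include <linux/cpumask.h>
#include <linux/mlx4/device.h>

/* Hypothetical wrapper mirroring the mlx4_en_activate_cq() change below.
 * The consumer no longer calls irq_set_affinity_hint() itself; it only
 * proposes a preferred vector and lets mlx4_core do the rest.
 */
static int request_numa_vector(struct mlx4_dev *dev, u8 port,
                               const struct cpumask *numa_mask, int *vector)
{
        if (!mlx4_is_eq_vector_valid(dev, port, *vector)) {
                *vector = cpumask_first(numa_mask);
                return mlx4_assign_eq(dev, port, vector);
        }
        return 0;
}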
@@ -114,7 +114,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
         if (cq->is_tx == RX) {
                 if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
                                              cq->vector)) {
-                        cq->vector = cq_idx;
+                        cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
                         err = mlx4_assign_eq(mdev->dev, priv->port,
                                              &cq->vector);
@@ -160,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
                                NAPI_POLL_WEIGHT);
         } else {
-                struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
-                err = irq_set_affinity_hint(cq->mcq.irq,
-                                            ring->affinity_mask);
-                if (err)
-                        mlx4_warn(mdev, "Failed setting affinity hint\n");
-
                 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
                 napi_hash_add(&cq->napi);
         }
@@ -205,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
         if (!cq->is_tx) {
                 napi_hash_del(&cq->napi);
                 synchronize_rcu();
-                irq_set_affinity_hint(cq->mcq.irq, NULL);
         }
         netif_napi_del(&cq->napi);
......
@@ -221,6 +221,20 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
         slave_event(dev, slave, eqe);
 }
 
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+        int hint_err;
+        struct mlx4_dev *dev = &priv->dev;
+        struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+        if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+                return;
+
+        hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+        if (hint_err)
+                mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+
 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
 {
         struct mlx4_eqe eqe;
@@ -1092,6 +1106,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 
         for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                 if (eq_table->eq[i].have_irq) {
+                        free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+                        irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
                         free_irq(eq_table->eq[i].irq, eq_table->eq + i);
                         eq_table->eq[i].have_irq = 0;
                 }
@@ -1483,6 +1501,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
                         clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
                         *prequested_vector = -1;
                 } else {
+#if defined(CONFIG_SMP)
+                        mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
                         eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
                         priv->eq_table.eq[*prequested_vector].have_irq = 1;
                 }
......
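The eq.c hunks above form a set/clear pair: the hint is applied when the vector is assigned and dropped before the IRQ is freed. A minimal sketch of that pairing follows; the helper names are hypothetical, while irq_set_affinity_hint(), cpumask_empty() and free_cpumask_var() are the kernel APIs the patch uses.

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

/* Hypothetical helpers mirroring the mlx4_core lifecycle: hint the IRQ
 * toward the chosen core at assign time, then clear the hint and free
 * the mask on teardown so no stale pointer remains.
 */
static void eq_hint_set(unsigned int irq, const struct cpumask *mask)
{
        if (!mask || cpumask_empty(mask))
                return;
        if (irq_set_affinity_hint(irq, mask))
                pr_warn("irq_set_affinity_hint failed for IRQ %u\n", irq);
}

static void eq_hint_clear(unsigned int irq, cpumask_var_t mask)
{
        irq_set_affinity_hint(irq, NULL);
        free_cpumask_var(mask);
}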
@@ -2481,6 +2481,36 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
         return err;
 }
 
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+        int requested_cpu = 0;
+        struct mlx4_priv *priv = mlx4_priv(dev);
+        struct mlx4_eq *eq;
+        int off = 0;
+        int i;
+
+        if (eqn > dev->caps.num_comp_vectors)
+                return -EINVAL;
+
+        for (i = 1; i < port; i++)
+                off += mlx4_get_eqs_per_port(dev, i);
+
+        requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+        /* Meaning EQs are shared, and this call comes from the second port */
+        if (requested_cpu < 0)
+                return 0;
+
+        eq = &priv->eq_table.eq[eqn];
+
+        if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+                return -ENOMEM;
+
+        cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+        return 0;
+}
+
 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
@@ -2525,9 +2555,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
                         if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
                                 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
                                             dev->caps.num_ports);
+                                /* We don't set affinity hint when there
+                                 * aren't enough EQs
+                                 */
                         } else {
                                 set_bit(port,
                                         priv->eq_table.eq[i].actv_ports.ports);
+                                if (mlx4_init_affinity_hint(dev, port + 1, i))
+                                        mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+                                                  i);
                         }
                         /* We divide the Eqs evenly between the two ports.
                          * (dev->caps.num_comp_vectors / dev->caps.num_ports)
......
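A worked example of the requested_cpu arithmetic in mlx4_init_affinity_hint(), assuming MLX4_EQ_ASYNC occupies slot 0 so completion EQs start at eqn 1, with 8 completion vectors split evenly across 2 ports: for port 1, off = 0, so eqn 1..4 map to requested_cpu = eqn - 0 - 1, i.e. cores 0..3; for port 2, off = 4, so eqn 5..8 map to cores 0..3 as well. In legacy (shared) mode the second port's off already covers all the vectors, requested_cpu goes negative, and the function returns early without allocating a mask: the "EQs are shared" case noted in the code comment.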
@@ -399,6 +399,7 @@ struct mlx4_eq {
         struct mlx4_eq_tasklet  tasklet_ctx;
         struct mlx4_active_ports actv_ports;
         u32                     ref_count;
+        cpumask_var_t           affinity_mask;
 };
 
 struct mlx4_slave_eqe {
......
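The new field is a cpumask_var_t: a plain array on most configs, but a heap pointer under CONFIG_CPUMASK_OFFSTACK, which is why the patch pairs zalloc_cpumask_var() with free_cpumask_var(). A minimal sketch of that allocation pattern, with a hypothetical helper name:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical helper: allocate the mask (a no-op allocation when
 * cpumask_var_t is a plain array) and pin it to a single core.
 */
static int eq_mask_init(cpumask_var_t *mask, int cpu)
{
        if (!zalloc_cpumask_var(mask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_set_cpu(cpu, *mask);
        return 0;
}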