Commit 98846e69 authored by David S. Miller

Merge branch 'mlx4'

Amir Vadai says:

====================
Mellanox EN driver fixes 2014-06-23

Below are some fixes to patches submitted to 3.16.

First patch, following discussions with Ben [1] and Thomas [2], is not to use
an IRQ affinity notifier, since it breaks RFS: an IRQ can have only one
affinity notifier, and accelerated RFS already claims it through cpu_rmap.
Instead, detect changes in the IRQ affinity map by checking, on each NAPI
poll, whether the current CPU is still set in the affinity map.
The two other patches fix bugs introduced in commit [3].

Patches were applied and tested over commit dba63115 ('powerpc: bpf: Fix the
broken LD_VLAN_TAG_PRESENT test')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1b037474 bb273617
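
For illustration, here is a minimal sketch of the scheme the first patch
introduces, mirroring the RX-poll hunk below: instead of registering an
affinity notifier, the NAPI poll handler re-checks the IRQ's affinity mask
whenever it exhausts its budget, and bails out so polling can restart on the
right CPU. All example_* names are hypothetical, and the irq_data->affinity
field is as it existed in this kernel era; this is a sketch, not the driver
code verbatim.

#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>

struct example_ring {
	struct napi_struct napi;
	struct irq_desc *irq_desc;	/* cached when the CQ is activated */
};

int example_process_cq(struct example_ring *ring, int budget);
void example_arm_cq(struct example_ring *ring);	/* re-enable the interrupt */

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring = container_of(napi, struct example_ring,
						 napi);
	int done = example_process_cq(ring, budget);

	if (done == budget) {
		const struct cpumask *aff =
			irq_desc_get_irq_data(ring->irq_desc)->affinity;

		/* Budget exhausted: only keep polling if this CPU is still
		 * part of the IRQ's affinity mask.
		 */
		if (unlikely(!cpumask_test_cpu(smp_processor_id(), aff))) {
			napi_complete(napi);
			example_arm_cq(ring);
			return 0;	/* restart NAPI from the IRQ's new CPU */
		}
		return budget;		/* still on the right CPU, poll again */
	}

	napi_complete(napi);		/* done for now */
	example_arm_cq(ring);
	return done;
}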
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
 	init_completion(&cq->free);
-	cq->irq = priv->eq_table.eq[cq->vector].irq;
-	cq->irq_affinity_change = false;
 	return 0;
 err_radix:
@@ -128,6 +128,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 				mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
 					  name);
 			}
+
+			cq->irq_desc =
+				irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+							    cq->vector));
 		}
 	} else {
 		cq->vector = (cq->ring + 1 + priv->port) %
@@ -187,8 +191,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
 	if (priv->mdev->dev->caps.comp_pool && cq->vector) {
-		if (!cq->is_tx)
-			irq_set_affinity_hint(cq->mcq.irq, NULL);
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
 	}
 	cq->vector = 0;
@@ -204,6 +206,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
 	if (!cq->is_tx) {
 		napi_hash_del(&cq->napi);
 		synchronize_rcu();
+		irq_set_affinity_hint(cq->mcq.irq, NULL);
 	}
 	netif_napi_del(&cq->napi);
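
The two hunks above move the irq_set_affinity_hint() clearing from CQ destroy
to CQ deactivate. A minimal sketch of the pairing this restores, with
hypothetical example_* names: the hint is set when a queue is activated on
port up, so the matching clear belongs in the deactivate path on port down,
not only in the final destroy.

#include <linux/interrupt.h>

static void example_activate(int irq, const struct cpumask *mask)
{
	/* port up: advertise the preferred CPU for this vector */
	irq_set_affinity_hint(irq, mask);
}

static void example_deactivate(int irq)
{
	/* port down: balanced clear, so the hint never goes stale */
	irq_set_affinity_hint(irq, NULL);
}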
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/irq.h>
 
 #include "mlx4_en.h"
@@ -896,16 +897,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 	/* If we used up all the quota - we're probably not done yet... */
 	if (done == budget) {
+		int cpu_curr;
+		const struct cpumask *aff;
+
 		INC_PERF_COUNTER(priv->pstats.napi_quota);
-		if (unlikely(cq->mcq.irq_affinity_change)) {
-			cq->mcq.irq_affinity_change = false;
+
+		cpu_curr = smp_processor_id();
+		aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+		if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+			/* Current cpu is not according to smp_irq_affinity -
+			 * probably affinity changed. need to stop this NAPI
+			 * poll, and restart it on the right CPU
+			 */
 			napi_complete(napi);
 			mlx4_en_arm_cq(priv, cq);
 			return 0;
 		}
 	} else {
 		/* Done for now */
-		cq->mcq.irq_affinity_change = false;
 		napi_complete(napi);
 		mlx4_en_arm_cq(priv, cq);
 	}
@@ -474,15 +474,9 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
 	/* If we used up all the quota - we're probably not done yet... */
 	if (done < budget) {
 		/* Done for now */
-		cq->mcq.irq_affinity_change = false;
 		napi_complete(napi);
 		mlx4_en_arm_cq(priv, cq);
 		return done;
-	} else if (unlikely(cq->mcq.irq_affinity_change)) {
-		cq->mcq.irq_affinity_change = false;
-		napi_complete(napi);
-		mlx4_en_arm_cq(priv, cq);
-		return 0;
 	}
 	return budget;
 }
@@ -53,11 +53,6 @@ enum {
 	MLX4_EQ_ENTRY_SIZE = 0x20
 };
 
-struct mlx4_irq_notify {
-	void *arg;
-	struct irq_affinity_notify notify;
-};
-
 #define MLX4_EQ_STATUS_OK	   ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
 #define MLX4_EQ_OWNER_SW	   ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
-				     const cpumask_t *mask)
-{
-	struct mlx4_irq_notify *n = container_of(notify,
-						 struct mlx4_irq_notify,
-						 notify);
-	struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
-	struct radix_tree_iter iter;
-	void **slot;
-
-	radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
-		struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
-		if (cq->irq == notify->irq)
-			cq->irq_affinity_change = true;
-	}
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
-	struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
-						 notify.kref);
-	kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
-				     struct mlx4_dev *dev, int irq)
-{
-	struct mlx4_irq_notify *irq_notifier = NULL;
-	int err = 0;
-
-	irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
-	if (!irq_notifier) {
-		mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
-			  irq);
-		return;
-	}
-
-	irq_notifier->notify.irq = irq;
-	irq_notifier->notify.notify = mlx4_irq_notifier_notify;
-	irq_notifier->notify.release = mlx4_release_irq_notifier;
-	irq_notifier->arg = priv;
-
-	err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
-	if (err) {
-		kfree(irq_notifier);
-		irq_notifier = NULL;
-		mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
-	}
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
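
As context for the deletion above: the kernel keeps at most one
struct irq_affinity_notify per IRQ, and accelerated RFS claims that slot
through the cpu_rmap glue, so a driver-private notifier displaces it. A
minimal sketch of the collision, under that assumption (example names
hypothetical):

#include <linux/cpu_rmap.h>
#include <linux/interrupt.h>

static int example_setup(struct cpu_rmap *rmap, int irq,
			 struct irq_affinity_notify *driver_notify)
{
	/* aRFS registers its affinity notifier for this IRQ */
	int err = irq_cpu_rmap_add(rmap, irq);

	if (err)
		return err;

	/* A driver-private notifier would now silently replace the rmap
	 * one and break accelerated RFS - exactly the pattern this series
	 * removes.
	 */
	return irq_set_affinity_notifier(irq, driver_notify);
}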
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 				continue;
 				/*we dont want to break here*/
 			}
-			mlx4_assign_irq_notifier(priv, dev,
-						 priv->eq_table.eq[vec].irq);
 			eq_set_ci(&priv->eq_table.eq[vec], 1);
 		}
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 		  Belonging to a legacy EQ*/
 	mutex_lock(&priv->msix_ctl.pool_lock);
 	if (priv->msix_ctl.pool_bm & 1ULL << i) {
-		irq_set_affinity_notifier(
-			priv->eq_table.eq[vec].irq,
-			NULL);
 		free_irq(priv->eq_table.eq[vec].irq,
 			 &priv->eq_table.eq[vec]);
 		priv->msix_ctl.pool_bm &= ~(1ULL << i);
@@ -343,6 +343,7 @@ struct mlx4_en_cq {
 #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
 	spinlock_t poll_lock; /* protects from LLS/napi conflicts */
 #endif /* CONFIG_NET_RX_BUSY_POLL */
+	struct irq_desc *irq_desc;
 };
 
 struct mlx4_en_port_profile {
@@ -578,8 +578,6 @@ struct mlx4_cq {
 	u32 cons_index;
-	u16 irq;
-	bool irq_affinity_change;
 	__be32 *set_ci_db;
 	__be32 *arm_db;
 	int arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 		   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
 	i %= num_online_cpus();
 
-	if (!cpumask_of_node(numa_node)) {
+	if (numa_node == -1 || !cpumask_of_node(numa_node)) {
 		/* Use all online cpu's for non numa aware system */
 		cpumask_copy(mask, cpu_online_mask);
 	} else {
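
The one-line fix above makes cpumask_set_cpu_local_first() fall back to all
online CPUs when the caller has no NUMA node (numa_node == -1, as
dev_to_node() reports on non-NUMA systems). A hedged usage sketch, with a
hypothetical example_ wrapper; the helper is shown as it existed at the time
(it was later renamed cpumask_local_spread()):

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Pick an affinity CPU for ring 'i', preferring CPUs local to the
 * device's NUMA node.
 */
static int example_pick_cpu(int i, int numa_node)
{
	cpumask_var_t mask;
	int err, cpu;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_set_cpu_local_first(i, numa_node, mask);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	cpu = cpumask_first(mask);
	free_cpumask_var(mask);
	return cpu;
}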