Commit fac70d51 authored by Eli Cohen, committed by Roland Dreier

IB/mad: IBoE supports only QP1 (no QP0)

Since IBoE uses Ethernet as its link layer, there is no central
management entity, so there is no need for QP0.  QP1 is still needed
since it handles communication between CM agents.  This patch skips
QP0 and creates only QP1 for IBoE ports.
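
Every hunk below applies the same guard: skip SMI/QP0 handling when the
port's link layer is not InfiniBand.  A minimal sketch of that pattern is
shown here; the has_smi_qp() wrapper is illustrative only and not part of
this patch, while rdma_port_get_link_layer() and IB_LINK_LAYER_INFINIBAND
are the in-tree symbols the patch relies on:

#include <rdma/ib_verbs.h>

/*
 * Illustrative wrapper (not part of this patch): QP0 and its SMI MAD
 * agent are only meaningful when the port's link layer is InfiniBand;
 * IBoE ports run over Ethernet and therefore get only QP1 (GSI).
 */
static inline int has_smi_qp(struct ib_device *device, u8 port_num)
{
	return rdma_port_get_link_layer(device, port_num) ==
	       IB_LINK_LAYER_INFINIBAND;
}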
Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 7b4c8769
--- a/drivers/infiniband/core/agent.c
+++ b/drivers/infiniband/core/agent.c
@@ -59,8 +59,8 @@ __ib_get_agent_port(struct ib_device *device, int port_num)
 	struct ib_agent_port_private *entry;
 
 	list_for_each_entry(entry, &ib_agent_port_list, port_list) {
-		if (entry->agent[0]->device == device &&
-		    entry->agent[0]->port_num == port_num)
+		if (entry->agent[1]->device == device &&
+		    entry->agent[1]->port_num == port_num)
 			return entry;
 	}
 	return NULL;
@@ -155,14 +155,16 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 		goto error1;
 	}
 
-	/* Obtain send only MAD agent for SMI QP */
-	port_priv->agent[0] = ib_register_mad_agent(device, port_num,
-						    IB_QPT_SMI, NULL, 0,
-						    &agent_send_handler,
-						    NULL, NULL);
-	if (IS_ERR(port_priv->agent[0])) {
-		ret = PTR_ERR(port_priv->agent[0]);
-		goto error2;
+	if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+		/* Obtain send only MAD agent for SMI QP */
+		port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+							    IB_QPT_SMI, NULL, 0,
+							    &agent_send_handler,
+							    NULL, NULL);
+		if (IS_ERR(port_priv->agent[0])) {
+			ret = PTR_ERR(port_priv->agent[0]);
+			goto error2;
+		}
 	}
 
 	/* Obtain send only MAD agent for GSI QP */
@@ -182,7 +184,8 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
 	return 0;
 
 error3:
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
 error2:
 	kfree(port_priv);
 error1:
@@ -205,7 +208,9 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
 	spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
 
 	ib_unregister_mad_agent(port_priv->agent[1]);
-	ib_unregister_mad_agent(port_priv->agent[0]);
+	if (port_priv->agent[0])
+		ib_unregister_mad_agent(port_priv->agent[0]);
+
 	kfree(port_priv);
 	return 0;
 }
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2598,6 +2598,9 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
 	struct ib_mad_private *recv;
 	struct ib_mad_list_head *mad_list;
 
+	if (!qp_info->qp)
+		return;
+
 	while (!list_empty(&qp_info->recv_queue.list)) {
 		mad_list = list_entry(qp_info->recv_queue.list.next,
@@ -2639,6 +2642,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
 		qp = port_priv->qp_info[i].qp;
+		if (!qp)
+			continue;
+
 		/*
 		 * PKey index for QP1 is irrelevant but
 		 * one is needed for the Reset to Init transition
@@ -2680,6 +2686,9 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
 	}
 
 	for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+		if (!port_priv->qp_info[i].qp)
+			continue;
+
 		ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
 		if (ret) {
 			printk(KERN_ERR PFX "Couldn't post receive WRs\n");
@@ -2758,6 +2767,9 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
 
 static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
 {
+	if (!qp_info->qp)
+		return;
+
 	ib_destroy_qp(qp_info->qp);
 	kfree(qp_info->snoop_table);
 }
@@ -2773,6 +2785,7 @@ static int ib_mad_port_open(struct ib_device *device,
 	struct ib_mad_port_private *port_priv;
 	unsigned long flags;
 	char name[sizeof "ib_mad123"];
+	int has_smi;
 
 	/* Create new device info */
 	port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
@@ -2788,7 +2801,11 @@ static int ib_mad_port_open(struct ib_device *device,
 	init_mad_qp(port_priv, &port_priv->qp_info[0]);
 	init_mad_qp(port_priv, &port_priv->qp_info[1]);
 
-	cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+	cq_size = mad_sendq_size + mad_recvq_size;
+	has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+	if (has_smi)
+		cq_size *= 2;
+
 	port_priv->cq = ib_create_cq(port_priv->device,
 				     ib_mad_thread_completion_handler,
 				     NULL, port_priv, cq_size, 0);
@@ -2812,9 +2829,11 @@ static int ib_mad_port_open(struct ib_device *device,
 		goto error5;
 	}
 
-	ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
-	if (ret)
-		goto error6;
+	if (has_smi) {
+		ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+		if (ret)
+			goto error6;
+	}
 	ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
 	if (ret)
 		goto error7;
--- a/drivers/infiniband/core/multicast.c
+++ b/drivers/infiniband/core/multicast.c
@@ -774,6 +774,10 @@ static void mcast_event_handler(struct ib_event_handler *handler,
 	int index;
 
 	dev = container_of(handler, struct mcast_device, event_handler);
+	if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+	    IB_LINK_LAYER_INFINIBAND)
+		return;
+
 	index = event->element.port_num - dev->start_port;
 
 	switch (event->event) {
@@ -796,6 +800,7 @@ static void mcast_add_one(struct ib_device *device)
 	struct mcast_device *dev;
 	struct mcast_port *port;
 	int i;
+	int count = 0;
 
 	if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
 		return;
@@ -813,6 +818,9 @@ static void mcast_add_one(struct ib_device *device)
 	}
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+		if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+		    IB_LINK_LAYER_INFINIBAND)
+			continue;
 		port = &dev->port[i];
 		port->dev = dev;
 		port->port_num = dev->start_port + i;
@@ -820,6 +828,12 @@ static void mcast_add_one(struct ib_device *device)
 		port->table = RB_ROOT;
 		init_completion(&port->comp);
 		atomic_set(&port->refcount, 1);
+		++count;
+	}
+
+	if (!count) {
+		kfree(dev);
+		return;
 	}
 
 	dev->device = device;
@@ -843,9 +857,12 @@ static void mcast_remove_one(struct ib_device *device)
 	flush_workqueue(mcast_wq);
 
 	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
-		port = &dev->port[i];
-		deref_port(port);
-		wait_for_completion(&port->comp);
+		if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+		    IB_LINK_LAYER_INFINIBAND) {
+			port = &dev->port[i];
+			deref_port(port);
+			wait_for_completion(&port->comp);
+		}
 	}
 
 	kfree(dev);
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -416,6 +416,9 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
 		struct ib_sa_port *port =
 			&sa_dev->port[event->element.port_num - sa_dev->start_port];
 
+		if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+			return;
+
 		spin_lock_irqsave(&port->ah_lock, flags);
 		if (port->sm_ah)
 			kref_put(&port->sm_ah->ref, free_sm_ah);
@@ -1007,7 +1010,7 @@ static void ib_sa_add_one(struct ib_device *device)
 		e = device->phys_port_cnt;
 	}
 
-	sa_dev = kmalloc(sizeof *sa_dev +
+	sa_dev = kzalloc(sizeof *sa_dev +
 			 (e - s + 1) * sizeof (struct ib_sa_port),
 			 GFP_KERNEL);
 	if (!sa_dev)
@@ -1017,9 +1020,12 @@ static void ib_sa_add_one(struct ib_device *device)
 	sa_dev->end_port   = e;
 
 	for (i = 0; i <= e - s; ++i) {
+		spin_lock_init(&sa_dev->port[i].ah_lock);
+		if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+			continue;
+
 		sa_dev->port[i].sm_ah    = NULL;
 		sa_dev->port[i].port_num = i + s;
-		spin_lock_init(&sa_dev->port[i].ah_lock);
 
 		sa_dev->port[i].agent =
 			ib_register_mad_agent(device, i + s, IB_QPT_GSI,
@@ -1045,13 +1051,15 @@ static void ib_sa_add_one(struct ib_device *device)
 		goto err;
 
 	for (i = 0; i <= e - s; ++i)
-		update_sm_ah(&sa_dev->port[i].update_task);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			update_sm_ah(&sa_dev->port[i].update_task);
 
 	return;
 
 err:
 	while (--i >= 0)
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
 
 	kfree(sa_dev);
@@ -1071,9 +1079,12 @@ static void ib_sa_remove_one(struct ib_device *device)
 	flush_scheduled_work();
 
 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
-		ib_unregister_mad_agent(sa_dev->port[i].agent);
-		if (sa_dev->port[i].sm_ah)
-			kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+			ib_unregister_mad_agent(sa_dev->port[i].agent);
+			if (sa_dev->port[i].sm_ah)
+				kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+		}
 	}
 
 	kfree(sa_dev);