Commit 3788d299 authored by Jason Gunthorpe

RDMA/cma: Consolidate the destruction of a cma_multicast in one place

Two places were open coding this sequence; consolidate them into a single
helper, which also pulls in cma_leave_roce_mc_group(), called from only one place.

Link: https://lore.kernel.org/r/20200902081122.745412-8-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 1bb5091d
...@@ -1775,19 +1775,30 @@ static void cma_release_port(struct rdma_id_private *id_priv) ...@@ -1775,19 +1775,30 @@ static void cma_release_port(struct rdma_id_private *id_priv)
mutex_unlock(&lock); mutex_unlock(&lock);
} }
static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv, static void destroy_mc(struct rdma_id_private *id_priv,
struct cma_multicast *mc) struct cma_multicast *mc)
{ {
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) {
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
return;
}
if (rdma_protocol_roce(id_priv->id.device,
id_priv->id.port_num)) {
struct rdma_dev_addr *dev_addr =
&id_priv->id.route.addr.dev_addr;
struct net_device *ndev = NULL; struct net_device *ndev = NULL;
if (dev_addr->bound_dev_if) if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
if (ndev) { if (ndev) {
cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false); cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid, false);
dev_put(ndev); dev_put(ndev);
} }
kref_put(&mc->mcref, release_mc); kref_put(&mc->mcref, release_mc);
}
} }
static void cma_leave_mc_groups(struct rdma_id_private *id_priv) static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
...@@ -1795,16 +1806,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) ...@@ -1795,16 +1806,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
struct cma_multicast *mc; struct cma_multicast *mc;
while (!list_empty(&id_priv->mc_list)) { while (!list_empty(&id_priv->mc_list)) {
mc = container_of(id_priv->mc_list.next, mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
struct cma_multicast, list); list);
list_del(&mc->list); list_del(&mc->list);
if (rdma_cap_ib_mcast(id_priv->cma_dev->device, destroy_mc(id_priv, mc);
id_priv->id.port_num)) {
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
} else {
cma_leave_roce_mc_group(id_priv, mc);
}
} }
} }
...@@ -4641,21 +4646,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) ...@@ -4641,21 +4646,15 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
id_priv = container_of(id, struct rdma_id_private, id); id_priv = container_of(id, struct rdma_id_private, id);
spin_lock_irq(&id_priv->lock); spin_lock_irq(&id_priv->lock);
list_for_each_entry(mc, &id_priv->mc_list, list) { list_for_each_entry(mc, &id_priv->mc_list, list) {
if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) { if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
continue;
list_del(&mc->list); list_del(&mc->list);
spin_unlock_irq(&id_priv->lock); spin_unlock_irq(&id_priv->lock);
BUG_ON(id_priv->cma_dev->device != id->device); WARN_ON(id_priv->cma_dev->device != id->device);
destroy_mc(id_priv, mc);
if (rdma_cap_ib_mcast(id->device, id->port_num)) {
ib_sa_free_multicast(mc->multicast.ib);
kfree(mc);
} else if (rdma_protocol_roce(id->device, id->port_num)) {
cma_leave_roce_mc_group(id_priv, mc);
}
return; return;
} }
}
spin_unlock_irq(&id_priv->lock); spin_unlock_irq(&id_priv->lock);
} }
EXPORT_SYMBOL(rdma_leave_multicast); EXPORT_SYMBOL(rdma_leave_multicast);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment