Commit 3810c1a1 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Remove mcg from rxe pools

Finish removing mcg from rxe pools. Replace the rxe pool reference counting
with krefs and replace rxe_alloc() with kzalloc().

Link: https://lore.kernel.org/r/20220208211644.123457-8-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent d2ccf041
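The conversion below follows the standard kref lifecycle: the object is allocated with kzalloc(), kref_init() starts the count at 1, kref_get() takes an additional reference, and kref_put() runs a release callback (rxe_cleanup_mcg() in this patch) that frees the memory once the count reaches zero. As a reference point, here is a minimal, self-contained sketch of that pattern; the demo_* names are hypothetical and are not part of the rxe code:

/* Illustrative sketch only - not rxe code. */
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
        struct kref ref_cnt;    /* embedded refcount, like rxe_mcg.ref_cnt */
        int value;
};

/* release callback passed to kref_put(); runs when the count hits zero */
static void demo_release(struct kref *kref)
{
        struct demo_obj *obj = container_of(kref, struct demo_obj, ref_cnt);

        kfree(obj);
}

static struct demo_obj *demo_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (!obj)
                return NULL;
        kref_init(&obj->ref_cnt);       /* count starts at 1 */
        return obj;
}

static void demo_get(struct demo_obj *obj)
{
        kref_get(&obj->ref_cnt);        /* take one extra reference */
}

static void demo_put(struct demo_obj *obj)
{
        kref_put(&obj->ref_cnt, demo_release);  /* may free obj */
}

The diff below maps onto this directly: rxe_add_ref()/rxe_drop_ref() become kref_get()/kref_put(..., rxe_cleanup_mcg), and rxe_alloc() becomes kzalloc().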
@@ -28,7 +28,6 @@ void rxe_dealloc(struct ib_device *ib_dev)
         rxe_pool_cleanup(&rxe->cq_pool);
         rxe_pool_cleanup(&rxe->mr_pool);
         rxe_pool_cleanup(&rxe->mw_pool);
-        rxe_pool_cleanup(&rxe->mc_grp_pool);
 
         if (rxe->tfm)
                 crypto_free_shash(rxe->tfm);
@@ -157,15 +156,8 @@ static int rxe_init_pools(struct rxe_dev *rxe)
         if (err)
                 goto err8;
 
-        err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
-                            rxe->attr.max_mcast_grp);
-        if (err)
-                goto err9;
-
         return 0;
 
-err9:
-        rxe_pool_cleanup(&rxe->mw_pool);
 err8:
         rxe_pool_cleanup(&rxe->mr_pool);
 err7:
......
@@ -43,7 +43,7 @@ void rxe_cq_cleanup(struct rxe_pool_elem *arg);
 struct rxe_mcg *rxe_lookup_mcg(struct rxe_dev *rxe, union ib_gid *mgid);
 int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
 int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid);
-void rxe_mc_cleanup(struct rxe_pool_elem *elem);
+void rxe_cleanup_mcg(struct kref *kref);
 
 /* rxe_mmap.c */
 struct rxe_mmap_info {
......
@@ -98,7 +98,7 @@ static struct rxe_mcg *__rxe_lookup_mcg(struct rxe_dev *rxe,
         }
 
         if (node) {
-                rxe_add_ref(mcg);
+                kref_get(&mcg->ref_cnt);
                 return mcg;
         }
@@ -142,6 +142,7 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
         if (unlikely(err))
                 return err;
 
+        kref_init(&mcg->ref_cnt);
         memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
         INIT_LIST_HEAD(&mcg->qp_list);
         mcg->rxe = rxe;
@@ -153,7 +154,7 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
          * Inserting mcg makes it visible to outside so this should
          * be done last after the object is ready.
          */
-        rxe_add_ref(mcg);
+        kref_get(&mcg->ref_cnt);
         __rxe_insert_mcg(mcg);
 
         return 0;
@@ -168,7 +169,6 @@ static int __rxe_init_mcg(struct rxe_dev *rxe, union ib_gid *mgid,
  */
 static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
 {
-        struct rxe_pool *pool = &rxe->mc_grp_pool;
         struct rxe_mcg *mcg, *tmp;
         unsigned long flags;
         int err;
@@ -182,7 +182,7 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
                 return mcg;
 
         /* speculative alloc of new mcg */
-        mcg = rxe_alloc(pool);
+        mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
         if (!mcg)
                 return ERR_PTR(-ENOMEM);
 
@@ -190,7 +190,7 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
         /* re-check to see if someone else just added it */
         tmp = __rxe_lookup_mcg(rxe, mgid);
         if (tmp) {
-                rxe_drop_ref(mcg);
+                kfree(mcg);
                 mcg = tmp;
                 goto out;
         }
@@ -210,10 +210,21 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
 err_dec:
         atomic_dec(&rxe->mcg_num);
         spin_unlock_irqrestore(&rxe->mcg_lock, flags);
-        rxe_drop_ref(mcg);
+        kfree(mcg);
         return ERR_PTR(err);
 }
 
+/**
+ * rxe_cleanup_mcg - cleanup mcg for kref_put
+ * @kref:
+ */
+void rxe_cleanup_mcg(struct kref *kref)
+{
+        struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt);
+
+        kfree(mcg);
+}
+
 /**
  * __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
  * @mcg: the mcg object
@@ -223,11 +234,14 @@ static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
  */
 static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
 {
+        struct rxe_dev *rxe = mcg->rxe;
+
         /* remove mcg from red-black tree then drop ref */
         __rxe_remove_mcg(mcg);
-        rxe_drop_ref(mcg);
+        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
 
         rxe_mcast_delete(mcg->rxe, &mcg->mgid);
+        atomic_dec(&rxe->mcg_num);
 }
 
 /**
@@ -245,11 +259,6 @@ static void rxe_destroy_mcg(struct rxe_mcg *mcg)
         spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
 }
 
-void rxe_mc_cleanup(struct rxe_pool_elem *elem)
-{
-        /* nothing left to do for now */
-}
-
 static int rxe_attach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct rxe_mcg *mcg)
 {
@@ -334,7 +343,7 @@ static int rxe_detach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
                         /* drop the ref from get key. This will free the
                          * object if qp_num is zero.
                          */
-                        rxe_drop_ref(mcg);
+                        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
                         kfree(mca);
                         err = 0;
                         goto out_unlock;
@@ -342,7 +351,7 @@ static int rxe_detach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
         }
 
         /* we didn't find the qp on the list */
-        rxe_drop_ref(mcg);
+        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
         err = -EINVAL;
 
 out_unlock:
@@ -368,7 +377,7 @@ int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
         if (atomic_read(&mcg->qp_num) == 0)
                 rxe_destroy_mcg(mcg);
 
-        rxe_drop_ref(mcg);
+        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
         return err;
 }
......
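With rxe_alloc() gone, rxe_get_mcg() keeps its speculative-allocation scheme: the candidate mcg is kzalloc'ed outside the spinlock (GFP_KERNEL may sleep), the tree is re-checked under the lock, and the speculative copy is simply kfree'd if another thread added the group first. A standalone sketch of that alloc/re-check pattern, again with hypothetical demo_* names:

/* Illustrative sketch only - not rxe code. */
#include <linux/err.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_obj {
        struct kref ref_cnt;
};

static struct demo_obj *demo_singleton; /* shared pointer, protected by demo_lock */
static DEFINE_SPINLOCK(demo_lock);

static void demo_release(struct kref *kref)
{
        kfree(container_of(kref, struct demo_obj, ref_cnt));
}

static struct demo_obj *demo_get_or_create(void)
{
        struct demo_obj *obj, *tmp;
        unsigned long flags;

        /* speculative alloc outside the lock; GFP_KERNEL may sleep */
        obj = kzalloc(sizeof(*obj), GFP_KERNEL);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&demo_lock, flags);
        tmp = demo_singleton;
        if (tmp) {
                /* lost the race: someone else installed one; drop ours */
                kfree(obj);
                kref_get(&tmp->ref_cnt);        /* ref for the caller */
                obj = tmp;
        } else {
                kref_init(&obj->ref_cnt);       /* ref for the caller */
                kref_get(&obj->ref_cnt);        /* ref held by demo_singleton */
                demo_singleton = obj;
        }
        spin_unlock_irqrestore(&demo_lock, flags);

        return obj;
}

Callers drop their reference with kref_put(&obj->ref_cnt, demo_release), just as the rxe paths above now call kref_put(&mcg->ref_cnt, rxe_cleanup_mcg).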
@@ -79,15 +79,6 @@ static const struct rxe_type_info {
                 .min_index = RXE_MIN_MW_INDEX,
                 .max_index = RXE_MAX_MW_INDEX,
         },
-        [RXE_TYPE_MC_GRP] = {
-                .name = "rxe-mc_grp",
-                .size = sizeof(struct rxe_mcg),
-                .elem_offset = offsetof(struct rxe_mcg, elem),
-                .cleanup = rxe_mc_cleanup,
-                .flags = RXE_POOL_KEY,
-                .key_offset = offsetof(struct rxe_mcg, mgid),
-                .key_size = sizeof(union ib_gid),
-        },
 };
 
 static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
......
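Removing RXE_TYPE_MC_GRP also drops the keyed (RXE_POOL_KEY) pool entry for multicast groups; mgid lookups are served by the red-black tree that rxe_mcast.c already maintains (__rxe_insert_mcg()/__rxe_lookup_mcg()). For reference, a minimal sketch of a keyed rb-tree insert of that kind, with hypothetical demo_* names and a fixed-size key standing in for union ib_gid:

/* Illustrative sketch only - not rxe code. */
#include <linux/errno.h>
#include <linux/rbtree.h>
#include <linux/string.h>

struct demo_key {
        unsigned char bytes[16];        /* stand-in for union ib_gid */
};

struct demo_node {
        struct rb_node node;
        struct demo_key key;
};

static int demo_insert(struct rb_root *root, struct demo_node *new)
{
        struct rb_node **link = &root->rb_node;
        struct rb_node *parent = NULL;

        while (*link) {
                struct demo_node *cur = rb_entry(*link, struct demo_node, node);
                int cmp = memcmp(&new->key, &cur->key, sizeof(new->key));

                parent = *link;
                if (cmp < 0)
                        link = &(*link)->rb_left;
                else if (cmp > 0)
                        link = &(*link)->rb_right;
                else
                        return -EEXIST;         /* key already present */
        }

        rb_link_node(&new->node, parent, link);
        rb_insert_color(&new->node, root);
        return 0;
}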
@@ -21,7 +21,6 @@ enum rxe_elem_type {
         RXE_TYPE_CQ,
         RXE_TYPE_MR,
         RXE_TYPE_MW,
-        RXE_TYPE_MC_GRP,
 
         RXE_NUM_TYPES,          /* keep me last */
 };
......
@@ -300,7 +300,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
         spin_unlock_bh(&rxe->mcg_lock);
 
-        rxe_drop_ref(mcg);
+        kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
 
         if (likely(!skb))
                 return;
......
@@ -352,8 +352,8 @@ struct rxe_mw {
 };
 
 struct rxe_mcg {
-        struct rxe_pool_elem    elem;
         struct rb_node          node;
+        struct kref             ref_cnt;
         struct rxe_dev          *rxe;
         struct list_head        qp_list;
         union ib_gid            mgid;
......