Commit 4f87ccfc authored by Dennis Dalessandro, committed by Doug Ledford

staging/rdma/hfi1: Use rdmavt protection domain

Remove protection domain from hfi1 and use rdmavt's version.
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent f326674a
...@@ -176,7 +176,7 @@ void hfi1_free_lkey(struct hfi1_mregion *mr) ...@@ -176,7 +176,7 @@ void hfi1_free_lkey(struct hfi1_mregion *mr)
* Check the IB SGE for validity and initialize our internal version * Check the IB SGE for validity and initialize our internal version
* of it. * of it.
*/ */
int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd, int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd,
struct hfi1_sge *isge, struct ib_sge *sge, int acc) struct hfi1_sge *isge, struct ib_sge *sge, int acc)
{ {
struct hfi1_mregion *mr; struct hfi1_mregion *mr;
...@@ -285,7 +285,7 @@ int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, ...@@ -285,7 +285,7 @@ int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
*/ */
rcu_read_lock(); rcu_read_lock();
if (rkey == 0) { if (rkey == 0) {
struct hfi1_pd *pd = to_ipd(qp->ibqp.pd); struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
struct hfi1_ibdev *dev = to_idev(pd->ibpd.device); struct hfi1_ibdev *dev = to_idev(pd->ibpd.device);
if (pd->user) if (pd->user)
......
...@@ -116,7 +116,7 @@ struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc) ...@@ -116,7 +116,7 @@ struct ib_mr *hfi1_get_dma_mr(struct ib_pd *pd, int acc)
struct ib_mr *ret; struct ib_mr *ret;
int rval; int rval;
if (to_ipd(pd)->user) { if (ibpd_to_rvtpd(pd)->user) {
ret = ERR_PTR(-EPERM); ret = ERR_PTR(-EPERM);
goto bail; goto bail;
} }
......
...@@ -102,11 +102,11 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe) ...@@ -102,11 +102,11 @@ static int init_sge(struct hfi1_qp *qp, struct hfi1_rwqe *wqe)
int i, j, ret; int i, j, ret;
struct ib_wc wc; struct ib_wc wc;
struct hfi1_lkey_table *rkt; struct hfi1_lkey_table *rkt;
struct hfi1_pd *pd; struct rvt_pd *pd;
struct hfi1_sge_state *ss; struct hfi1_sge_state *ss;
rkt = &to_idev(qp->ibqp.device)->lk_table; rkt = &to_idev(qp->ibqp.device)->lk_table;
pd = to_ipd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd); pd = ibpd_to_rvtpd(qp->ibqp.srq ? qp->ibqp.srq->pd : qp->ibqp.pd);
ss = &qp->r_sge; ss = &qp->r_sge;
ss->sg_list = qp->r_sg_list; ss->sg_list = qp->r_sg_list;
qp->r_len = 0; qp->r_len = 0;
......
...@@ -368,7 +368,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) ...@@ -368,7 +368,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
int j; int j;
int acc; int acc;
struct hfi1_lkey_table *rkt; struct hfi1_lkey_table *rkt;
struct hfi1_pd *pd; struct rvt_pd *pd;
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_pportdata *ppd; struct hfi1_pportdata *ppd;
struct hfi1_ibport *ibp; struct hfi1_ibport *ibp;
...@@ -413,7 +413,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr) ...@@ -413,7 +413,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
return -ENOMEM; return -ENOMEM;
rkt = &to_idev(qp->ibqp.device)->lk_table; rkt = &to_idev(qp->ibqp.device)->lk_table;
pd = to_ipd(qp->ibqp.pd); pd = ibpd_to_rvtpd(qp->ibqp.pd);
wqe = get_swqe_ptr(qp, qp->s_head); wqe = get_swqe_ptr(qp, qp->s_head);
...@@ -1394,7 +1394,7 @@ static int query_device(struct ib_device *ibdev, ...@@ -1394,7 +1394,7 @@ static int query_device(struct ib_device *ibdev,
props->max_mr = dev->lk_table.max; props->max_mr = dev->lk_table.max;
props->max_fmr = dev->lk_table.max; props->max_fmr = dev->lk_table.max;
props->max_map_per_fmr = 32767; props->max_map_per_fmr = 32767;
props->max_pd = hfi1_max_pds; props->max_pd = dev->rdi.dparms.props.max_pd;
props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC; props->max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
props->max_qp_init_rd_atom = 255; props->max_qp_init_rd_atom = 255;
/* props->max_res_rd_atom */ /* props->max_res_rd_atom */
...@@ -1592,61 +1592,6 @@ static int query_gid(struct ib_device *ibdev, u8 port, ...@@ -1592,61 +1592,6 @@ static int query_gid(struct ib_device *ibdev, u8 port,
return ret; return ret;
} }
/*
 * alloc_pd - allocate a protection domain for @ibdev (removed by this
 * commit in favor of rdmavt's implementation).
 * @ibdev: the IB device the PD belongs to
 * @context: user context (unused here; may be NULL for kernel PDs)
 * @udata: user-space data; non-NULL iff the PD is created from user space
 *
 * Returns a pointer to the embedded struct ib_pd on success, or an
 * ERR_PTR (-ENOMEM) on allocation failure or when the advertised PD
 * limit (hfi1_max_pds) has been reached.
 */
static struct ib_pd *alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
{
struct hfi1_ibdev *dev = to_idev(ibdev);
struct hfi1_pd *pd;
struct ib_pd *ret;
/*
* This is actually totally arbitrary. Some correctness tests
* assume there's a maximum number of PDs that can be allocated.
* We don't actually have this limit, but we fail the test if
* we allow allocations of more than we report for this value.
*/
pd = kmalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
ret = ERR_PTR(-ENOMEM);
goto bail;
}
/* Enforce the advertised hfi1_max_pds cap under n_pds_lock. */
spin_lock(&dev->n_pds_lock);
if (dev->n_pds_allocated == hfi1_max_pds) {
spin_unlock(&dev->n_pds_lock);
kfree(pd);
ret = ERR_PTR(-ENOMEM);
goto bail;
}
dev->n_pds_allocated++;
spin_unlock(&dev->n_pds_lock);
/* ib_alloc_pd() will initialize pd->ibpd. */
pd->user = udata != NULL;
ret = &pd->ibpd;
bail:
return ret;
}
/*
 * dealloc_pd - free a protection domain previously created by alloc_pd
 * (removed by this commit in favor of rdmavt's implementation).
 * @ibpd: the embedded ib_pd of the PD to free
 *
 * Decrements the device's PD accounting under n_pds_lock and frees the
 * containing struct hfi1_pd. Always returns 0.
 */
static int dealloc_pd(struct ib_pd *ibpd)
{
struct hfi1_pd *pd = to_ipd(ibpd);
struct hfi1_ibdev *dev = to_idev(ibpd->device);
spin_lock(&dev->n_pds_lock);
dev->n_pds_allocated--;
spin_unlock(&dev->n_pds_lock);
kfree(pd);
return 0;
}
/* /*
* convert ah port,sl to sc * convert ah port,sl to sc
*/ */
...@@ -1920,7 +1865,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ...@@ -1920,7 +1865,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
init_ibport(ppd + i); init_ibport(ppd + i);
/* Only need to initialize non-zero fields. */ /* Only need to initialize non-zero fields. */
spin_lock_init(&dev->n_pds_lock);
spin_lock_init(&dev->n_ahs_lock); spin_lock_init(&dev->n_ahs_lock);
spin_lock_init(&dev->n_cqs_lock); spin_lock_init(&dev->n_cqs_lock);
spin_lock_init(&dev->n_qps_lock); spin_lock_init(&dev->n_qps_lock);
...@@ -2029,8 +1974,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd) ...@@ -2029,8 +1974,8 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
ibdev->query_gid = query_gid; ibdev->query_gid = query_gid;
ibdev->alloc_ucontext = alloc_ucontext; ibdev->alloc_ucontext = alloc_ucontext;
ibdev->dealloc_ucontext = dealloc_ucontext; ibdev->dealloc_ucontext = dealloc_ucontext;
ibdev->alloc_pd = alloc_pd; ibdev->alloc_pd = NULL;
ibdev->dealloc_pd = dealloc_pd; ibdev->dealloc_pd = NULL;
ibdev->create_ah = create_ah; ibdev->create_ah = create_ah;
ibdev->destroy_ah = destroy_ah; ibdev->destroy_ah = destroy_ah;
ibdev->modify_ah = modify_ah; ibdev->modify_ah = modify_ah;
......
...@@ -234,12 +234,6 @@ struct hfi1_mcast { ...@@ -234,12 +234,6 @@ struct hfi1_mcast {
int n_attached; int n_attached;
}; };
/*
 * Protection domain: hfi1's private PD wrapper around struct ib_pd
 * (removed by this commit; replaced by struct rvt_pd from rdmavt).
 */
struct hfi1_pd {
struct ib_pd ibpd; /* embedded IB core PD; container_of target for to_ipd() */
int user; /* non-zero if created from user space */
};
/* Address Handle */ /* Address Handle */
struct hfi1_ah { struct hfi1_ah {
struct ib_ah ibah; struct ib_ah ibah;
...@@ -776,8 +770,6 @@ struct hfi1_ibdev { ...@@ -776,8 +770,6 @@ struct hfi1_ibdev {
u64 n_kmem_wait; u64 n_kmem_wait;
u64 n_send_schedule; u64 n_send_schedule;
u32 n_pds_allocated; /* number of PDs allocated for device */
spinlock_t n_pds_lock;
u32 n_ahs_allocated; /* number of AHs allocated for device */ u32 n_ahs_allocated; /* number of AHs allocated for device */
spinlock_t n_ahs_lock; spinlock_t n_ahs_lock;
u32 n_cqs_allocated; /* number of CQs allocated for device */ u32 n_cqs_allocated; /* number of CQs allocated for device */
...@@ -817,11 +809,6 @@ static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr) ...@@ -817,11 +809,6 @@ static inline struct hfi1_mr *to_imr(struct ib_mr *ibmr)
return container_of(ibmr, struct hfi1_mr, ibmr); return container_of(ibmr, struct hfi1_mr, ibmr);
} }
/*
 * to_ipd - convert an embedded ib_pd pointer to its containing hfi1_pd
 * (removed by this commit; callers now use ibpd_to_rvtpd() instead).
 */
static inline struct hfi1_pd *to_ipd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct hfi1_pd, ibpd);
}
static inline struct hfi1_ah *to_iah(struct ib_ah *ibah) static inline struct hfi1_ah *to_iah(struct ib_ah *ibah)
{ {
return container_of(ibah, struct hfi1_ah, ibah); return container_of(ibah, struct hfi1_ah, ibah);
...@@ -983,7 +970,7 @@ int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region); ...@@ -983,7 +970,7 @@ int hfi1_alloc_lkey(struct hfi1_mregion *mr, int dma_region);
void hfi1_free_lkey(struct hfi1_mregion *mr); void hfi1_free_lkey(struct hfi1_mregion *mr);
int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct hfi1_pd *pd, int hfi1_lkey_ok(struct hfi1_lkey_table *rkt, struct rvt_pd *pd,
struct hfi1_sge *isge, struct ib_sge *sge, int acc); struct hfi1_sge *isge, struct ib_sge *sge, int acc);
int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge, int hfi1_rkey_ok(struct hfi1_qp *qp, struct hfi1_sge *sge,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment