Commit 65ca8d96 authored by Raju Rangoju, committed by Jason Gunthorpe

rdma/cxgb4: Add support for 64Byte cqes

This patch adds support in iw_cxgb4 for extending CQEs from the existing
32-byte size to 64 bytes.

It also adds backward-compatibility support for 32-byte CQEs so that older
userspace libraries continue to work.
Signed-off-by: Raju Rangoju <rajur@chelsio.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 15039efa
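As background for the diff below, here is a minimal standalone sketch of the size-based negotiation this patch introduces: the kernel treats a create_cq command buffer smaller than struct c4iw_create_cq as coming from an old 32-byte-CQE library, and only 64B-aware libraries receive the new 'flags' field in the response. The struct layouts here are simplified stand-ins for the uapi definitions in the diff; negotiate_cqe_size() and main() are illustrative helpers, not driver code.

#include <stdio.h>
#include <stddef.h>
#include <string.h>

enum { C4IW_64B_CQE = (1 << 0) };        /* mirrors the uapi flag added below */

struct c4iw_create_cq {                  /* new create_cq command (this patch) */
        unsigned int flags;
        unsigned int reserved;
};

struct c4iw_create_cq_resp {             /* simplified response; 'flags' replaces 'reserved' */
        unsigned long long key, gts_key, memsize;
        unsigned int cqid, size, qid_mask;
        unsigned int flags;              /* only copied out to 64B-aware libraries */
};

/*
 * Mirrors the decision made in c4iw_create_cq(): an old library passes a
 * command buffer smaller than struct c4iw_create_cq, so it gets 32-byte
 * CQEs and a response truncated just before 'flags'.
 */
static size_t negotiate_cqe_size(size_t udata_inlen, struct c4iw_create_cq_resp *resp)
{
        int is_32b_cqe = udata_inlen < sizeof(struct c4iw_create_cq);

        memset(resp, 0, sizeof(*resp));
        if (!is_32b_cqe)
                resp->flags |= C4IW_64B_CQE;   /* tell userspace 64B CQEs are in use */

        return is_32b_cqe ? sizeof(*resp) - sizeof(resp->flags) : sizeof(*resp);
}

int main(void)
{
        struct c4iw_create_cq_resp resp;
        size_t copied;

        copied = negotiate_cqe_size(0, &resp);                             /* old library */
        printf("old library: copy %zu response bytes, flags=0x%x -> 32B CQEs\n",
               copied, resp.flags);

        copied = negotiate_cqe_size(sizeof(struct c4iw_create_cq), &resp); /* new library */
        printf("new library: copy %zu response bytes, flags=0x%x -> 64B CQEs\n",
               copied, resp.flags);
        return 0;
}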
@@ -77,6 +77,10 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         int user = (uctx != &rdev->uctx);
         int ret;
         struct sk_buff *skb;
+        struct c4iw_ucontext *ucontext = NULL;
+
+        if (user)
+                ucontext = container_of(uctx, struct c4iw_ucontext, uctx);

         cq->cqid = c4iw_get_cqid(rdev, uctx);
         if (!cq->cqid) {
@@ -100,6 +104,16 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         dma_unmap_addr_set(cq, mapping, cq->dma_addr);
         memset(cq->queue, 0, cq->memsize);

+        if (user && ucontext->is_32b_cqe) {
+                cq->qp_errp = &((struct t4_status_page *)
+                                ((u8 *)cq->queue + (cq->size - 1) *
+                                 (sizeof(*cq->queue) / 2)))->qp_err;
+        } else {
+                cq->qp_errp = &((struct t4_status_page *)
+                                ((u8 *)cq->queue + (cq->size - 1) *
+                                 sizeof(*cq->queue)))->qp_err;
+        }
+
         /* build fw_ri_res_wr */
         wr_len = sizeof *res_wr + sizeof *res;

@@ -132,7 +146,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                         FW_RI_RES_WR_IQPCIECH_V(2) |
                         FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
                         FW_RI_RES_WR_IQO_F |
-                        FW_RI_RES_WR_IQESIZE_V(1));
+                        ((user && ucontext->is_32b_cqe) ?
+                         FW_RI_RES_WR_IQESIZE_V(1) :
+                         FW_RI_RES_WR_IQESIZE_V(2)));
         res->u.cq.iqsize = cpu_to_be16(cq->size);
         res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

@@ -884,6 +900,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         int vector = attr->comp_vector;
         struct c4iw_dev *rhp;
         struct c4iw_cq *chp;
+        struct c4iw_create_cq ucmd;
         struct c4iw_create_cq_resp uresp;
         struct c4iw_ucontext *ucontext = NULL;
         int ret, wr_len;
@@ -899,9 +916,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
         if (vector >= rhp->rdev.lldi.nciq)
                 return ERR_PTR(-EINVAL);

+        if (ib_context) {
+                ucontext = to_c4iw_ucontext(ib_context);
+                if (udata->inlen < sizeof(ucmd))
+                        ucontext->is_32b_cqe = 1;
+        }
+
         chp = kzalloc(sizeof(*chp), GFP_KERNEL);
         if (!chp)
                 return ERR_PTR(-ENOMEM);

         chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
         if (!chp->wr_waitp) {
                 ret = -ENOMEM;
@@ -916,9 +940,6 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                 goto err_free_wr_wait;
         }

-        if (ib_context)
-                ucontext = to_c4iw_ucontext(ib_context);
-
         /* account for the status page. */
         entries++;

@@ -942,13 +963,15 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,

         if (hwentries < 64)
                 hwentries = 64;
-        memsize = hwentries * sizeof *chp->cq.queue;
+
+        memsize = hwentries * ((ucontext && ucontext->is_32b_cqe) ?
+                               (sizeof(*chp->cq.queue) / 2) : sizeof(*chp->cq.queue));

         /*
          * memsize must be a multiple of the page size if its a user cq.
          */
         if (ucontext)
                 memsize = roundup(memsize, PAGE_SIZE);
         chp->cq.size = hwentries;
         chp->cq.memsize = memsize;
         chp->cq.vector = vector;
@@ -979,6 +1002,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                 if (!mm2)
                         goto err_free_mm;

+                memset(&uresp, 0, sizeof(uresp));
                 uresp.qid_mask = rhp->rdev.cqmask;
                 uresp.cqid = chp->cq.cqid;
                 uresp.size = chp->cq.size;
@@ -988,9 +1012,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
                 ucontext->key += PAGE_SIZE;
                 uresp.gts_key = ucontext->key;
                 ucontext->key += PAGE_SIZE;
+                /* communicate to the userspace that
+                 * kernel driver supports 64B CQE
+                 */
+                uresp.flags |= C4IW_64B_CQE;
+
                 spin_unlock(&ucontext->mmap_lock);
                 ret = ib_copy_to_udata(udata, &uresp,
-                                       sizeof(uresp) - sizeof(uresp.reserved));
+                                       ucontext->is_32b_cqe ?
+                                       sizeof(uresp) - sizeof(uresp.flags) :
+                                       sizeof(uresp));
                 if (ret)
                         goto err_free_mm2;

......
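As an aside on the create_cq() changes above: the t4_status_page that carries qp_err occupies the last slot of the CQ queue, so its byte offset depends on the negotiated CQE size, which is why cq->qp_errp is computed with two different strides. A small standalone sketch of that offset arithmetic follows; status_page_offset() and the sample queue size are illustrative, not driver code.

#include <stdio.h>
#include <stddef.h>

#define CQE_SIZE_64B 64u      /* sizeof(struct t4_cqe) after this patch */
#define CQE_SIZE_32B 32u      /* CQE size kept for old userspace libraries */

/* The status page sits in the final slot: offset = (size - 1) * cqe_size. */
static size_t status_page_offset(unsigned int cq_size, int is_32b_cqe)
{
        return (size_t)(cq_size - 1) * (is_32b_cqe ? CQE_SIZE_32B : CQE_SIZE_64B);
}

int main(void)
{
        unsigned int cq_size = 64;    /* minimum hwentries used by the driver */

        printf("64B CQEs: qp_err at byte offset %zu\n", status_page_offset(cq_size, 0));
        printf("32B CQEs: qp_err at byte offset %zu\n", status_page_offset(cq_size, 1));
        return 0;
}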
@@ -70,9 +70,10 @@ static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
                  CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
                  CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));

-        pr_debug("%016llx %016llx %016llx %016llx\n",
-                 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
-                 be64_to_cpu(p[3]));
+        pr_debug("%016llx %016llx %016llx %016llx - %016llx %016llx %016llx %016llx\n",
+                 be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
+                 be64_to_cpu(p[3]), be64_to_cpu(p[4]), be64_to_cpu(p[5]),
+                 be64_to_cpu(p[6]), be64_to_cpu(p[7]));

         /*
          * Ingress WRITE and READ_RESP errors provide
......
@@ -566,6 +566,7 @@ struct c4iw_ucontext {
         spinlock_t mmap_lock;
         struct list_head mmaps;
         struct kref kref;
+        bool is_32b_cqe;
 };

 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
......
@@ -179,9 +179,20 @@ struct t4_cqe {
                         __be32 wrid_hi;
                         __be32 wrid_low;
                 } gen;
+                struct {
+                        __be32 stag;
+                        __be32 msn;
+                        __be32 reserved;
+                        __be32 abs_rqe_idx;
+                } srcqe;
+                struct {
+                        __be64 imm_data;
+                } imm_data_rcqe;
+
                 u64 drain_cookie;
+                __be64 flits[3];
         } u;
-        __be64 reserved;
+        __be64 reserved[3];
         __be64 bits_type_ts;
 };
@@ -565,6 +576,7 @@ struct t4_cq {
         u16 cidx_inc;
         u8 gen;
         u8 error;
+        u8 *qp_errp;
         unsigned long flags;
 };

@@ -698,12 +710,12 @@ static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)

 static inline int t4_cq_in_error(struct t4_cq *cq)
 {
-        return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
+        return *cq->qp_errp;
 }

 static inline void t4_set_cq_in_error(struct t4_cq *cq)
 {
-        ((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
+        *cq->qp_errp = 1;
 }
 #endif
......
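For reference, a quick standalone check (illustration only; it assumes the 32-byte baseline stated in the commit message and uses only the t4_cqe changes above) of how the new union members and the widened reserved area double the CQE size:

#include <stdio.h>

int main(void)
{
        /*
         * Before the patch: 16 bytes of fixed fields, an 8-byte union
         * (its largest arm is 8 bytes) and one 8-byte reserved word.
         */
        unsigned int old_cqe = 16 + 8 + 8;      /* 32 bytes */

        /*
         * After the patch: __be64 flits[3] pads the union to 24 bytes and
         * 'reserved' becomes a 3-element array (another 24 bytes).
         */
        unsigned int new_cqe = 16 + 24 + 24;    /* 64 bytes */

        printf("t4_cqe: %u -> %u bytes\n", old_cqe, new_cqe);
        return 0;
}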
@@ -44,6 +44,16 @@
  * In particular do not use pointer types -- pass pointers in __aligned_u64
  * instead.
  */

+enum {
+        C4IW_64B_CQE = (1 << 0)
+};
+
+struct c4iw_create_cq {
+        __u32 flags;
+        __u32 reserved;
+};
+
 struct c4iw_create_cq_resp {
         __aligned_u64 key;
         __aligned_u64 gts_key;
@@ -51,7 +61,7 @@ struct c4iw_create_cq_resp {
         __u32 cqid;
         __u32 size;
         __u32 qid_mask;
-        __u32 reserved; /* explicit padding (optional for i386) */
+        __u32 flags;
 };

 enum {
......