Commit 34f4c955 authored by Guy Levi, committed by Jason Gunthorpe

IB/mlx5: Use fragmented QP's buffer for in-kernel users

The current implementation of QP creation requires contiguous memory. Such
a requirement is problematic once memory is fragmented or the system is low
on memory, as it causes failures in dma_zalloc_coherent().

This patch takes advantage of the new mlx5_core API which allocates a
fragmented buffer, making QP creation much more resilient to memory
fragmentation. The data-path code was adapted to the fact that WQEs can
cross fragment boundaries.

We also take the opportunity to fix some cosmetic legacy coding-convention
issues that were within the scope of this feature.
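
For illustration only (this sketch is not part of the patch), the following
standalone C program shows the edge-handling idea in a simplified form, in
the spirit of the memcpy_send_wqe() helper added here: a buffer is built
from fixed-size, non-contiguous fragments, the writer keeps a pointer to
the current fragment's "edge" (the first address past its end), and
whenever a copy reaches that edge it remaps to the next fragment and
continues. All names (frag_buf, frag_buf_copy, FRAG_SIZE, ...) are
hypothetical and are not part of the mlx5 driver API.

/*
 * Standalone sketch (not kernel code): a buffer made of fixed-size
 * fragments, and a writer that copies data across fragment edges by
 * copying up to the current edge, then remapping to the next fragment.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FRAG_SIZE 64	/* hypothetical fragment size */
#define NUM_FRAGS 4	/* hypothetical fragment count */

struct frag_buf {
	char *frags[NUM_FRAGS];	/* non-contiguous fragments */
};

/* Translate a linear offset into an address inside the right fragment. */
static char *frag_buf_offset(struct frag_buf *buf, size_t offset)
{
	return buf->frags[offset / FRAG_SIZE] + offset % FRAG_SIZE;
}

/* First address past the fragment that contains 'offset' (the "edge"). */
static char *frag_buf_edge(struct frag_buf *buf, size_t offset)
{
	return buf->frags[offset / FRAG_SIZE] + FRAG_SIZE;
}

/* Copy 'n' bytes starting at linear offset 'pos', crossing edges as needed. */
static void frag_buf_copy(struct frag_buf *buf, size_t pos,
			  const void *src, size_t n)
{
	char *seg = frag_buf_offset(buf, pos);
	char *cur_edge = frag_buf_edge(buf, pos);

	while (n) {
		size_t room = cur_edge - seg;
		size_t chunk = n < room ? n : room;

		memcpy(seg, src, chunk);
		n -= chunk;
		src = (const char *)src + chunk;
		pos += chunk;
		seg += chunk;

		/* Reached the edge: remap to the next fragment. */
		if (seg == cur_edge && n) {
			seg = frag_buf_offset(buf, pos);
			cur_edge = frag_buf_edge(buf, pos);
		}
	}
}

int main(void)
{
	struct frag_buf buf;
	char payload[150];
	int i;

	for (i = 0; i < NUM_FRAGS; i++)
		buf.frags[i] = calloc(1, FRAG_SIZE);
	memset(payload, 'x', sizeof(payload));

	/* Write 150 bytes starting at offset 40: spans three fragments. */
	frag_buf_copy(&buf, 40, payload, sizeof(payload));

	printf("last byte of fragment 0: %c\n", buf.frags[0][FRAG_SIZE - 1]);
	printf("first byte of fragment 2: %c\n", buf.frags[2][0]);

	for (i = 0; i < NUM_FRAGS; i++)
		free(buf.frags[i]);
	return 0;
}

Writing 150 bytes at offset 40 with 64-byte fragments crosses two edges;
the program prints the last byte of fragment 0 and the first byte of
fragment 2 to show that both sides of each edge were written.
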
Signed-off-by: Guy Levi <guyle@mellanox.com>
Reviewed-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 20e5a59b
@@ -257,6 +257,7 @@ enum mlx5_ib_rq_flags {
}; };
struct mlx5_ib_wq { struct mlx5_ib_wq {
struct mlx5_frag_buf_ctrl fbc;
u64 *wrid; u64 *wrid;
u32 *wr_data; u32 *wr_data;
struct wr_list *w_list; struct wr_list *w_list;
@@ -275,7 +276,7 @@ struct mlx5_ib_wq {
unsigned tail; unsigned tail;
u16 cur_post; u16 cur_post;
u16 last_poll; u16 last_poll;
void *qend; void *cur_edge;
}; };
enum mlx5_ib_wq_flags { enum mlx5_ib_wq_flags {
......
@@ -108,21 +108,6 @@ static int is_sqp(enum ib_qp_type qp_type)
return is_qp0(qp_type) || is_qp1(qp_type); return is_qp0(qp_type) || is_qp1(qp_type);
} }
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
return mlx5_buf_offset(&qp->buf, offset);
}
static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
/** /**
* mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space. * mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
* *
@@ -917,6 +902,30 @@ static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn); mlx5_ib_free_bfreg(dev, &context->bfregi, qp->bfregn);
} }
/* get_sq_edge - Get the next nearby edge.
*
* An 'edge' is defined as the first following address after the end
* of the fragment or the SQ. Accordingly, during the WQE construction
* which repetitively increases the pointer to write the next data, it
* simply should check if it gets to an edge.
*
* @sq - SQ buffer.
* @idx - Stride index in the SQ buffer.
*
* Return:
* The new edge.
*/
static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx)
{
void *fragment_end;
fragment_end = mlx5_frag_buf_get_wqe
(&sq->fbc,
mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx));
return fragment_end + MLX5_SEND_WQE_BB;
}
static int create_kernel_qp(struct mlx5_ib_dev *dev, static int create_kernel_qp(struct mlx5_ib_dev *dev,
struct ib_qp_init_attr *init_attr, struct ib_qp_init_attr *init_attr,
struct mlx5_ib_qp *qp, struct mlx5_ib_qp *qp,
@@ -955,13 +964,29 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf); err = mlx5_frag_buf_alloc_node(dev->mdev, base->ubuffer.buf_size,
&qp->buf, dev->mdev->priv.numa_node);
if (err) { if (err) {
mlx5_ib_dbg(dev, "err %d\n", err); mlx5_ib_dbg(dev, "err %d\n", err);
return err; return err;
} }
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt); if (qp->rq.wqe_cnt)
mlx5_init_fbc(qp->buf.frags, qp->rq.wqe_shift,
ilog2(qp->rq.wqe_cnt), &qp->rq.fbc);
if (qp->sq.wqe_cnt) {
int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) /
MLX5_SEND_WQE_BB;
mlx5_init_fbc_offset(qp->buf.frags +
(qp->sq.offset / PAGE_SIZE),
ilog2(MLX5_SEND_WQE_BB),
ilog2(qp->sq.wqe_cnt),
sq_strides_offset, &qp->sq.fbc);
qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
}
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) + *inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages; MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
*in = kvzalloc(*inlen, GFP_KERNEL); *in = kvzalloc(*inlen, GFP_KERNEL);
@@ -983,8 +1008,9 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
qp->flags |= MLX5_IB_QP_SQPN_QP1; qp->flags |= MLX5_IB_QP_SQPN_QP1;
} }
mlx5_fill_page_array(&qp->buf, mlx5_fill_page_frag_array(&qp->buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas)); (__be64 *)MLX5_ADDR_OF(create_qp_in,
*in, pas));
err = mlx5_db_alloc(dev->mdev, &qp->db); err = mlx5_db_alloc(dev->mdev, &qp->db);
if (err) { if (err) {
@@ -1024,7 +1050,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
kvfree(*in); kvfree(*in);
err_buf: err_buf:
mlx5_buf_free(dev->mdev, &qp->buf); mlx5_frag_buf_free(dev->mdev, &qp->buf);
return err; return err;
} }
@@ -1036,7 +1062,7 @@ static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
kvfree(qp->sq.wr_data); kvfree(qp->sq.wr_data);
kvfree(qp->rq.wrid); kvfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db); mlx5_db_free(dev->mdev, &qp->db);
mlx5_buf_free(dev->mdev, &qp->buf); mlx5_frag_buf_free(dev->mdev, &qp->buf);
} }
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr) static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -3476,6 +3502,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
qp->sq.head = 0; qp->sq.head = 0;
qp->sq.tail = 0; qp->sq.tail = 0;
qp->sq.cur_post = 0; qp->sq.cur_post = 0;
if (qp->sq.wqe_cnt)
qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
qp->sq.last_poll = 0; qp->sq.last_poll = 0;
qp->db.db[MLX5_RCV_DBR] = 0; qp->db.db[MLX5_RCV_DBR] = 0;
qp->db.db[MLX5_SND_DBR] = 0; qp->db.db[MLX5_SND_DBR] = 0;
@@ -3750,6 +3778,62 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return err; return err;
} }
static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
u32 wqe_sz, void **cur_edge)
{
u32 idx;
idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1);
*cur_edge = get_sq_edge(sq, idx);
*seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx);
}
/* handle_post_send_edge - Check if we get to SQ edge. If yes, update to the
* next nearby edge and get new address translation for current WQE position.
* @sq - SQ buffer.
* @seg: Current WQE position (16B aligned).
* @wqe_sz: Total current WQE size [16B].
* @cur_edge: Updated current edge.
*/
static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg,
u32 wqe_sz, void **cur_edge)
{
if (likely(*seg != *cur_edge))
return;
_handle_post_send_edge(sq, seg, wqe_sz, cur_edge);
}
/* memcpy_send_wqe - copy data from src to WQE and update the relevant WQ's
* pointers. At the end @seg is aligned to 16B regardless the copied size.
* @sq - SQ buffer.
* @cur_edge: Updated current edge.
* @seg: Current WQE position (16B aligned).
* @wqe_sz: Total current WQE size [16B].
* @src: Pointer to copy from.
* @n: Number of bytes to copy.
*/
static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge,
void **seg, u32 *wqe_sz, const void *src,
size_t n)
{
while (likely(n)) {
size_t leftlen = *cur_edge - *seg;
size_t copysz = min_t(size_t, leftlen, n);
size_t stride;
memcpy(*seg, src, copysz);
n -= copysz;
src += copysz;
stride = !n ? ALIGN(copysz, 16) : copysz;
*seg += stride;
*wqe_sz += stride >> 4;
handle_post_send_edge(sq, seg, *wqe_sz, cur_edge);
}
}
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{ {
struct mlx5_ib_cq *cq; struct mlx5_ib_cq *cq;
@@ -3775,11 +3859,10 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
rseg->reserved = 0; rseg->reserved = 0;
} }
static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg, static void set_eth_seg(const struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
const struct ib_send_wr *wr, void *qend, void **seg, int *size, void **cur_edge)
struct mlx5_ib_qp *qp, int *size)
{ {
void *seg = eseg; struct mlx5_wqe_eth_seg *eseg = *seg;
memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg)); memset(eseg, 0, sizeof(struct mlx5_wqe_eth_seg));
@@ -3787,45 +3870,41 @@ static void *set_eth_seg(struct mlx5_wqe_eth_seg *eseg,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM |
MLX5_ETH_WQE_L4_CSUM; MLX5_ETH_WQE_L4_CSUM;
seg += sizeof(struct mlx5_wqe_eth_seg);
*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
if (wr->opcode == IB_WR_LSO) { if (wr->opcode == IB_WR_LSO) {
struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr); struct ib_ud_wr *ud_wr = container_of(wr, struct ib_ud_wr, wr);
int size_of_inl_hdr_start = sizeof(eseg->inline_hdr.start); size_t left, copysz;
u64 left, leftlen, copysz;
void *pdata = ud_wr->header; void *pdata = ud_wr->header;
size_t stride;
left = ud_wr->hlen; left = ud_wr->hlen;
eseg->mss = cpu_to_be16(ud_wr->mss); eseg->mss = cpu_to_be16(ud_wr->mss);
eseg->inline_hdr.sz = cpu_to_be16(left); eseg->inline_hdr.sz = cpu_to_be16(left);
/* /* memcpy_send_wqe should get a 16B align address. Hence, we
* check if there is space till the end of queue, if yes, * first copy up to the current edge and then, if needed,
* copy all in one shot, otherwise copy till the end of queue, * fall-through to memcpy_send_wqe.
* rollback and than the copy the left
*/ */
leftlen = qend - (void *)eseg->inline_hdr.start; copysz = min_t(u64, *cur_edge - (void *)eseg->inline_hdr.start,
copysz = min_t(u64, leftlen, left); left);
memcpy(eseg->inline_hdr.start, pdata, copysz);
memcpy(seg - size_of_inl_hdr_start, pdata, copysz); stride = ALIGN(sizeof(struct mlx5_wqe_eth_seg) -
sizeof(eseg->inline_hdr.start) + copysz, 16);
if (likely(copysz > size_of_inl_hdr_start)) { *size += stride / 16;
seg += ALIGN(copysz - size_of_inl_hdr_start, 16); *seg += stride;
*size += ALIGN(copysz - size_of_inl_hdr_start, 16) / 16;
} if (copysz < left) {
handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
if (unlikely(copysz < left)) { /* the last wqe in the queue */
seg = mlx5_get_send_wqe(qp, 0);
left -= copysz; left -= copysz;
pdata += copysz; pdata += copysz;
memcpy(seg, pdata, left); memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata,
seg += ALIGN(left, 16); left);
*size += ALIGN(left, 16) / 16;
} }
return;
} }
return seg; *seg += sizeof(struct mlx5_wqe_eth_seg);
*size += sizeof(struct mlx5_wqe_eth_seg) / 16;
} }
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg, static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
@@ -4084,24 +4163,6 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey); dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
} }
static void set_reg_umr_inline_seg(void *seg, struct mlx5_ib_qp *qp,
struct mlx5_ib_mr *mr, int mr_list_size)
{
void *qend = qp->sq.qend;
void *addr = mr->descs;
int copy;
if (unlikely(seg + mr_list_size > qend)) {
copy = qend - seg;
memcpy(seg, addr, copy);
addr += copy;
mr_list_size -= copy;
seg = mlx5_get_send_wqe(qp, 0);
}
memcpy(seg, addr, mr_list_size);
seg += mr_list_size;
}
static __be32 send_ieth(const struct ib_send_wr *wr) static __be32 send_ieth(const struct ib_send_wr *wr)
{ {
switch (wr->opcode) { switch (wr->opcode) {
@@ -4135,40 +4196,48 @@ static u8 wq_sig(void *wqe)
} }
static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr, static int set_data_inl_seg(struct mlx5_ib_qp *qp, const struct ib_send_wr *wr,
void *wqe, int *sz) void **wqe, int *wqe_sz, void **cur_edge)
{ {
struct mlx5_wqe_inline_seg *seg; struct mlx5_wqe_inline_seg *seg;
void *qend = qp->sq.qend; size_t offset;
void *addr;
int inl = 0; int inl = 0;
int copy;
int len;
int i; int i;
seg = wqe; seg = *wqe;
wqe += sizeof(*seg); *wqe += sizeof(*seg);
offset = sizeof(*seg);
for (i = 0; i < wr->num_sge; i++) { for (i = 0; i < wr->num_sge; i++) {
addr = (void *)(unsigned long)(wr->sg_list[i].addr); size_t len = wr->sg_list[i].length;
len = wr->sg_list[i].length; void *addr = (void *)(unsigned long)(wr->sg_list[i].addr);
inl += len; inl += len;
if (unlikely(inl > qp->max_inline_data)) if (unlikely(inl > qp->max_inline_data))
return -ENOMEM; return -ENOMEM;
if (unlikely(wqe + len > qend)) { while (likely(len)) {
copy = qend - wqe; size_t leftlen;
memcpy(wqe, addr, copy); size_t copysz;
addr += copy;
len -= copy; handle_post_send_edge(&qp->sq, wqe,
wqe = mlx5_get_send_wqe(qp, 0); *wqe_sz + (offset >> 4),
cur_edge);
leftlen = *cur_edge - *wqe;
copysz = min_t(size_t, leftlen, len);
memcpy(*wqe, addr, copysz);
len -= copysz;
addr += copysz;
*wqe += copysz;
offset += copysz;
} }
memcpy(wqe, addr, len);
wqe += len;
} }
seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG); seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16; *wqe_sz += ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
return 0; return 0;
} }
@@ -4281,7 +4350,8 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
} }
static int set_sig_data_segment(const struct ib_sig_handover_wr *wr, static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
struct mlx5_ib_qp *qp, void **seg, int *size) struct mlx5_ib_qp *qp, void **seg,
int *size, void **cur_edge)
{ {
struct ib_sig_attrs *sig_attrs = wr->sig_attrs; struct ib_sig_attrs *sig_attrs = wr->sig_attrs;
struct ib_mr *sig_mr = wr->sig_mr; struct ib_mr *sig_mr = wr->sig_mr;
@@ -4365,8 +4435,7 @@ static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
*seg += wqe_size; *seg += wqe_size;
*size += wqe_size / 16; *size += wqe_size / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
bsf = *seg; bsf = *seg;
ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len); ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
@@ -4375,8 +4444,7 @@ static int set_sig_data_segment(const struct ib_sig_handover_wr *wr,
*seg += sizeof(*bsf); *seg += sizeof(*bsf);
*size += sizeof(*bsf) / 16; *size += sizeof(*bsf) / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
return 0; return 0;
} }
@@ -4414,7 +4482,8 @@ static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
static int set_sig_umr_wr(const struct ib_send_wr *send_wr, static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
struct mlx5_ib_qp *qp, void **seg, int *size) struct mlx5_ib_qp *qp, void **seg, int *size,
void **cur_edge)
{ {
const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr); const struct ib_sig_handover_wr *wr = sig_handover_wr(send_wr);
struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr); struct mlx5_ib_mr *sig_mr = to_mmr(wr->sig_mr);
@@ -4446,16 +4515,14 @@ static int set_sig_umr_wr(const struct ib_send_wr *send_wr,
set_sig_umr_segment(*seg, xlt_size); set_sig_umr_segment(*seg, xlt_size);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn); set_sig_mkey_segment(*seg, wr, xlt_size, region_len, pdn);
*seg += sizeof(struct mlx5_mkey_seg); *seg += sizeof(struct mlx5_mkey_seg);
*size += sizeof(struct mlx5_mkey_seg) / 16; *size += sizeof(struct mlx5_mkey_seg) / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
ret = set_sig_data_segment(wr, qp, seg, size); ret = set_sig_data_segment(wr, qp, seg, size, cur_edge);
if (ret) if (ret)
return ret; return ret;
@@ -4492,11 +4559,11 @@ static int set_psv_wr(struct ib_sig_domain *domain,
static int set_reg_wr(struct mlx5_ib_qp *qp, static int set_reg_wr(struct mlx5_ib_qp *qp,
const struct ib_reg_wr *wr, const struct ib_reg_wr *wr,
void **seg, int *size) void **seg, int *size, void **cur_edge)
{ {
struct mlx5_ib_mr *mr = to_mmr(wr->mr); struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
int mr_list_size = mr->ndescs * mr->desc_size; size_t mr_list_size = mr->ndescs * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) { if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
@@ -4508,18 +4575,17 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
set_reg_umr_seg(*seg, mr, umr_inline); set_reg_umr_seg(*seg, mr, umr_inline);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
set_reg_mkey_seg(*seg, mr, wr->key, wr->access); set_reg_mkey_seg(*seg, mr, wr->key, wr->access);
*seg += sizeof(struct mlx5_mkey_seg); *seg += sizeof(struct mlx5_mkey_seg);
*size += sizeof(struct mlx5_mkey_seg) / 16; *size += sizeof(struct mlx5_mkey_seg) / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
if (umr_inline) { if (umr_inline) {
set_reg_umr_inline_seg(*seg, qp, mr, mr_list_size); memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs,
*size += get_xlt_octo(mr_list_size); mr_list_size);
*size = ALIGN(*size, MLX5_SEND_WQE_BB >> 4);
} else { } else {
set_reg_data_seg(*seg, mr, pd); set_reg_data_seg(*seg, mr, pd);
*seg += sizeof(struct mlx5_wqe_data_seg); *seg += sizeof(struct mlx5_wqe_data_seg);
@@ -4528,32 +4594,31 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
return 0; return 0;
} }
static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size) static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size,
void **cur_edge)
{ {
set_linv_umr_seg(*seg); set_linv_umr_seg(*seg);
*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
set_linv_mkey_seg(*seg); set_linv_mkey_seg(*seg);
*seg += sizeof(struct mlx5_mkey_seg); *seg += sizeof(struct mlx5_mkey_seg);
*size += sizeof(struct mlx5_mkey_seg) / 16; *size += sizeof(struct mlx5_mkey_seg) / 16;
if (unlikely((*seg == qp->sq.qend))) handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
*seg = mlx5_get_send_wqe(qp, 0);
} }
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) static void dump_wqe(struct mlx5_ib_qp *qp, u32 idx, int size_16)
{ {
__be32 *p = NULL; __be32 *p = NULL;
int tidx = idx; u32 tidx = idx;
int i, j; int i, j;
pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx)); pr_debug("dump WQE index %u:\n", idx);
for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) { for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
if ((i & 0xf) == 0) { if ((i & 0xf) == 0) {
void *buf = mlx5_get_send_wqe(qp, tidx);
tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1); tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
p = buf; p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, tidx);
pr_debug("WQBB at %p:\n", (void *)p);
j = 0; j = 0;
} }
pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]), pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
@@ -4563,15 +4628,16 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
} }
static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg, static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl, struct mlx5_wqe_ctrl_seg **ctrl,
const struct ib_send_wr *wr, unsigned *idx, const struct ib_send_wr *wr, unsigned int *idx,
int *size, int nreq, bool send_signaled, bool solicited) int *size, void **cur_edge, int nreq,
bool send_signaled, bool solicited)
{ {
if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
return -ENOMEM; return -ENOMEM;
*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
*seg = mlx5_get_send_wqe(qp, *idx); *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx);
*ctrl = *seg; *ctrl = *seg;
*(uint32_t *)(*seg + 8) = 0; *(uint32_t *)(*seg + 8) = 0;
(*ctrl)->imm = send_ieth(wr); (*ctrl)->imm = send_ieth(wr);
@@ -4581,6 +4647,7 @@ static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
*seg += sizeof(**ctrl); *seg += sizeof(**ctrl);
*size = sizeof(**ctrl) / 16; *size = sizeof(**ctrl) / 16;
*cur_edge = qp->sq.cur_edge;
return 0; return 0;
} }
@@ -4588,17 +4655,18 @@ static int __begin_wqe(struct mlx5_ib_qp *qp, void **seg,
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
struct mlx5_wqe_ctrl_seg **ctrl, struct mlx5_wqe_ctrl_seg **ctrl,
const struct ib_send_wr *wr, unsigned *idx, const struct ib_send_wr *wr, unsigned *idx,
int *size, int nreq) int *size, void **cur_edge, int nreq)
{ {
return __begin_wqe(qp, seg, ctrl, wr, idx, size, nreq, return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
wr->send_flags & IB_SEND_SIGNALED, wr->send_flags & IB_SEND_SIGNALED,
wr->send_flags & IB_SEND_SOLICITED); wr->send_flags & IB_SEND_SOLICITED);
} }
static void finish_wqe(struct mlx5_ib_qp *qp, static void finish_wqe(struct mlx5_ib_qp *qp,
struct mlx5_wqe_ctrl_seg *ctrl, struct mlx5_wqe_ctrl_seg *ctrl,
u8 size, unsigned idx, u64 wr_id, void *seg, u8 size, void *cur_edge,
int nreq, u8 fence, u32 mlx5_opcode) unsigned int idx, u64 wr_id, int nreq, u8 fence,
u32 mlx5_opcode)
{ {
u8 opmod = 0; u8 opmod = 0;
@@ -4614,6 +4682,15 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
qp->sq.wqe_head[idx] = qp->sq.head + nreq; qp->sq.wqe_head[idx] = qp->sq.head + nreq;
qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
qp->sq.w_list[idx].next = qp->sq.cur_post; qp->sq.w_list[idx].next = qp->sq.cur_post;
/* We save the edge which was possibly updated during the WQE
* construction, into SQ's cache.
*/
seg = PTR_ALIGN(seg, MLX5_SEND_WQE_BB);
qp->sq.cur_edge = (unlikely(seg == cur_edge)) ?
get_sq_edge(&qp->sq, qp->sq.cur_post &
(qp->sq.wqe_cnt - 1)) :
cur_edge;
} }
static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr, static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
@@ -4624,11 +4701,10 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct mlx5_core_dev *mdev = dev->mdev; struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_qp *qp; struct mlx5_ib_qp *qp;
struct mlx5_ib_mr *mr; struct mlx5_ib_mr *mr;
struct mlx5_wqe_data_seg *dpseg;
struct mlx5_wqe_xrc_seg *xrc; struct mlx5_wqe_xrc_seg *xrc;
struct mlx5_bf *bf; struct mlx5_bf *bf;
void *cur_edge;
int uninitialized_var(size); int uninitialized_var(size);
void *qend;
unsigned long flags; unsigned long flags;
unsigned idx; unsigned idx;
int err = 0; int err = 0;
@@ -4650,7 +4726,6 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
qp = to_mqp(ibqp); qp = to_mqp(ibqp);
bf = &qp->bf; bf = &qp->bf;
qend = qp->sq.qend;
spin_lock_irqsave(&qp->sq.lock, flags); spin_lock_irqsave(&qp->sq.lock, flags);
@@ -4670,7 +4745,8 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
goto out; goto out;
} }
err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq); err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, &cur_edge,
nreq);
if (err) { if (err) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
err = -ENOMEM; err = -ENOMEM;
@@ -4719,14 +4795,15 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
case IB_WR_LOCAL_INV: case IB_WR_LOCAL_INV:
qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
set_linv_wr(qp, &seg, &size); set_linv_wr(qp, &seg, &size, &cur_edge);
num_sge = 0; num_sge = 0;
break; break;
case IB_WR_REG_MR: case IB_WR_REG_MR:
qp->sq.wr_data[idx] = IB_WR_REG_MR; qp->sq.wr_data[idx] = IB_WR_REG_MR;
ctrl->imm = cpu_to_be32(reg_wr(wr)->key); ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
err = set_reg_wr(qp, reg_wr(wr), &seg, &size); err = set_reg_wr(qp, reg_wr(wr), &seg, &size,
&cur_edge);
if (err) { if (err) {
*bad_wr = wr; *bad_wr = wr;
goto out; goto out;
@@ -4739,21 +4816,24 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
mr = to_mmr(sig_handover_wr(wr)->sig_mr); mr = to_mmr(sig_handover_wr(wr)->sig_mr);
ctrl->imm = cpu_to_be32(mr->ibmr.rkey); ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
err = set_sig_umr_wr(wr, qp, &seg, &size); err = set_sig_umr_wr(wr, qp, &seg, &size,
&cur_edge);
if (err) { if (err) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
*bad_wr = wr; *bad_wr = wr;
goto out; goto out;
} }
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
fence, MLX5_OPCODE_UMR); wr->wr_id, nreq, fence,
MLX5_OPCODE_UMR);
/* /*
* SET_PSV WQEs are not signaled and solicited * SET_PSV WQEs are not signaled and solicited
* on error * on error
*/ */
err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
&size, nreq, false, true); &size, &cur_edge, nreq, false,
true);
if (err) { if (err) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
err = -ENOMEM; err = -ENOMEM;
@@ -4770,10 +4850,12 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
goto out; goto out;
} }
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
fence, MLX5_OPCODE_SET_PSV); wr->wr_id, nreq, fence,
MLX5_OPCODE_SET_PSV);
err = __begin_wqe(qp, &seg, &ctrl, wr, &idx, err = __begin_wqe(qp, &seg, &ctrl, wr, &idx,
&size, nreq, false, true); &size, &cur_edge, nreq, false,
true);
if (err) { if (err) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
err = -ENOMEM; err = -ENOMEM;
@@ -4790,8 +4872,9 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
goto out; goto out;
} }
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, finish_wqe(qp, ctrl, seg, size, cur_edge, idx,
fence, MLX5_OPCODE_SET_PSV); wr->wr_id, nreq, fence,
MLX5_OPCODE_SET_PSV);
qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
num_sge = 0; num_sge = 0;
goto skip_psv; goto skip_psv;
@@ -4828,16 +4911,14 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
set_datagram_seg(seg, wr); set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg); seg += sizeof(struct mlx5_wqe_datagram_seg);
size += sizeof(struct mlx5_wqe_datagram_seg) / 16; size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
if (unlikely((seg == qend))) handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
seg = mlx5_get_send_wqe(qp, 0);
break; break;
case IB_QPT_UD: case IB_QPT_UD:
set_datagram_seg(seg, wr); set_datagram_seg(seg, wr);
seg += sizeof(struct mlx5_wqe_datagram_seg); seg += sizeof(struct mlx5_wqe_datagram_seg);
size += sizeof(struct mlx5_wqe_datagram_seg) / 16; size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
if (unlikely((seg == qend)))
seg = mlx5_get_send_wqe(qp, 0);
/* handle qp that supports ud offload */ /* handle qp that supports ud offload */
if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) { if (qp->flags & IB_QP_CREATE_IPOIB_UD_LSO) {
@@ -4847,11 +4928,9 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad)); memset(pad, 0, sizeof(struct mlx5_wqe_eth_pad));
seg += sizeof(struct mlx5_wqe_eth_pad); seg += sizeof(struct mlx5_wqe_eth_pad);
size += sizeof(struct mlx5_wqe_eth_pad) / 16; size += sizeof(struct mlx5_wqe_eth_pad) / 16;
set_eth_seg(wr, qp, &seg, &size, &cur_edge);
seg = set_eth_seg(seg, wr, qend, qp, &size); handle_post_send_edge(&qp->sq, &seg, size,
&cur_edge);
if (unlikely((seg == qend)))
seg = mlx5_get_send_wqe(qp, 0);
} }
break; break;
case MLX5_IB_QPT_REG_UMR: case MLX5_IB_QPT_REG_UMR:
@@ -4867,13 +4946,11 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
goto out; goto out;
seg += sizeof(struct mlx5_wqe_umr_ctrl_seg); seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16; size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
if (unlikely((seg == qend))) handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
seg = mlx5_get_send_wqe(qp, 0);
set_reg_mkey_segment(seg, wr); set_reg_mkey_segment(seg, wr);
seg += sizeof(struct mlx5_mkey_seg); seg += sizeof(struct mlx5_mkey_seg);
size += sizeof(struct mlx5_mkey_seg) / 16; size += sizeof(struct mlx5_mkey_seg) / 16;
if (unlikely((seg == qend))) handle_post_send_edge(&qp->sq, &seg, size, &cur_edge);
seg = mlx5_get_send_wqe(qp, 0);
break; break;
default: default:
@@ -4881,33 +4958,29 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
} }
if (wr->send_flags & IB_SEND_INLINE && num_sge) { if (wr->send_flags & IB_SEND_INLINE && num_sge) {
int uninitialized_var(sz); err = set_data_inl_seg(qp, wr, &seg, &size, &cur_edge);
err = set_data_inl_seg(qp, wr, seg, &sz);
if (unlikely(err)) { if (unlikely(err)) {
mlx5_ib_warn(dev, "\n"); mlx5_ib_warn(dev, "\n");
*bad_wr = wr; *bad_wr = wr;
goto out; goto out;
} }
size += sz;
} else { } else {
dpseg = seg;
for (i = 0; i < num_sge; i++) { for (i = 0; i < num_sge; i++) {
if (unlikely(dpseg == qend)) { handle_post_send_edge(&qp->sq, &seg, size,
seg = mlx5_get_send_wqe(qp, 0); &cur_edge);
dpseg = seg;
}
if (likely(wr->sg_list[i].length)) { if (likely(wr->sg_list[i].length)) {
set_data_ptr_seg(dpseg, wr->sg_list + i); set_data_ptr_seg
((struct mlx5_wqe_data_seg *)seg,
wr->sg_list + i);
size += sizeof(struct mlx5_wqe_data_seg) / 16; size += sizeof(struct mlx5_wqe_data_seg) / 16;
dpseg++; seg += sizeof(struct mlx5_wqe_data_seg);
} }
} }
} }
qp->next_fence = next_fence; qp->next_fence = next_fence;
finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
mlx5_ib_opcode[wr->opcode]); fence, mlx5_ib_opcode[wr->opcode]);
skip_psv: skip_psv:
if (0) if (0)
dump_wqe(qp, idx, size); dump_wqe(qp, idx, size);
@@ -4993,7 +5066,7 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
goto out; goto out;
} }
scat = get_recv_wqe(qp, ind); scat = mlx5_frag_buf_get_wqe(&qp->rq.fbc, ind);
if (qp->wq_sig) if (qp->wq_sig)
scat++; scat++;
......