Commit ea54b10c authored by Jack Morgenstein, committed by Roland Dreier

IB/mlx4: Use multiple WQ blocks to post smaller send WQEs

ConnectX HCA supports shrinking WQEs, so that a single work request
can be made of multiple units of wqe_shift.  This way, WRs can differ
in size, and do not have to be a power of 2 in size, saving memory and
speeding up send WR posting.  Unfortunately, if we do this then the
wqe_index field in CQEs can't be used to look up the WR ID anymore, so
our implementation does this only if selective signaling is off.

Further, on 32-bit platforms, we can't use vmap() to make the QP
buffer virtually contiguous. Thus we have to use constant-sized WRs to
make sure a WR is always fully within a single page-sized chunk.

Finally, we use WRs with the NOP opcode to avoid wrapping around the
queue buffer in the middle of posting a WR, and we set the
NoErrorCompletion bit to avoid getting completions with error for NOP
WRs.  However, NEC is only supported starting with firmware 2.2.232,
so we use constant-sized WRs for older firmware.  And, since MLX QPs
only support SEND, we use constant-sized WRs in this case.

When stamping during NOP posting, do the stamping only after the NOP
WQE's valid bit has been set.
Signed-off-by: Michael S. Tsirkin <mst@dev.mellanox.co.il>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent b57aacfa
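As a rough illustration of the sizing idea described in the commit message (a standalone sketch, not part of the patch), the C program below compares how many bytes one send WR occupies when WQEs can shrink to multiples of a 64-byte basic block versus when every slot is sized for the largest possible WR. All sizes are made up, and DIV_ROUND_UP/roundup_pow_of_two are local stand-ins for the kernel helpers.

/*
 * Standalone sketch, not part of the patch: bytes occupied by one send
 * WR with shrinking WQEs (multiples of a 64-byte basic block) versus a
 * queue whose every WQE is sized for the largest possible WR.
 * All numbers are illustrative; the helpers are local stand-ins.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned roundup_pow_of_two(unsigned x)
{
	unsigned p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned wqe_shift = 6;			/* 64-byte basic blocks */
	unsigned max_wr_bytes = 208;		/* largest WR this QP can post */
	unsigned this_wr_bytes = 80;		/* a small WR actually posted */

	/* Shrinking: this WR uses only as many basic blocks as it needs. */
	unsigned blocks = DIV_ROUND_UP(this_wr_bytes, 1U << wqe_shift);
	printf("shrinking:  %u-byte WR -> %u block(s) = %u bytes\n",
	       this_wr_bytes, blocks, blocks << wqe_shift);

	/* Fixed power-of-2 WQEs: every slot is sized for the largest WR. */
	printf("fixed WQEs: %u-byte WR -> %u bytes (slot sized for %u-byte max WR)\n",
	       this_wr_bytes, roundup_pow_of_two(max_wr_bytes), max_wr_bytes);
	return 0;
}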
@@ -326,6 +326,12 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 		MLX4_CQE_OPCODE_ERROR;
 
+	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
+		     is_send)) {
+		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+		return -EINVAL;
+	}
+
 	if (!*cur_qp ||
 	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
 		/*
@@ -348,8 +354,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
-		wqe_ctr = be16_to_cpu(cqe->wqe_index);
-		wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+		if (!(*cur_qp)->sq_signal_bits) {
+			wqe_ctr = be16_to_cpu(cqe->wqe_index);
+			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+		}
 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
 	} else if ((*cur_qp)->ibqp.srq) {
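The hunks above change how the send-queue tail is advanced in the CQ poll path. As a hedged illustration (a standalone sketch, not part of the patch), the toy wr_id ring below mimics the two strategies: when every WR is signaled the tail just increments per completion, while under selective signaling the CQE's wqe_index is used to resync the tail past unsignaled WRs. The ring contents and indices are invented for the example.

/*
 * Standalone sketch, not part of the patch: a toy wr_id ring showing why
 * the tail can simply be incremented when every WR is signaled, but must
 * be resynced from the CQE's wqe_index under selective signaling.
 */
#include <stdio.h>

#define WQE_CNT 8	/* stands in for qp->sq.wqe_cnt (a power of two) */

int main(void)
{
	unsigned long long wrid[WQE_CNT] = { 100, 101, 102, 103, 104, 105, 106, 107 };
	unsigned short tail = 0;

	/* Signal-all: each completion pops the next wr_id in posting order. */
	for (int i = 0; i < 3; i++)
		printf("signal-all completion -> wr_id %llu\n",
		       wrid[tail++ & (WQE_CNT - 1)]);

	/* Selective signaling: only the WR at index 5 was signaled, so the
	 * CQE's wqe_index is used to jump the tail past unsignaled WRs. */
	unsigned short wqe_index = 5;
	tail += (unsigned short)(wqe_index - tail);
	printf("selective  completion -> wr_id %llu\n",
	       wrid[tail++ & (WQE_CNT - 1)]);
	return 0;
}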
@@ -120,6 +120,8 @@ struct mlx4_ib_qp {
 	u32			doorbell_qpn;
 	__be32			sq_signal_bits;
+	unsigned		sq_next_wqe;
+	int			sq_max_wqes_per_wr;
 	int			sq_spare_wqes;
 	struct mlx4_ib_wq	sq;
@@ -30,6 +30,8 @@
  * SOFTWARE.
  */
 
+#include <linux/log2.h>
+
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
@@ -111,16 +113,87 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
 
 /*
  * Stamp a SQ WQE so that it is invalid if prefetched by marking the
- * first four bytes of every 64 byte chunk with 0xffffffff, except for
- * the very first chunk of the WQE.
+ * first four bytes of every 64 byte chunk with
+ *     0x7FFFFFF | (invalid_ownership_value << 31).
+ *
+ * When the max work request size is less than or equal to the WQE
+ * basic block size, as an optimization, we can stamp all WQEs with
+ * 0xffffffff, and skip the very first chunk of each WQE.
  */
-static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
 {
-	u32 *wqe = get_send_wqe(qp, n);
+	u32 *wqe;
 	int i;
+	int s;
+	int ind;
+	void *buf;
+	__be32 stamp;
 
-	for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16)
-		wqe[i] = 0xffffffff;
+	s = roundup(size, 1U << qp->sq.wqe_shift);
+	if (qp->sq_max_wqes_per_wr > 1) {
+		for (i = 0; i < s; i += 64) {
+			ind = (i >> qp->sq.wqe_shift) + n;
+			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
+						       cpu_to_be32(0xffffffff);
+			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
+			*wqe = stamp;
+		}
+	} else {
+		buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+		for (i = 64; i < s; i += 64) {
+			wqe = buf + i;
+			*wqe = 0xffffffff;
+		}
+	}
+}
+
+static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
+{
+	struct mlx4_wqe_ctrl_seg *ctrl;
+	struct mlx4_wqe_inline_seg *inl;
+	void *wqe;
+	int s;
+
+	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+	s = sizeof(struct mlx4_wqe_ctrl_seg);
+
+	if (qp->ibqp.qp_type == IB_QPT_UD) {
+		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
+		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
+		memset(dgram, 0, sizeof *dgram);
+		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
+		s += sizeof(struct mlx4_wqe_datagram_seg);
+	}
+
+	/* Pad the remainder of the WQE with an inline data segment. */
+	if (size > s) {
+		inl = wqe + s;
+		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
+	}
+	ctrl->srcrb_flags = 0;
+	ctrl->fence_size = size / 16;
+	/*
+	 * Make sure descriptor is fully written before setting ownership bit
+	 * (because HW can start executing as soon as we do).
+	 */
+	wmb();
+
+	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
+		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+
+	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
+}
+
+/* Post NOP WQE to prevent wrap-around in the middle of WR */
+static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
+{
+	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
+
+	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
+		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
+		ind += s;
+	}
+
+	return ind;
 }
 
 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
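To make the wrap-around padding in pad_wraparound() above concrete, here is a small standalone sketch (not part of the patch) that walks a few indices near the end of a 16-slot queue and shows when a NOP WQE covering the remaining blocks would be posted. The queue size, block size and maximum blocks per WR are illustrative values.

/*
 * Standalone sketch, not part of the patch: the wrap-around padding
 * arithmetic used by pad_wraparound(), with invented parameters.
 */
#include <stdio.h>

int main(void)
{
	unsigned wqe_cnt = 16;			/* sq.wqe_cnt (power of two) */
	unsigned max_wqes_per_wr = 4;		/* sq_max_wqes_per_wr */
	unsigned wqe_shift = 6;			/* 64-byte basic blocks */

	for (unsigned ind = 12; ind <= 15; ind++) {
		unsigned s = wqe_cnt - (ind & (wqe_cnt - 1));

		if (s < max_wqes_per_wr)
			printf("ind %2u: only %u block(s) left -> NOP of %u bytes, ind becomes %u\n",
			       ind, s, s << wqe_shift, ind + s);
		else
			printf("ind %2u: %u block(s) left, no padding needed\n",
			       ind, s);
	}
	return 0;
}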
@@ -237,6 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
 {
+	int s;
+
 	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr  > dev->dev->caps.max_wqes  ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
@@ -252,20 +327,74 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
 		return -EINVAL;
 
-	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
-							sizeof (struct mlx4_wqe_data_seg),
-							cap->max_inline_data +
-							sizeof (struct mlx4_wqe_inline_seg)) +
-						    send_wqe_overhead(type)));
-	qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
-		sizeof (struct mlx4_wqe_data_seg);
+	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
+		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
+		send_wqe_overhead(type);
 
 	/*
-	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
-	 * allow HW to prefetch.
+	 * Hermon supports shrinking WQEs, such that a single work
+	 * request can include multiple units of 1 << wqe_shift.  This
+	 * way, work requests can differ in size, and do not have to
+	 * be a power of 2 in size, saving memory and speeding up send
+	 * WR posting.  Unfortunately, if we do this then the
+	 * wqe_index field in CQEs can't be used to look up the WR ID
+	 * anymore, so we do this only if selective signaling is off.
+	 *
+	 * Further, on 32-bit platforms, we can't use vmap() to make
+	 * the QP buffer virtually contigious.  Thus we have to use
+	 * constant-sized WRs to make sure a WR is always fully within
+	 * a single page-sized chunk.
+	 *
+	 * Finally, we use NOP work requests to pad the end of the
+	 * work queue, to avoid wrap-around in the middle of WR.  We
+	 * set NEC bit to avoid getting completions with error for
+	 * these NOP WRs, but since NEC is only supported starting
+	 * with firmware 2.2.232, we use constant-sized WRs for older
+	 * firmware.
+	 *
+	 * And, since MLX QPs only support SEND, we use constant-sized
+	 * WRs in this case.
+	 *
+	 * We look for the smallest value of wqe_shift such that the
+	 * resulting number of wqes does not exceed device
+	 * capabilities.
+	 *
+	 * We set WQE size to at least 64 bytes, this way stamping
+	 * invalidates each WQE.
 	 */
-	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
-	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes);
+	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
+	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
+	    type != IB_QPT_SMI && type != IB_QPT_GSI)
+		qp->sq.wqe_shift = ilog2(64);
+	else
+		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+
+	for (;;) {
+		if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
+			return -EINVAL;
+
+		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
+
+		/*
+		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
+		 * allow HW to prefetch.
+		 */
+		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
+		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
+						    qp->sq_max_wqes_per_wr +
+						    qp->sq_spare_wqes);
+
+		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
+			break;
+
+		if (qp->sq_max_wqes_per_wr <= 1)
+			return -EINVAL;
+
+		++qp->sq.wqe_shift;
+	}
+
+	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+			 send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
 
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -277,7 +406,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		qp->sq.offset = 0;
 	}
 
-	cap->max_send_wr  = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes;
+	cap->max_send_wr  = qp->sq.max_post =
+		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
 	cap->max_send_sge = qp->sq.max_gs;
 
 	/* We don't support inline sends for kernel QPs (yet) */
 	cap->max_inline_data = 0;
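The comment block and loop above search for the smallest wqe_shift whose resulting WQE count still fits the device limits. The standalone sketch below (not part of the patch) replays that search with made-up caps values so the iteration is visible; s, max_send_wr and the device limits are purely illustrative, and DIV_ROUND_UP/roundup_pow_of_two are local stand-ins for the kernel helpers.

/*
 * Standalone sketch, not part of the patch: the wqe_shift search from
 * set_kernel_sq_size(), worked through with invented device limits.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned roundup_pow_of_two(unsigned x)
{
	unsigned p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned s = 400;		/* max WR size in bytes (illustrative) */
	unsigned max_send_wr = 4096;	/* requested by the consumer */
	unsigned max_wqes = 16384;	/* device limit (illustrative) */
	unsigned max_sq_desc_sz = 1008;	/* device limit (illustrative) */
	unsigned wqe_shift = 6;		/* start at 64 bytes when shrinking */

	for (;;) {
		if ((1U << wqe_shift) > max_sq_desc_sz) {
			printf("no usable wqe_shift\n");
			return 1;
		}

		unsigned per_wr = DIV_ROUND_UP(s, 1U << wqe_shift);
		unsigned spare = (2048 >> wqe_shift) + per_wr;
		unsigned wqe_cnt = roundup_pow_of_two(max_send_wr * per_wr + spare);

		printf("wqe_shift %u: %u block(s)/WR, %u spare, wqe_cnt %u\n",
		       wqe_shift, per_wr, spare, wqe_cnt);

		if (wqe_cnt <= max_wqes)
			break;
		if (per_wr <= 1)
			return 1;
		wqe_shift++;
	}
	return 0;
}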
@@ -323,6 +453,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->rq.tail = 0;
 	qp->sq.head = 0;
 	qp->sq.tail = 0;
+	qp->sq_next_wqe = 0;
+
+	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+	else
+		qp->sq_signal_bits = 0;
 
 	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
 	if (err)
@@ -413,11 +549,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	 */
 	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
 
-	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
-		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
-	else
-		qp->sq_signal_bits = 0;
-
 	qp->mqp.event = mlx4_ib_qp_event;
 
 	return 0;
@@ -912,7 +1043,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			ctrl = get_send_wqe(qp, i);
 			ctrl->owner_opcode = cpu_to_be32(1 << 31);
 
-			stamp_send_wqe(qp, i);
+			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
 		}
 	}
@@ -965,6 +1096,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
+		qp->sq_next_wqe = 0;
 		if (!ibqp->srq)
 			*qp->db.db  = 0;
 	}
@@ -1274,13 +1406,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	int nreq;
 	int err = 0;
-	int ind;
-	int size;
+	unsigned ind;
+	int uninitialized_var(stamp);
+	int uninitialized_var(size);
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
-	ind = qp->sq.head;
+	ind = qp->sq_next_wqe;
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -1296,7 +1429,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		}
 
 		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
-		qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
+		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
 
 		ctrl->srcrb_flags =
 			(wr->send_flags & IB_SEND_SIGNALED ?
@@ -1409,16 +1542,23 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
 			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
 
+		stamp = ind + qp->sq_spare_wqes;
+		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
+
 		/*
 		 * We can improve latency by not stamping the last
 		 * send queue WQE until after ringing the doorbell, so
 		 * only stamp here if there are still more WQEs to post.
+		 *
+		 * Same optimization applies to padding with NOP wqe
+		 * in case of WQE shrinking (used to prevent wrap-around
+		 * in the middle of WR).
 		 */
-		if (wr->next)
-			stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) &
-				       (qp->sq.wqe_cnt - 1));
-
-		++ind;
+		if (wr->next) {
+			stamp_send_wqe(qp, stamp, size * 16);
+			ind = pad_wraparound(qp, ind);
+		}
 	}
 
 out:
@@ -1440,8 +1580,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		 */
 		mmiowb();
 
-		stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) &
-			       (qp->sq.wqe_cnt - 1));
+		stamp_send_wqe(qp, stamp, size * 16);
 
+		ind = pad_wraparound(qp, ind);
+		qp->sq_next_wqe = ind;
 	}
 
 	spin_unlock_irqrestore(&qp->sq.lock, flags);
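In the post-send path above, ind now advances by the number of basic blocks a WR occupies, and stamping of the last WQE (plus NOP padding) is deferred until after the doorbell. The standalone sketch below (not part of the patch) traces that index arithmetic for a few invented WR sizes; size is in 16-byte units as in the driver, and sq_spare_wqes is an arbitrary example value.

/*
 * Standalone sketch, not part of the patch: how the work-queue index
 * advances per WR when WQEs can shrink; all numbers are illustrative.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned wqe_shift = 6;			/* 64-byte basic blocks */
	unsigned spare_wqes = 33;		/* sq_spare_wqes (illustrative) */
	unsigned ind = 0;			/* qp->sq_next_wqe */
	unsigned sizes[] = { 4, 13, 7 };	/* per-WR sizes in 16-byte units */

	for (unsigned i = 0; i < 3; i++) {
		unsigned stamp = ind + spare_wqes;	/* where stamping starts */
		unsigned blocks = DIV_ROUND_UP(sizes[i] * 16, 1U << wqe_shift);

		printf("WR %u: %3u bytes -> %u block(s), stamp at %u, next ind %u\n",
		       i, sizes[i] * 16, blocks, stamp, ind + blocks);
		ind += blocks;
	}
	/* The last WR's stamp_send_wqe()/pad_wraparound() run only after
	 * the doorbell is rung, to shave latency off the common path. */
	return 0;
}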
@@ -133,6 +133,11 @@ enum {
 	MLX4_STAT_RATE_OFFSET = 5
 };
 
+static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
+{
+	return (major << 32) | (minor << 16) | subminor;
+}
+
 struct mlx4_caps {
 	u64			fw_ver;
 	int			num_ports;
@@ -154,7 +154,11 @@ struct mlx4_qp_context {
 	u32			reserved5[10];
 };
 
+/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
+#define MLX4_FW_VER_WQE_CTRL_NEC	mlx4_fw_ver(2, 2, 232)
+
 enum {
+	MLX4_WQE_CTRL_NEC	= 1 << 29,
 	MLX4_WQE_CTRL_FENCE	= 1 << 6,
 	MLX4_WQE_CTRL_CQ_UPDATE	= 3 << 2,
 	MLX4_WQE_CTRL_SOLICITED	= 1 << 1,
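mlx4_fw_ver() packs major/minor/subminor into a single u64 so firmware versions compare with plain integer comparison, and MLX4_FW_VER_WQE_CTRL_NEC marks the first firmware (2.2.232) with NEC support. Below is a standalone sketch of that comparison (not part of the patch); the sample versions are invented.

/*
 * Standalone sketch, not part of the patch: packed firmware version
 * comparison against the NEC support threshold (2.2.232).
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t mlx4_fw_ver(uint64_t major, uint64_t minor, uint64_t subminor)
{
	return (major << 32) | (minor << 16) | subminor;
}

int main(void)
{
	uint64_t nec_threshold = mlx4_fw_ver(2, 2, 232);
	uint64_t samples[][3] = { { 2, 2, 0 }, { 2, 2, 232 }, { 2, 3, 0 } };

	for (unsigned i = 0; i < 3; i++) {
		uint64_t v = mlx4_fw_ver(samples[i][0], samples[i][1], samples[i][2]);

		printf("fw %llu.%llu.%llu -> NEC %ssupported\n",
		       (unsigned long long)samples[i][0],
		       (unsigned long long)samples[i][1],
		       (unsigned long long)samples[i][2],
		       v >= nec_threshold ? "" : "not ");
	}
	return 0;
}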