Commit b9294f8b authored by Israel Rukshin, committed by Jason Gunthorpe

IB/iser: Unwind WR union at iser_tx_desc

After decreasing WRs array size from 7 to 3 it is more
readable to give each WR a descriptive name.
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent b76a4399
...@@ -205,7 +205,8 @@ iser_initialize_task_headers(struct iscsi_task *task, ...@@ -205,7 +205,8 @@ iser_initialize_task_headers(struct iscsi_task *task,
goto out; goto out;
} }
tx_desc->wr_idx = 0; tx_desc->inv_wr.next = NULL;
tx_desc->reg_wr.wr.next = NULL;
tx_desc->mapped = true; tx_desc->mapped = true;
tx_desc->dma_addr = dma_addr; tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr; tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
......
...@@ -225,12 +225,6 @@ enum iser_desc_type { ...@@ -225,12 +225,6 @@ enum iser_desc_type {
ISCSI_TX_DATAOUT ISCSI_TX_DATAOUT
}; };
/*
* Maximum number of work requests per task
* (invalidate, registration, send)
*/
#define ISER_MAX_WRS 3
/** /**
* struct iser_tx_desc - iSER TX descriptor * struct iser_tx_desc - iSER TX descriptor
* *
...@@ -243,8 +237,9 @@ enum iser_desc_type { ...@@ -243,8 +237,9 @@ enum iser_desc_type {
* unsolicited data-out or control * unsolicited data-out or control
* @num_sge: number sges used on this TX task * @num_sge: number sges used on this TX task
* @mapped: Is the task header mapped * @mapped: Is the task header mapped
* @wr_idx: Current WR index * @reg_wr: registration WR
* @wrs: Array of WRs per task * @send_wr: send WR
* @inv_wr: invalidate WR
*/ */
struct iser_tx_desc { struct iser_tx_desc {
struct iser_ctrl iser_header; struct iser_ctrl iser_header;
...@@ -255,11 +250,9 @@ struct iser_tx_desc { ...@@ -255,11 +250,9 @@ struct iser_tx_desc {
int num_sge; int num_sge;
struct ib_cqe cqe; struct ib_cqe cqe;
bool mapped; bool mapped;
u8 wr_idx; struct ib_reg_wr reg_wr;
union iser_wr { struct ib_send_wr send_wr;
struct ib_send_wr send; struct ib_send_wr inv_wr;
struct ib_reg_wr fast_reg;
} wrs[ISER_MAX_WRS];
}; };
#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
...@@ -652,21 +645,6 @@ void ...@@ -652,21 +645,6 @@ void
iser_reg_desc_put_fmr(struct ib_conn *ib_conn, iser_reg_desc_put_fmr(struct ib_conn *ib_conn,
struct iser_fr_desc *desc); struct iser_fr_desc *desc);
static inline struct ib_send_wr *
iser_tx_next_wr(struct iser_tx_desc *tx_desc)
{
struct ib_send_wr *cur_wr = &tx_desc->wrs[tx_desc->wr_idx].send;
struct ib_send_wr *last_wr;
if (tx_desc->wr_idx) {
last_wr = &tx_desc->wrs[tx_desc->wr_idx - 1].send;
last_wr->next = cur_wr;
}
tx_desc->wr_idx++;
return cur_wr;
}
static inline struct iser_conn * static inline struct iser_conn *
to_iser_conn(struct ib_conn *ib_conn) to_iser_conn(struct ib_conn *ib_conn)
{ {
......
...@@ -365,13 +365,15 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) ...@@ -365,13 +365,15 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
static inline void static inline void
iser_inv_rkey(struct ib_send_wr *inv_wr, iser_inv_rkey(struct ib_send_wr *inv_wr,
struct ib_mr *mr, struct ib_mr *mr,
struct ib_cqe *cqe) struct ib_cqe *cqe,
struct ib_send_wr *next_wr)
{ {
inv_wr->opcode = IB_WR_LOCAL_INV; inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->wr_cqe = cqe; inv_wr->wr_cqe = cqe;
inv_wr->ex.invalidate_rkey = mr->rkey; inv_wr->ex.invalidate_rkey = mr->rkey;
inv_wr->send_flags = 0; inv_wr->send_flags = 0;
inv_wr->num_sge = 0; inv_wr->num_sge = 0;
inv_wr->next = next_wr;
} }
static int static int
...@@ -385,7 +387,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, ...@@ -385,7 +387,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
struct ib_mr *mr = rsc->sig_mr; struct ib_mr *mr = rsc->sig_mr;
struct ib_sig_attrs *sig_attrs = mr->sig_attrs; struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
struct ib_reg_wr *wr; struct ib_reg_wr *wr = &tx_desc->reg_wr;
int ret; int ret;
memset(sig_attrs, 0, sizeof(*sig_attrs)); memset(sig_attrs, 0, sizeof(*sig_attrs));
...@@ -396,7 +398,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, ...@@ -396,7 +398,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask); iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
if (rsc->mr_valid) if (rsc->mr_valid)
iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe); iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
...@@ -408,8 +410,8 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, ...@@ -408,8 +410,8 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
goto err; goto err;
} }
wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr);
memset(wr, 0, sizeof(*wr)); memset(wr, 0, sizeof(*wr));
wr->wr.next = &tx_desc->send_wr;
wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
wr->wr.wr_cqe = cqe; wr->wr.wr_cqe = cqe;
wr->wr.num_sge = 0; wr->wr.num_sge = 0;
...@@ -441,11 +443,11 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -441,11 +443,11 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct iser_tx_desc *tx_desc = &iser_task->desc; struct iser_tx_desc *tx_desc = &iser_task->desc;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
struct ib_mr *mr = rsc->mr; struct ib_mr *mr = rsc->mr;
struct ib_reg_wr *wr; struct ib_reg_wr *wr = &tx_desc->reg_wr;
int n; int n;
if (rsc->mr_valid) if (rsc->mr_valid)
iser_inv_rkey(iser_tx_next_wr(tx_desc), mr, cqe); iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
...@@ -456,7 +458,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -456,7 +458,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return n < 0 ? n : -EINVAL; return n < 0 ? n : -EINVAL;
} }
wr = container_of(iser_tx_next_wr(tx_desc), struct ib_reg_wr, wr); wr->wr.next = &tx_desc->send_wr;
wr->wr.opcode = IB_WR_REG_MR; wr->wr.opcode = IB_WR_REG_MR;
wr->wr.wr_cqe = cqe; wr->wr.wr_cqe = cqe;
wr->wr.send_flags = 0; wr->wr.send_flags = 0;
......
...@@ -1037,7 +1037,8 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count) ...@@ -1037,7 +1037,8 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
bool signal) bool signal)
{ {
struct ib_send_wr *wr = iser_tx_next_wr(tx_desc); struct ib_send_wr *wr = &tx_desc->send_wr;
struct ib_send_wr *first_wr;
int ib_ret; int ib_ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device, ib_dma_sync_single_for_device(ib_conn->device->ib_device,
...@@ -1051,7 +1052,14 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, ...@@ -1051,7 +1052,14 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
wr->opcode = IB_WR_SEND; wr->opcode = IB_WR_SEND;
wr->send_flags = signal ? IB_SEND_SIGNALED : 0; wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, NULL); if (tx_desc->inv_wr.next)
first_wr = &tx_desc->inv_wr;
else if (tx_desc->reg_wr.wr.next)
first_wr = &tx_desc->reg_wr.wr;
else
first_wr = wr;
ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
if (ib_ret) if (ib_ret)
iser_err("ib_post_send failed, ret:%d opcode:%d\n", iser_err("ib_post_send failed, ret:%d opcode:%d\n",
ib_ret, wr->opcode); ib_ret, wr->opcode);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment