Commit 272bba19 authored by Ruan Jinjie, committed by Leon Romanovsky

RDMA: Remove unnecessary ternary operators

There are a few ternary operators whose explicit true/false result is
unnecessary under C language semantics.
Signed-off-by: Ruan Jinjie <ruanjinjie@huawei.com>
Link: https://lore.kernel.org/r/20230731085118.394443-1-ruanjinjie@huawei.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent f0ff2a2d
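The rationale behind the change: in C, relational and logical operators already evaluate to an int whose value is 0 or 1, and any scalar assigned to a bool is normalized to false/true, so appending "? true : false" (or "? 1 : 0") changes nothing. Below is a minimal standalone sketch, not part of this patch, illustrating the equivalence; the helper name is made up for illustration and only mirrors the is_nl_msg_valid() pattern from the first hunk.

#include <assert.h>
#include <stdbool.h>

/* Illustrative helper: the comparison itself already evaluates to
 * 0 or 1, so a trailing ternary adds nothing. */
static bool op_in_range(unsigned int op, unsigned int max_ops)
{
	return op < max_ops;    /* identical to: (op < max_ops) ? true : false */
}

int main(void)
{
	assert(op_in_range(3, 8) == ((3 < 8) ? true : false));
	assert(op_in_range(9, 8) == ((9 < 8) ? true : false));
	return 0;
}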
@@ -75,7 +75,7 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
 	if (type >= RDMA_NL_NUM_CLIENTS)
 		return false;
 
-	return (op < max_num_ops[type]) ? true : false;
+	return op < max_num_ops[type];
 }
 
 static const struct rdma_nl_cbs *
@@ -1336,8 +1336,7 @@ static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
 	qplqp->pd = &pd->qplib_pd;
 	qplqp->qp_handle = (u64)qplqp;
 	qplqp->max_inline_data = init_attr->cap.max_inline_data;
-	qplqp->sig_type = ((init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ?
-			   true : false);
+	qplqp->sig_type = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
 	qptype = bnxt_re_init_qp_type(rdev, init_attr);
 	if (qptype < 0) {
 		rc = qptype;
@@ -2261,7 +2260,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
 	}
 	is_eth = true;
-	is_vlan = (vlan_id && (vlan_id < 0x1000)) ? true : false;
+	is_vlan = vlan_id && (vlan_id < 0x1000);
 
 	ib_ud_header_init(payload_size, !is_eth, is_eth, is_vlan, is_grh,
 			  ip_version, is_udp, 0, &qp->qp1_hdr);
@@ -433,8 +433,8 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
 	cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
 	cctx->modes.dbr_pacing =
-		le32_to_cpu(resp.flags_ext2) & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ?
-		true : false;
+		le32_to_cpu(resp.flags_ext2) &
+		FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED;
 	return 0;
 }
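A related subtlety: when the controlling expression is a flag mask rather than a comparison (the dbr_pacing assignment above, the prio assignment below, or the hop_num return in the hns hunk), its value is 0 or some nonzero bit pattern, not 0 or 1. Dropping the ternary stays correct as long as the destination is a bool, because conversion to _Bool collapses any nonzero value to true; a narrow plain-integer destination would not behave the same way. A standalone sketch with made-up flag names (not the real FUNC_QCAPS_RESP_* values) showing the difference:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Illustrative flag bit above the low byte; not a real hardware value. */
#define PACING_EXT_SUPPORTED  0x100u

int main(void)
{
	uint32_t flags_ext2 = 0x180u;   /* pretend firmware reply with the bit set */

	bool    pacing_bool = flags_ext2 & PACING_EXT_SUPPORTED; /* 0x100 -> true */
	uint8_t pacing_u8   = flags_ext2 & PACING_EXT_SUPPORTED; /* 0x100 truncates to 0 */

	assert(pacing_bool == true);    /* _Bool conversion keeps the information */
	assert(pacing_u8 == 0);         /* a narrow integer destination would not */
	return 0;
}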
@@ -1333,8 +1333,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
 	 */
 	if ((prio_map == 0 && rdev->qplib_res.prio) ||
 	    (prio_map != 0 && !rdev->qplib_res.prio)) {
-		rdev->qplib_res.prio = prio_map ? true : false;
-
+		rdev->qplib_res.prio = prio_map;
 		bnxt_re_update_gid(rdev);
 	}
@@ -1373,8 +1373,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	qp->state = sb->en_sqd_async_notify_state &
 			CREQ_QUERY_QP_RESP_SB_STATE_MASK;
 	qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
-				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
-				  true : false;
+				  CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY;
 	qp->access = sb->access;
 	qp->pkey_index = le16_to_cpu(sb->pkey);
 	qp->qkey = le32_to_cpu(sb->qkey);
@@ -78,7 +78,7 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
 		return false;
 	}
 
-	return hop_num ? true : false;
+	return hop_num;
 }
 
 static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
@@ -282,7 +282,7 @@ int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 	bool read_fence = false;
 	u16 quanta;
 
-	info->push_wqe = qp->push_db ? true : false;
+	info->push_wqe = qp->push_db;
 
 	op_info = &info->op.rdma_write;
 	if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
@@ -383,7 +383,7 @@ int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 	u16 quanta;
 	u64 hdr;
 
-	info->push_wqe = qp->push_db ? true : false;
+	info->push_wqe = qp->push_db;
 
 	op_info = &info->op.rdma_read;
 	if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
@@ -468,7 +468,7 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
 	bool read_fence = false;
 	u16 quanta;
 
-	info->push_wqe = qp->push_db ? true : false;
+	info->push_wqe = qp->push_db;
 
 	op_info = &info->op.send;
 	if (qp->max_sq_frag_cnt < op_info->num_sges)
@@ -720,7 +720,7 @@ int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
 	u32 i, total_size = 0;
 	u16 quanta;
 
-	info->push_wqe = qp->push_db ? true : false;
+	info->push_wqe = qp->push_db;
 
 	op_info = &info->op.rdma_write;
 	if (unlikely(qp->max_sq_frag_cnt < op_info->num_lo_sges))
@@ -794,7 +794,7 @@ int irdma_uk_inline_send(struct irdma_qp_uk *qp,
 	u32 i, total_size = 0;
 	u16 quanta;
 
-	info->push_wqe = qp->push_db ? true : false;
+	info->push_wqe = qp->push_db;
 
 	op_info = &info->op.send;
 	if (unlikely(qp->max_sq_frag_cnt < op_info->num_sges))
@@ -872,7 +872,7 @@ int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 	bool local_fence = false;
 	struct ib_sge sge = {};
 
-	info->push_wqe = qp->push_db ? true : false;
+	info->push_wqe = qp->push_db;
 
 	op_info = &info->op.inv_local_stag;
 	local_fence = info->local_fence;
@@ -1004,7 +1004,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 	refcount_set(&iwqp->refcnt, 1);
 	spin_lock_init(&iwqp->lock);
 	spin_lock_init(&iwqp->sc_qp.pfpdu.lock);
-	iwqp->sig_all = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) ? 1 : 0;
+	iwqp->sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
 	rf->qp_table[qp_num] = iwqp;
 
 	if (rdma_protocol_roce(&iwdev->ibdev, 1)) {
@@ -3547,8 +3547,7 @@ static void irdma_process_cqe(struct ib_wc *entry,
 		set_ib_wc_op_sq(cq_poll_info, entry);
 	} else {
 		set_ib_wc_op_rq(cq_poll_info, entry,
-				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM ?
-				true : false);
+				qp->qp_uk.qp_caps & IRDMA_SEND_WITH_IMM);
 		if (qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_UD &&
 		    cq_poll_info->stag_invalid_set) {
 			entry->ex.invalidate_rkey = cq_poll_info->inv_stag;
@@ -1277,7 +1277,7 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
 	qp->sq.max_sges = attrs->cap.max_send_sge;
 	qp->rq.max_sges = attrs->cap.max_recv_sge;
 	qp->state = OCRDMA_QPS_RST;
-	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+	qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 }
 
 static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
@@ -1358,7 +1358,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
 	qp->prev_wqe_size = 0;
 
-	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
+	qp->signaled = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
 	qp->dev = dev;
 
 	if (qedr_qp_has_sq(qp)) {
 		qedr_reset_qp_hwq_info(&qp->sq);