Commit 2be08c30 authored by Leon Romanovsky, committed by Jason Gunthorpe

RDMA/mlx5: Delete create QP flags obfuscation

There is no point in redefining create flags that are stable and exposed to
users. Their values won't change and they are identical to the ones used by
mlx5. Delete the mlx5 definitions and use the IB/core fields.

Link: https://lore.kernel.org/r/20200427154636.381474-14-leon@kernel.org
Reviewed-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 5d0dc3d9
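
For context, the duplication being removed is that the driver either aliased the
IB/core create flags under private names or re-declared the same bit values, so
every qp->flags test can use enum ib_qp_create_flags directly. Below is a
minimal, hypothetical, stand-alone sketch of that redundancy; the IB/core values
quoted are taken from include/rdma/ib_verbs.h around this kernel version and are
shown for reference only, not defined by this patch.

	/*
	 * Hypothetical, stand-alone illustration only; the IB/core values mirror
	 * enum ib_qp_create_flags in include/rdma/ib_verbs.h as of this series.
	 */
	#include <stdio.h>

	enum ib_qp_create_flags {			/* stable, user-visible values */
		IB_QP_CREATE_IPOIB_UD_LSO    = 1 << 0,
		IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
		IB_QP_CREATE_SOURCE_QPN      = 1 << 10,
	};

	enum mlx5_ib_qp_flags {				/* the aliases being deleted */
		MLX5_IB_QP_LSO             = IB_QP_CREATE_IPOIB_UD_LSO,
		MLX5_IB_QP_CVLAN_STRIPPING = 1 << 9,	/* same bit, private name */
		MLX5_IB_QP_UNDERLAY        = 1 << 10,	/* same bit as IB_QP_CREATE_SOURCE_QPN */
	};

	int main(void)
	{
		unsigned int flags = IB_QP_CREATE_SOURCE_QPN;

		/* Both tests check the same bit, so the private name adds nothing. */
		printf("via MLX5_IB_QP_UNDERLAY:     %d\n", !!(flags & MLX5_IB_QP_UNDERLAY));
		printf("via IB_QP_CREATE_SOURCE_QPN: %d\n", !!(flags & IB_QP_CREATE_SOURCE_QPN));
		return 0;
	}
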
@@ -615,7 +615,7 @@ static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
 		enum ib_qp_type	qp_type = qp->ibqp.qp_type;
 
 		if (qp_type == IB_QPT_RAW_PACKET ||
-		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
+		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
 			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
 				&qp->raw_packet_qp;
 			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;

@@ -142,7 +142,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
 			return -EINVAL;
 
 		mqp = to_mqp(qp);
-		if (mqp->flags & MLX5_IB_QP_RSS)
+		if (mqp->is_rss)
 			dest_id = mqp->rss_qp.tirn;
 		else
 			dest_id = mqp->raw_packet_qp.rq.tirn;

@@ -3967,7 +3967,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 		dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
 	} else {
 		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-		if (mqp->flags & MLX5_IB_QP_RSS)
+		if (mqp->is_rss)
 			dst->tir_num = mqp->rss_qp.tirn;
 		else
 			dst->tir_num = mqp->raw_packet_qp.rq.tirn;

@@ -3978,7 +3978,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
 			handler = create_dont_trap_rule(dev, ft_prio,
 							flow_attr, dst);
 		} else {
-			underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
+			underlay_qpn = (mqp->flags & IB_QP_CREATE_SOURCE_QPN) ?
 					mqp->underlay_qpn : 0;
 			handler = _create_flow_rule(dev, ft_prio, flow_attr,
 						    dst, underlay_qpn, ucmd);

@@ -4447,7 +4447,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	uid = ibqp->pd ?
 		to_mpd(ibqp->pd)->uid : 0;
 
-	if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
+	if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) {
 		mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
 		return -EOPNOTSUPP;
 	}

@@ -450,7 +450,8 @@ struct mlx5_ib_qp {
 	int			scat_cqe;
 	int			max_inline_data;
 	struct mlx5_bf		bf;
-	int			has_rq;
+	u8			has_rq:1;
+	u8			is_rss:1;
 
 	/* only for user space QPs. For kernel
 	 * we have it from the bf object

@@ -481,24 +482,6 @@ struct mlx5_ib_cq_buf {
 	int			nent;
 };
 
-enum mlx5_ib_qp_flags {
-	MLX5_IB_QP_LSO				= IB_QP_CREATE_IPOIB_UD_LSO,
-	MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK	= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
-	MLX5_IB_QP_CROSS_CHANNEL		= IB_QP_CREATE_CROSS_CHANNEL,
-	MLX5_IB_QP_MANAGED_SEND			= IB_QP_CREATE_MANAGED_SEND,
-	MLX5_IB_QP_MANAGED_RECV			= IB_QP_CREATE_MANAGED_RECV,
-	MLX5_IB_QP_SIGNATURE_HANDLING		= 1 << 5,
-	/* QP uses 1 as its source QP number */
-	MLX5_IB_QP_SQPN_QP1			= 1 << 6,
-	MLX5_IB_QP_CAP_SCATTER_FCS		= 1 << 7,
-	MLX5_IB_QP_RSS				= 1 << 8,
-	MLX5_IB_QP_CVLAN_STRIPPING		= 1 << 9,
-	MLX5_IB_QP_UNDERLAY			= 1 << 10,
-	MLX5_IB_QP_PCI_WRITE_END_PADDING	= 1 << 11,
-	MLX5_IB_QP_TUNNEL_OFFLOAD		= 1 << 12,
-	MLX5_IB_QP_PACKET_BASED_CREDIT		= 1 << 13,
-};
-
 struct mlx5_umr_wr {
 	struct ib_send_wr		wr;
 	u64				virt_addr;
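
One note on the mlx5_ib.h hunk above: flags with no IB/core counterpart cannot be
turned into an ib_qp_create_flags test, so the RSS indicator becomes a one-bit
field next to has_rq, and the two u8 bitfields pack into a single byte. Below is
a hypothetical, self-contained sketch of the resulting split, using simplified
names rather than the driver's real structures.

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the relevant members of struct mlx5_ib_qp. */
	struct qp_state {
		uint32_t create_flags;	/* exposed IB/core create-flag bits */
		uint8_t  has_rq:1;	/* driver-internal state, one bit each, */
		uint8_t  is_rss:1;	/* packed together into the same byte */
	};

	int main(void)
	{
		/* 1 << 10 stands in for IB_QP_CREATE_SOURCE_QPN in this sketch. */
		struct qp_state qp = { .create_flags = 1u << 10, .is_rss = 1 };

		/* Core-defined properties keep using the exposed flag bits ... */
		printf("underlay QP: %d\n", !!(qp.create_flags & (1u << 10)));
		/* ... while purely internal properties read their own bitfield. */
		printf("RSS QP:      %d\n", qp.is_rss);
		return 0;
	}
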