Commit 5163b274 authored by Maor Gottlieb's avatar Maor Gottlieb Committed by Jason Gunthorpe

RDMA/mlx5: Refactor affinity related code

Move the affinity related code in modify qp into a function.  This is a
preparation for the next patch, which extends the affinity calculation to
consider the xmit slave.

Link: https://lore.kernel.org/r/20200430192146.12863-16-maorg@mellanox.com
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 51aab126
......@@ -3582,33 +3582,61 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return 0;
}
static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
struct mlx5_ib_pd *pd,
struct mlx5_ib_qp_base *qp_base,
u8 port_num, struct ib_udata *udata)
static unsigned int get_tx_affinity_rr(struct mlx5_ib_dev *dev,
struct ib_udata *udata)
{
struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct mlx5_ib_ucontext, ibucontext);
unsigned int tx_port_affinity;
u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
atomic_t *tx_port_affinity;
if (ucontext)
tx_port_affinity = &ucontext->tx_port_affinity;
else
tx_port_affinity = &dev->port[port_num].roce.tx_port_affinity;
return (unsigned int)atomic_add_return(1, tx_port_affinity) %
MLX5_MAX_PORTS + 1;
}
/*
 * Report whether @qp is of a type that participates in LAG TX port
 * affinity.  UD QPs created as a proxy for QP1
 * (MLX5_IB_QP_CREATE_SQPN_QP1) are excluded.
 */
static bool qp_supports_affinity(struct ib_qp *qp)
{
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	if ((qp->qp_type == IB_QPT_RC) ||
	    (qp->qp_type == IB_QPT_UD &&
	     !(mqp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)) ||
	    (qp->qp_type == IB_QPT_UC) ||
	    (qp->qp_type == IB_QPT_RAW_PACKET) ||
	    (qp->qp_type == IB_QPT_XRC_INI) ||
	    (qp->qp_type == IB_QPT_XRC_TGT))
		return true;
	return false;
}
/*
 * Compute the TX port affinity for @qp, if any.
 *
 * @qp:    the QP being modified
 * @init:  true when this modify is the RESET->INIT transition; affinity
 *         is only assigned at that point
 * @udata: user data of the modify call, used to locate the ucontext's
 *         round-robin counter
 *
 * Returns 0 (no affinity) unless LAG is active, @init is set and the QP
 * type supports affinity; otherwise returns the 1-based port index
 * handed out by the round-robin allocator.
 */
static unsigned int get_tx_affinity(struct ib_qp *qp, u8 init,
				    struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *ucontext = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);
	struct mlx5_ib_qp_base *qp_base;
	unsigned int tx_affinity;

	if (!(dev->lag_active && init && qp_supports_affinity(qp)))
		return 0;

	tx_affinity = get_tx_affinity_rr(dev, udata);

	qp_base = &mqp->trans_qp.base;
	if (ucontext)
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
			    tx_affinity, qp_base->mqp.qpn, ucontext);
	else
		mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
			    tx_affinity, qp_base->mqp.qpn);
	return tx_affinity;
}
static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
......@@ -3718,22 +3746,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
}
}
if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
if ((ibqp->qp_type == IB_QPT_RC) ||
(ibqp->qp_type == IB_QPT_UD &&
!(qp->flags & MLX5_IB_QP_CREATE_SQPN_QP1)) ||
(ibqp->qp_type == IB_QPT_UC) ||
(ibqp->qp_type == IB_QPT_RAW_PACKET) ||
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (dev->lag_active) {
u8 p = mlx5_core_native_port_num(dev->mdev) - 1;
tx_affinity = get_tx_affinity(dev, pd, base, p,
udata);
tx_affinity = get_tx_affinity(ibqp,
cur_state == IB_QPS_RESET &&
new_state == IB_QPS_INIT, udata);
context->flags |= cpu_to_be32(tx_affinity << 24);
}
}
}
if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment