Commit 7416790e authored by Parav Pandit, committed by Jason Gunthorpe

RDMA/core: Introduce and use API to read port immutable data

Currently the mlx5 driver caches the port GID table length for 2 ports.
The same value is also cached by the IB core as port immutable data.

When mlx5 representor ports are present, of which there are usually more
than 2, validating the GID table length can access the port_caps array
out of bounds, since that array only covers 2 ports.

To avoid this, rely on the IB core's port immutable data by exposing an
API to read the port immutable fields.

Remove the mlx5 driver's internal cache, thereby reducing code and data.

Link: https://lore.kernel.org/r/20210203130133.4057329-5-leon@kernel.org
Signed-off-by: Parav Pandit <parav@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 7a58779e
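
Usage note (not part of the commit): a minimal sketch of how a caller might use
the new API to bound-check a GID index against the IB core's immutable data
instead of a driver-side cache. check_sgid_index() is a hypothetical helper that
simply mirrors the qp.c change in the diff below.

/* Hypothetical helper, for illustration only. */
static int check_sgid_index(struct ib_device *ibdev, u32 port, u32 sgid_index)
{
        const struct ib_port_immutable *immutable;

        /* Port numbers are 1-based; the caller must pass a valid port. */
        immutable = ib_port_immutable_read(ibdev, port);
        if (sgid_index >= immutable->gid_tbl_len)
                return -EINVAL;

        return 0;
}
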
@@ -848,6 +848,20 @@ static int setup_port_data(struct ib_device *device)
 	return 0;
 }
 
+/**
+ * ib_port_immutable_read() - Read rdma port's immutable data
+ * @dev: IB device
+ * @port: port number whose immutable data to read. It starts with index 1 and
+ *        is valid up to and including rdma_end_port().
+ */
+const struct ib_port_immutable*
+ib_port_immutable_read(struct ib_device *dev, unsigned int port)
+{
+	WARN_ON(!rdma_is_port_valid(dev, port));
+	return &dev->port_data[port].immutable;
+}
+EXPORT_SYMBOL(ib_port_immutable_read);
+
 void ib_get_device_fw_str(struct ib_device *dev, char *str)
 {
 	if (dev->ops.get_dev_fw_str)
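For context (not part of the commit): the immutable data read above is whatever the
driver reported through its get_port_immutable() callback at registration time, which
is why a separate driver-side cache of gid_table_len is redundant. A rough, generic
sketch of such a callback follows; the exact port-number type (u8 vs u32) and the
core_cap_flags value depend on kernel version and transport, so treat those details
as assumptions, not the mlx5 implementation.

/* Illustrative driver callback that fills the per-port immutable data the
 * IB core stores in dev->port_data[port].immutable. Generic sketch only.
 */
static int example_get_port_immutable(struct ib_device *ibdev, u32 port_num,
                                      struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}
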
@@ -2964,41 +2964,6 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 		mlx5_query_ext_port_caps(dev, port);
 }
 
-static int __get_port_caps(struct mlx5_ib_dev *dev, u8 port)
-{
-	struct ib_port_attr *pprops = NULL;
-	int err = -ENOMEM;
-
-	pprops = kzalloc(sizeof(*pprops), GFP_KERNEL);
-	if (!pprops)
-		goto out;
-
-	err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
-	if (err) {
-		mlx5_ib_warn(dev, "query_port %d failed %d\n",
-			     port, err);
-		goto out;
-	}
-
-	dev->port_caps[port - 1].gid_table_len = pprops->gid_tbl_len;
-	mlx5_ib_dbg(dev, "port %d: pkey_table_len %d, gid_table_len %d\n",
-		    port, dev->pkey_table_len, pprops->gid_tbl_len);
-
-out:
-	kfree(pprops);
-	return err;
-}
-
-static int get_port_caps(struct mlx5_ib_dev *dev, u8 port)
-{
-	/* For representors use port 1, is this is the only native
-	 * port
-	 */
-	if (dev->is_rep)
-		return __get_port_caps(dev, 1);
-	return __get_port_caps(dev, port);
-}
-
 static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
 {
 	switch (umr_fence_cap) {
@@ -3472,10 +3437,6 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev,
 	if (err)
 		goto unbind;
 
-	err = get_port_caps(ibdev, mlx5_core_native_port_num(mpi->mdev));
-	if (err)
-		goto unbind;
-
 	err = mlx5_add_netdev_notifier(ibdev, port_num);
 	if (err) {
 		mlx5_ib_err(ibdev, "failed adding netdev notifier for port %u\n",
@@ -3553,12 +3514,10 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
 				break;
 			}
 		}
-		if (!bound) {
-			get_port_caps(dev, i + 1);
+		if (!bound)
 			mlx5_ib_dbg(dev, "no free port found for port %d\n",
 				    i + 1);
-		}
 	}
 
 	list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list);
 	mutex_unlock(&mlx5_ib_multiport_mutex);
@@ -3940,18 +3899,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	if (err)
 		goto err_mp;
 
-	if (!mlx5_core_mp_enabled(mdev)) {
-		for (i = 1; i <= dev->num_ports; i++) {
-			err = get_port_caps(dev, i);
-			if (err)
-				break;
-		}
-	} else {
-		err = get_port_caps(dev, mlx5_core_native_port_num(mdev));
-	}
-	if (err)
-		goto err_mp;
-
 	err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len);
 	if (err)
 		goto err_mp;
@@ -1037,7 +1037,6 @@ struct mlx5_var_table {
 };
 
 struct mlx5_port_caps {
-	int gid_table_len;
 	bool has_smi;
 	u8 ext_port_cap;
 };
@@ -3176,11 +3176,13 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		 alt ? attr->alt_pkey_index : attr->pkey_index);
 
 	if (ah_flags & IB_AH_GRH) {
-		if (grh->sgid_index >=
-		    dev->port_caps[port - 1].gid_table_len) {
+		const struct ib_port_immutable *immutable;
+
+		immutable = ib_port_immutable_read(&dev->ib_dev, port);
+		if (grh->sgid_index >= immutable->gid_tbl_len) {
 			pr_err("sgid_index (%u) too large. max is %d\n",
 			       grh->sgid_index,
-			       dev->port_caps[port - 1].gid_table_len);
+			       immutable->gid_tbl_len);
 			return -EINVAL;
 		}
 	}
@@ -4674,4 +4674,7 @@ static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
 	return (u32)(v & IB_GRH_FLOWLABEL_MASK);
 }
 
+const struct ib_port_immutable*
+ib_port_immutable_read(struct ib_device *dev, unsigned int port);
+
 #endif /* IB_VERBS_H */