Commit cef7dde8 authored by Michael Guralnik, committed by Leon Romanovsky

net/mlx5: Expand mkey page size to support 6 bits

Protect the usage of the 6th bit with the relevant capability to ensure
we are using the new page sizes with FW that supports the bit extension.
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Link: https://patch.msgid.link/20240909100504.29797-2-michaelgur@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent f4ccc0a2
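A note on what the new capability gate buys: the added helper mlx5_umem_mkc_find_best_pgsz() asks __mlx5_log_page_size_to_bitmap() for 6 page-size bits only when MLX5_CAP_GEN_2(mdev, umr_log_entity_size_5) is set, and otherwise keeps the old 5-bit behaviour. The standalone sketch below is illustrative only, not kernel code: GENMASK() and the 4 KiB device floor (MLX5_ADAPTER_PAGE_SHIFT == 12) are re-implemented here as assumptions so the resulting bitmaps can be printed from userspace.

/*
 * Illustrative userspace sketch (assumptions noted above, not kernel code):
 * shows how widening the mkc log_page_size field from 5 to 6 bits changes
 * the page-size bitmap handed to ib_umem_find_best_pgsz().
 */
#include <stdio.h>

#define BITS_PER_LONG      64  /* assumes an LP64 build */
#define ADAPTER_PAGE_SHIFT 12  /* assumed 4 KiB floor, like MLX5_ADAPTER_PAGE_SHIFT */

/* Set bits [lo, hi] of an unsigned long, mimicking the kernel's GENMASK(). */
static unsigned long genmask(unsigned int hi, unsigned int lo)
{
        return (~0UL >> (BITS_PER_LONG - 1 - hi)) & (~0UL << lo);
}

/* Rough mirror of __mlx5_log_page_size_to_bitmap(log_pgsz_bits, 0). */
static unsigned long log_page_size_to_bitmap(unsigned int log_pgsz_bits)
{
        unsigned int largest_pg_shift = (1U << log_pgsz_bits) - 1;

        if (largest_pg_shift > BITS_PER_LONG - 1)
                largest_pg_shift = BITS_PER_LONG - 1;
        return genmask(largest_pg_shift, ADAPTER_PAGE_SHIFT);
}

int main(void)
{
        /* 5-bit field: page shifts 12..31, i.e. 4 KiB up to 2 GiB pages. */
        printf("5 bits: %#lx\n", log_page_size_to_bitmap(5));
        /* 6-bit field: page shifts 12..63, so much larger mappings become legal. */
        printf("6 bits: %#lx\n", log_page_size_to_bitmap(6));
        return 0;
}

On a 64-bit build this prints 0xfffff000 for the 5-bit case and 0xfffffffffffff000 for the 6-bit case, which is the widening that the umr_log_entity_size_5 check in the hunks below enables.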
@@ -63,17 +63,6 @@ __mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
 	return GENMASK(largest_pg_shift, pgsz_shift);
 }
 
-/*
- * For mkc users, instead of a page_offset the command has a start_iova which
- * specifies both the page_offset and the on-the-wire IOVA
- */
-#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova) \
-	ib_umem_find_best_pgsz(umem,                                         \
-			       __mlx5_log_page_size_to_bitmap(               \
-				       __mlx5_bit_sz(typ, log_pgsz_fld),     \
-				       pgsz_shift),                          \
-			       iova)
-
 static __always_inline unsigned long
 __mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
 			      unsigned int offset_shift)
@@ -1724,4 +1713,20 @@ static inline u32 smi_to_native_portnum(struct mlx5_ib_dev *dev, u32 port)
 	return (port - 1) / dev->num_ports + 1;
 }
 
+/*
+ * For mkc users, instead of a page_offset the command has a start_iova which
+ * specifies both the page_offset and the on-the-wire IOVA
+ */
+static __always_inline unsigned long
+mlx5_umem_mkc_find_best_pgsz(struct mlx5_ib_dev *dev, struct ib_umem *umem,
+			     u64 iova)
+{
+	int page_size_bits =
+		MLX5_CAP_GEN_2(dev->mdev, umr_log_entity_size_5) ? 6 : 5;
+	unsigned long bitmap =
+		__mlx5_log_page_size_to_bitmap(page_size_bits, 0);
+
+	return ib_umem_find_best_pgsz(umem, bitmap, iova);
+}
+
 #endif /* MLX5_IB_H */
@@ -1120,8 +1120,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	if (umem->is_dmabuf)
 		page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova);
 	else
-		page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size,
-						     0, iova);
+		page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
 	if (WARN_ON(!page_size))
 		return ERR_PTR(-EINVAL);
@@ -1426,8 +1425,8 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
 		mr = alloc_cacheable_mr(pd, umem, iova, access_flags,
 					MLX5_MKC_ACCESS_MODE_MTT);
 	} else {
-		unsigned int page_size = mlx5_umem_find_best_pgsz(
-			umem, mkc, log_page_size, 0, iova);
+		unsigned int page_size =
+			mlx5_umem_mkc_find_best_pgsz(dev, umem, iova);
 
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(pd, umem, iova, access_flags, page_size,
@@ -1745,8 +1744,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
 		return false;
 
-	*page_size =
-		mlx5_umem_find_best_pgsz(new_umem, mkc, log_page_size, 0, iova);
+	*page_size = mlx5_umem_mkc_find_best_pgsz(dev, new_umem, iova);
 	if (WARN_ON(!*page_size))
 		return false;
 	return (mr->mmkey.cache_ent->rb_key.ndescs) >=
@@ -693,7 +693,7 @@ static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
 	struct ib_umem_dmabuf *umem_dmabuf = to_ib_umem_dmabuf(mr->umem);
 	u32 xlt_flags = 0;
 	int err;
-	unsigned int page_size;
+	unsigned long page_size;
 
 	if (flags & MLX5_PF_FLAGS_ENABLE)
 		xlt_flags |= MLX5_IB_UPD_XLT_ENABLE;
@@ -1988,7 +1988,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
 	u8         migratable[0x1];
 	u8         reserved_at_81[0x11];
 	u8         query_vuid[0x1];
-	u8         reserved_at_93[0xd];
+	u8         reserved_at_93[0x5];
+	u8         umr_log_entity_size_5[0x1];
+	u8         reserved_at_99[0x7];
 	u8         max_reformat_insert_size[0x8];
 	u8         max_reformat_insert_offset[0x8];
@@ -4212,8 +4214,7 @@ struct mlx5_ifc_mkc_bits {
 	u8         reserved_at_1c0[0x19];
 	u8         relaxed_ordering_read[0x1];
-	u8         reserved_at_1d9[0x1];
-	u8         log_page_size[0x5];
+	u8         log_page_size[0x6];
 
 	u8         reserved_at_1e0[0x20];
 };