Commit 02648b4b authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5: Generalize name of UMR alignment definition

Per the device spec, MLX5_UMR_MTT_ALIGNMENT applies not only to UMR MTT
entries, but to all other entry types as well, such as KLMs and KSMs.
Rename it to MLX5_UMR_FLEX_ALIGNMENT to reflect this.
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Gal Pressman <gal@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent 683d78a0
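
The renamed constant is a byte alignment, so the number of entries that fit in one aligned block depends on the entry type. The short standalone sketch below (not part of the patch; the 8-byte MTT and 16-byte KLM/KSM entry sizes are assumptions used for illustration) shows how the same 0x40-byte alignment translates into different per-type entry counts, mirroring MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT and lining up with the existing MLX5_UMR_KLM_ALIGNMENT of 4 entries seen in the last hunk.

/* Illustrative sketch only -- not part of the patch. The entry sizes below
 * are assumptions for demonstration; the kernel uses sizeof(struct mlx5_mtt)
 * and sizeof(struct mlx5_klm) directly.
 */
#include <stdio.h>

#define MLX5_UMR_FLEX_ALIGNMENT	0x40	/* byte alignment of UMR xlt data */

#define DEMO_MTT_ENTRY_SIZE	8	/* assumed MTT entry size in bytes */
#define DEMO_KLM_ENTRY_SIZE	16	/* assumed KLM/KSM entry size in bytes */

int main(void)
{
	/* The byte alignment is the same for every entry type; only the
	 * resulting entries-per-block count differs -- hence "FLEX".
	 */
	printf("MTT entries per aligned block: %d\n",
	       MLX5_UMR_FLEX_ALIGNMENT / DEMO_MTT_ENTRY_SIZE);	/* 8 */
	printf("KLM/KSM entries per aligned block: %d\n",
	       MLX5_UMR_FLEX_ALIGNMENT / DEMO_KLM_ENTRY_SIZE);	/* 4 */
	return 0;
}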
@@ -230,8 +230,7 @@ static bool mlx5_ib_invalidate_range(struct mmu_interval_notifier *mni,
 	struct ib_umem_odp *umem_odp =
 		container_of(mni, struct ib_umem_odp, notifier);
 	struct mlx5_ib_mr *mr;
-	const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
-				    sizeof(struct mlx5_mtt)) - 1;
+	const u64 umr_block_mask = MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1;
 	u64 idx = 0, blk_start_idx = 0;
 	u64 invalidations = 0;
 	unsigned long start;
......
@@ -418,7 +418,7 @@ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
 }
 
 #define MLX5_MAX_UMR_CHUNK \
-	((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_MTT_ALIGNMENT)
+	((1 << (MLX5_MAX_UMR_SHIFT + 4)) - MLX5_UMR_FLEX_ALIGNMENT)
 #define MLX5_SPARE_UMR_CHUNK 0x10000
 
 /*
@@ -428,11 +428,11 @@ int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
  */
 static void *mlx5r_umr_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
 {
-	const size_t xlt_chunk_align = MLX5_UMR_MTT_ALIGNMENT / ent_size;
+	const size_t xlt_chunk_align = MLX5_UMR_FLEX_ALIGNMENT / ent_size;
 	size_t size;
 	void *res = NULL;
 
-	static_assert(PAGE_SIZE % MLX5_UMR_MTT_ALIGNMENT == 0);
+	static_assert(PAGE_SIZE % MLX5_UMR_FLEX_ALIGNMENT == 0);
 
 	/*
 	 * MLX5_IB_UPD_XLT_ATOMIC doesn't signal an atomic context just that the
@@ -666,7 +666,7 @@ int mlx5r_umr_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags)
 	}
 
 	final_size = (void *)cur_mtt - (void *)mtt;
-	sg.length = ALIGN(final_size, MLX5_UMR_MTT_ALIGNMENT);
+	sg.length = ALIGN(final_size, MLX5_UMR_FLEX_ALIGNMENT);
 	memset(cur_mtt, 0, sg.length - final_size);
 	mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
 
@@ -690,7 +690,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
 			       ? sizeof(struct mlx5_klm)
 			       : sizeof(struct mlx5_mtt);
-	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
+	const int page_align = MLX5_UMR_FLEX_ALIGNMENT / desc_size;
 	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
 	struct device *ddev = &dev->mdev->pdev->dev;
 	const int page_mask = page_align - 1;
@@ -711,7 +711,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 	if (WARN_ON(!mr->umem->is_odp))
 		return -EINVAL;
 
-	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
+	/* UMR copies MTTs in units of MLX5_UMR_FLEX_ALIGNMENT bytes,
 	 * so we need to align the offset and length accordingly
 	 */
 	if (idx & page_mask) {
@@ -748,7 +748,7 @@ int mlx5r_umr_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
 		mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
 		dma_sync_single_for_device(ddev, sg.addr, sg.length,
 					   DMA_TO_DEVICE);
-		sg.length = ALIGN(size_to_map, MLX5_UMR_MTT_ALIGNMENT);
+		sg.length = ALIGN(size_to_map, MLX5_UMR_FLEX_ALIGNMENT);
 
 		if (pages_mapped + pages_iter >= pages_to_map)
 			mlx5r_umr_final_update_xlt(dev, &wqe, mr, &sg, flags);
......
@@ -103,11 +103,11 @@ struct page_pool;
  * size actually used at runtime, but it's not a problem when calculating static
  * array sizes.
  */
-#define MLX5_UMR_MAX_MTT_SPACE \
+#define MLX5_UMR_MAX_FLEX_SPACE \
 	(ALIGN_DOWN(MLX5_SEND_WQE_MAX_SIZE - sizeof(struct mlx5e_umr_wqe), \
-		    MLX5_UMR_MTT_ALIGNMENT))
+		    MLX5_UMR_FLEX_ALIGNMENT))
 #define MLX5_MPWRQ_MAX_PAGES_PER_WQE \
-	rounddown_pow_of_two(MLX5_UMR_MAX_MTT_SPACE / sizeof(struct mlx5_mtt))
+	rounddown_pow_of_two(MLX5_UMR_MAX_FLEX_SPACE / sizeof(struct mlx5_mtt))
 
 #define MLX5E_MAX_RQ_NUM_MTTS \
 	(ALIGN_DOWN(U16_MAX, 4) * 2) /* Fits into u16 and aligned by WQEBB. */
......
@@ -107,7 +107,7 @@ u8 mlx5e_mpwrq_log_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
 	/* Keep in sync with MLX5_MPWRQ_MAX_PAGES_PER_WQE. */
 	max_wqe_size = mlx5e_get_max_sq_aligned_wqebbs(mdev) * MLX5_SEND_WQE_BB;
 	max_pages_per_wqe = ALIGN_DOWN(max_wqe_size - sizeof(struct mlx5e_umr_wqe),
-				       MLX5_UMR_MTT_ALIGNMENT) / umr_entry_size;
+				       MLX5_UMR_FLEX_ALIGNMENT) / umr_entry_size;
 	max_log_mpwqe_size = ilog2(max_pages_per_wqe) + page_shift;
 
 	WARN_ON_ONCE(max_log_mpwqe_size < MLX5E_ORDER2_MAX_PACKET_MTU);
@@ -146,7 +146,7 @@ u16 mlx5e_mpwrq_umr_wqe_sz(struct mlx5_core_dev *mdev, u8 page_shift,
 	u16 umr_wqe_sz;
 
 	umr_wqe_sz = sizeof(struct mlx5e_umr_wqe) +
-		ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+		ALIGN(pages_per_wqe * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
 
 	WARN_ON_ONCE(DIV_ROUND_UP(umr_wqe_sz, MLX5_SEND_WQE_DS) > MLX5_WQE_CTRL_DS_MASK);
......
@@ -208,7 +208,7 @@ static u16 mlx5e_mpwrq_umr_octowords(u32 entries, enum mlx5e_mpwrq_umr_mode umr_
 	u8 umr_entry_size = mlx5e_mpwrq_umr_entry_size(umr_mode);
 	u32 sz;
 
-	sz = ALIGN(entries * umr_entry_size, MLX5_UMR_MTT_ALIGNMENT);
+	sz = ALIGN(entries * umr_entry_size, MLX5_UMR_FLEX_ALIGNMENT);
 
 	return sz / MLX5_OCTWORD;
 }
......
@@ -291,8 +291,8 @@ enum {
 };
 
 #define MLX5_UMR_KLM_ALIGNMENT 4
-#define MLX5_UMR_MTT_ALIGNMENT 0x40
-#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_MTT_ALIGNMENT / sizeof(struct mlx5_mtt))
+#define MLX5_UMR_FLEX_ALIGNMENT 0x40
+#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
 
 #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
......
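
For reference, the padding arithmetic that feeds the renamed constant can be seen in mlx5e_mpwrq_umr_octowords() above: the entry array is rounded up to MLX5_UMR_FLEX_ALIGNMENT and then expressed in 16-byte octowords. Below is a minimal standalone sketch of that math, assuming MLX5_OCTWORD is 16 bytes as in the driver; the demo_umr_octowords() helper name is hypothetical.

/* Standalone sketch of the ALIGN-then-divide math from
 * mlx5e_mpwrq_umr_octowords(); the constants mirror the driver definitions,
 * the helper name is hypothetical.
 */
#include <stdio.h>

#define MLX5_UMR_FLEX_ALIGNMENT	0x40
#define MLX5_OCTWORD		16
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

static unsigned int demo_umr_octowords(unsigned int entries,
				       unsigned int entry_size)
{
	unsigned int sz = ALIGN(entries * entry_size, MLX5_UMR_FLEX_ALIGNMENT);

	return sz / MLX5_OCTWORD;
}

int main(void)
{
	/* e.g. 64 MTT entries of 8 bytes -> 512 bytes -> 32 octowords,
	 * while 60 entries also pad up to 512 bytes (the FLEX alignment).
	 */
	printf("%u\n", demo_umr_octowords(64, 8));	/* 32 */
	printf("%u\n", demo_umr_octowords(60, 8));	/* 32 */
	return 0;
}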