Commit f49c856a authored by Aharon Landau, committed by Jason Gunthorpe

RDMA/mlx5: Move umr checks to umr.h

Move mlx5_ib_can_load_pas_with_umr() and mlx5_ib_can_reconfig_with_umr()
to umr.h and rename them accordingly.

Link: https://lore.kernel.org/r/1b799b0142534a63dfd5bacc5f8ad2256d7777ad.1649747695.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 04876c12
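In short, callers that previously used the two helpers from mlx5_ib.h now include umr.h and call the renamed versions. A minimal sketch follows; can_use_umr_fast_path() is a hypothetical wrapper added here only for illustration (it is not part of this commit), and the two checks it combines mirror what create_real_mr() and mlx5_mr_cache_alloc() do in the diff below.

#include "umr.h"

/* Hypothetical wrapper, for illustration only: gates a UMR-based fast path. */
static bool can_use_umr_fast_path(struct mlx5_ib_dev *dev, size_t length,
                                  unsigned int access_flags)
{
    /* Was mlx5_ib_can_load_pas_with_umr(dev, length) before this commit. */
    if (!mlx5r_umr_can_load_pas(dev, length))
        return false;

    /* Was mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags). */
    return mlx5r_umr_can_reconfig(dev, 0, access_flags);
}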
@@ -1471,9 +1471,6 @@ static inline int is_qp1(enum ib_qp_type qp_type)
 	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
 }
 
-#define MLX5_MAX_UMR_SHIFT 16
-#define MLX5_MAX_UMR_PAGES	(1 << MLX5_MAX_UMR_SHIFT)
-
 static inline u32 check_cq_create_flags(u32 flags)
 {
 	/*
@@ -1545,59 +1542,6 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
 			struct mlx5_bfreg_info *bfregi, u32 bfregn,
 			bool dyn_bfreg);
 
-static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
-						 size_t length)
-{
-	/*
-	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
-	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
-	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
-	 * can never be enabled without this capability. Simplify this weird
-	 * quirky hardware by just saying it can't use PAS lists with UMR at
-	 * all.
-	 */
-	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
-		return false;
-
-	/*
-	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
-	 * used.
-	 */
-	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
-	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
-		return false;
-
-	return true;
-}
-
-/*
- * true if an existing MR can be reconfigured to new access_flags using UMR.
- * Older HW cannot use UMR to update certain elements of the MKC. See
- * umr_check_mkey_mask(), get_umr_update_access_mask() and umr_check_mkey_mask()
- */
-static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
-						 unsigned int current_access_flags,
-						 unsigned int target_access_flags)
-{
-	unsigned int diffs = current_access_flags ^ target_access_flags;
-
-	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
-	    MLX5_CAP_GEN(dev->mdev, atomic) &&
-	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
-		return false;
-
-	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
-	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
-	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
-		return false;
-
-	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
-	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
-	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
-		return false;
-
-	return true;
-}
-
 static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
 					struct mlx5_ib_mkey *mmkey)
 {
...
@@ -44,6 +44,7 @@
 #include <rdma/ib_verbs.h>
 #include "dm.h"
 #include "mlx5_ib.h"
+#include "umr.h"
 
 /*
  * We can't use an array for xlt_emergency_page because dma_map_single doesn't
@@ -598,7 +599,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 	struct mlx5_ib_mr *mr;
 
 	/* Matches access in alloc_cache_mr() */
-	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags))
+	if (!mlx5r_umr_can_reconfig(dev, 0, access_flags))
 		return ERR_PTR(-EOPNOTSUPP);
 
 	spin_lock_irq(&ent->lock);
@@ -738,7 +739,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
 		if ((dev->mdev->profile.mask & MLX5_PROF_MASK_MR_CACHE) &&
 		    !dev->is_rep && mlx5_core_is_pf(dev->mdev) &&
-		    mlx5_ib_can_load_pas_with_umr(dev, 0))
+		    mlx5r_umr_can_load_pas(dev, 0))
 			ent->limit = dev->mdev->profile.mr_cache[i].limit;
 		else
 			ent->limit = 0;
@@ -946,7 +947,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 	 * cache then synchronously create an uncached one.
 	 */
 	if (!ent || ent->limit == 0 ||
-	    !mlx5_ib_can_reconfig_with_umr(dev, 0, access_flags)) {
+	    !mlx5r_umr_can_reconfig(dev, 0, access_flags)) {
 		mutex_lock(&dev->slow_path_mutex);
 		mr = reg_create(pd, umem, iova, access_flags, page_size, false);
 		mutex_unlock(&dev->slow_path_mutex);
@@ -1438,7 +1439,7 @@ static struct ib_mr *create_real_mr(struct ib_pd *pd, struct ib_umem *umem,
 	bool xlt_with_umr;
 	int err;
 
-	xlt_with_umr = mlx5_ib_can_load_pas_with_umr(dev, umem->length);
+	xlt_with_umr = mlx5r_umr_can_load_pas(dev, umem->length);
 	if (xlt_with_umr) {
 		mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
 	} else {
@@ -1501,7 +1502,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
 	}
 
 	/* ODP requires xlt update via umr to work. */
-	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
+	if (!mlx5r_umr_can_load_pas(dev, length))
 		return ERR_PTR(-EINVAL);
 
 	odp = ib_umem_odp_get(&dev->ib_dev, start, length, access_flags,
@@ -1591,7 +1592,7 @@ struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
 		    offset, virt_addr, length, fd, access_flags);
 
 	/* dmabuf requires xlt update via umr to work. */
-	if (!mlx5_ib_can_load_pas_with_umr(dev, length))
+	if (!mlx5r_umr_can_load_pas(dev, length))
 		return ERR_PTR(-EINVAL);
 
 	umem_dmabuf = ib_umem_dmabuf_get(&dev->ib_dev, offset, length, fd,
@@ -1666,8 +1667,8 @@ static bool can_use_umr_rereg_access(struct mlx5_ib_dev *dev,
 	if (diffs & ~(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
 		      IB_ACCESS_REMOTE_READ | IB_ACCESS_RELAXED_ORDERING))
 		return false;
-	return mlx5_ib_can_reconfig_with_umr(dev, current_access_flags,
-					     target_access_flags);
+	return mlx5r_umr_can_reconfig(dev, current_access_flags,
+				      target_access_flags);
 }
 
 static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
@@ -1704,7 +1705,7 @@ static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
 	/* We only track the allocated sizes of MRs from the cache */
 	if (!mr->cache_ent)
 		return false;
-	if (!mlx5_ib_can_load_pas_with_umr(dev, new_umem->length))
+	if (!mlx5r_umr_can_load_pas(dev, new_umem->length))
 		return false;
 
 	*page_size =
...
@@ -38,6 +38,7 @@
 #include "mlx5_ib.h"
 #include "cmd.h"
+#include "umr.h"
 #include "qp.h"
 
 #include <linux/mlx5/eq.h>
@@ -323,8 +324,7 @@ static void internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 
 	memset(caps, 0, sizeof(*caps));
 
-	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
-	    !mlx5_ib_can_load_pas_with_umr(dev, 0))
+	if (!MLX5_CAP_GEN(dev->mdev, pg) || !mlx5r_umr_can_load_pas(dev, 0))
 		return;
 
 	caps->general_caps = IB_ODP_SUPPORT;
@@ -487,8 +487,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
 	struct mlx5_ib_mr *imr;
 	int err;
 
-	if (!mlx5_ib_can_load_pas_with_umr(dev,
-					   MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
+	if (!mlx5r_umr_can_load_pas(dev, MLX5_IMR_MTT_ENTRIES * PAGE_SIZE))
 		return ERR_PTR(-EOPNOTSUPP);
 
 	umem_odp = ib_umem_odp_alloc_implicit(&dev->ib_dev, access_flags);
...
@@ -6,7 +6,64 @@
 #include "mlx5_ib.h"
 
+#define MLX5_MAX_UMR_SHIFT 16
+#define MLX5_MAX_UMR_PAGES	(1 << MLX5_MAX_UMR_SHIFT)
+
 int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev);
 void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev);
 
+static inline bool mlx5r_umr_can_load_pas(struct mlx5_ib_dev *dev,
+					  size_t length)
+{
+	/*
+	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
+	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
+	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
+	 * can never be enabled without this capability. Simplify this weird
+	 * quirky hardware by just saying it can't use PAS lists with UMR at
+	 * all.
+	 */
+	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
+		return false;
+
+	/*
+	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
+	 * used.
+	 */
+	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
+	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
+		return false;
+
+	return true;
+}
+
+/*
+ * true if an existing MR can be reconfigured to new access_flags using UMR.
+ * Older HW cannot use UMR to update certain elements of the MKC. See
+ * get_umr_update_access_mask() and umr_check_mkey_mask()
+ */
+static inline bool mlx5r_umr_can_reconfig(struct mlx5_ib_dev *dev,
+					  unsigned int current_access_flags,
+					  unsigned int target_access_flags)
+{
+	unsigned int diffs = current_access_flags ^ target_access_flags;
+
+	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
+	    MLX5_CAP_GEN(dev->mdev, atomic) &&
+	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
+		return false;
+
+	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
+		return false;
+
+	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
+	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
+	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
+		return false;
+
+	return true;
+}
+
 #endif /* _MLX5_IB_UMR_H */
@@ -7,6 +7,7 @@
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/driver.h>
 #include "wr.h"
+#include "umr.h"
 
 static const u32 mlx5_ib_opcode[] = {
 	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
@@ -870,7 +871,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	 * Relaxed Ordering is set implicitly in mlx5_set_umr_free_mkey() and
 	 * kernel ULPs are not aware of it, so we don't set it here.
 	 */
-	if (!mlx5_ib_can_reconfig_with_umr(dev, 0, wr->access)) {
+	if (!mlx5r_umr_can_reconfig(dev, 0, wr->access)) {
 		mlx5_ib_warn(
 			to_mdev(qp->ibqp.device),
 			"Fast update for MR access flags is not possible\n");
...