Commit 72d422c2 authored by Sindhu Devale, committed by Leon Romanovsky

RDMA/irdma: Use HW specific minimum WQ size

HW GEN1 and GEN2 have different minimum WQ sizes, but they are currently set to the same value.

Use a gen-specific attribute, min_hw_wq_size, and extend the ABI to pass it to user-space.
Signed-off-by: Sindhu Devale <sindhu.devale@intel.com>
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Link: https://lore.kernel.org/r/20230725155525.1081-3-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 3a849872
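
In short, the minimum WQ size becomes a per-generation ukernel attribute instead of a shared software constant, and it is reported to user-space through the alloc_ucontext response. A condensed, purely illustrative summary (the names and values are taken from the hunks below; this is not driver code):

/* Illustrative summary only; values come from the diff below. */
enum {
	GEN1_MIN_WQ_SIZE = 4,	/* I40IW_MIN_WQ_SIZE, in WQEs */
	GEN2_MIN_WQ_SIZE = 8,	/* ICRDMA_MIN_WQ_SIZE, in WQEs */
};

User-space learns which value applies from uresp.min_hw_wq_size, with the IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE bit in comp_mask indicating that the field is present.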
@@ -254,5 +254,6 @@ void i40iw_init_hw(struct irdma_sc_dev *dev)
 	dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_1;
 	dev->hw_attrs.max_hw_outbound_msg_size = I40IW_MAX_OUTBOUND_MSG_SIZE;
 	dev->hw_attrs.max_hw_inbound_msg_size = I40IW_MAX_INBOUND_MSG_SIZE;
+	dev->hw_attrs.uk_attrs.min_hw_wq_size = I40IW_MIN_WQ_SIZE;
 	dev->hw_attrs.max_qp_wr = I40IW_MAX_QP_WRS;
 }
@@ -140,11 +140,11 @@ enum i40iw_device_caps_const {
 	I40IW_MAX_CQ_SIZE = 1048575,
 	I40IW_MAX_OUTBOUND_MSG_SIZE = 2147483647,
 	I40IW_MAX_INBOUND_MSG_SIZE = 2147483647,
+	I40IW_MIN_WQ_SIZE = 4 /* WQEs */,
 };

 #define I40IW_QP_WQE_MIN_SIZE 32
 #define I40IW_QP_WQE_MAX_SIZE 128
-#define I40IW_QP_SW_MIN_WQSIZE 4
 #define I40IW_MAX_RQ_WQE_SHIFT 2
 #define I40IW_MAX_QUANTA_PER_WR 2
@@ -195,6 +195,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev)
 	dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
 	dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+	dev->hw_attrs.uk_attrs.min_hw_wq_size = ICRDMA_MIN_WQ_SIZE;
 	dev->hw_attrs.uk_attrs.max_hw_sq_chunk = IRDMA_MAX_QUANTA_PER_WR;
 	dev->hw_attrs.uk_attrs.feature_flags |= IRDMA_FEATURE_RTS_AE |
 						IRDMA_FEATURE_CQ_RESIZE;
@@ -64,6 +64,7 @@ enum icrdma_device_caps_const {
 	ICRDMA_MAX_IRD_SIZE = 127,
 	ICRDMA_MAX_ORD_SIZE = 255,
+	ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
 };
@@ -119,6 +119,7 @@ struct irdma_uk_attrs {
 	u32 min_hw_cq_size;
 	u32 max_hw_cq_size;
 	u16 max_hw_sq_chunk;
+	u16 min_hw_wq_size;
 	u8 hw_rev;
 };
@@ -1349,10 +1349,12 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
 int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
 		      u32 *sqdepth)
 {
+	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
 	*sqdepth = irdma_qp_round_up((sq_size << shift) + IRDMA_SQ_RSVD);

-	if (*sqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
-		*sqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+	if (*sqdepth < min_size)
+		*sqdepth = min_size;
 	else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
 		return -EINVAL;
@@ -1369,10 +1371,12 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
 		      u32 *rqdepth)
 {
+	u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
 	*rqdepth = irdma_qp_round_up((rq_size << shift) + IRDMA_RQ_RSVD);

-	if (*rqdepth < (IRDMA_QP_SW_MIN_WQSIZE << shift))
-		*rqdepth = IRDMA_QP_SW_MIN_WQSIZE << shift;
+	if (*rqdepth < min_size)
+		*rqdepth = min_size;
 	else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
 		return -EINVAL;
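
The two hunks above replace the fixed IRDMA_QP_SW_MIN_WQSIZE floor with a floor derived from the new attribute. A small standalone sketch of the resulting behaviour follows; irdma_qp_round_up() is not part of this diff, so round_up_pow2() below is a stand-in that assumes rounding up to the next power of two, and the IRDMA_SQ_RSVD and max_hw_wq_quanta values are placeholders:

/* Illustrative sketch, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define SQ_RSVD		1	/* placeholder for IRDMA_SQ_RSVD */
#define MAX_WQ_QUANTA	32768	/* placeholder for uk_attrs->max_hw_wq_quanta */

/* Stand-in for irdma_qp_round_up(): round up to the next power of two. */
static uint32_t round_up_pow2(uint32_t v)
{
	uint32_t p = 1;

	while (p < v)
		p <<= 1;
	return p;
}

/* Mirrors the new clamp: the depth is raised to min_hw_wq_size << shift. */
static int get_sqdepth(uint16_t min_hw_wq_size, uint32_t sq_size, uint8_t shift,
		       uint32_t *sqdepth)
{
	uint32_t min_size = (uint32_t)min_hw_wq_size << shift;

	*sqdepth = round_up_pow2((sq_size << shift) + SQ_RSVD);
	if (*sqdepth < min_size)
		*sqdepth = min_size;
	else if (*sqdepth > MAX_WQ_QUANTA)
		return -1;
	return 0;
}

int main(void)
{
	uint32_t d1 = 0, d2 = 0;

	/* Same one-WQE request, shift 0: GEN1 floors at 4 quanta, GEN2 at 8. */
	get_sqdepth(4, 1, 0, &d1);	/* GEN1: I40IW_MIN_WQ_SIZE */
	get_sqdepth(8, 1, 0, &d2);	/* GEN2: ICRDMA_MIN_WQ_SIZE */
	printf("GEN1 depth=%u GEN2 depth=%u\n", d1, d2);	/* prints 4 and 8 */
	return 0;
}

The only behavioural change is the source of the floor: previously both generations used the same software constant, now GEN1 enforces 4 WQEs and GEN2 enforces 8.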
@@ -85,6 +85,7 @@ enum irdma_device_caps_const {
 	IRDMA_Q2_BUF_SIZE = 256,
 	IRDMA_QP_CTX_SIZE = 256,
 	IRDMA_MAX_PDS = 262144,
+	IRDMA_MIN_WQ_SIZE_GEN2 = 8,
 };

 enum irdma_addressing_type {
@@ -330,6 +330,8 @@ static int irdma_alloc_ucontext(struct ib_ucontext *uctx,
 	uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
 	uresp.hw_rev = uk_attrs->hw_rev;
 	uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+	uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
+	uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
 	if (ib_copy_to_udata(udata, &uresp,
 			     min(sizeof(uresp), udata->outlen))) {
 		rdma_user_mmap_entry_remove(ucontext->db_mmap_entry);
@@ -24,6 +24,7 @@ enum irdma_memreg_type {
 enum {
 	IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+	IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
 };

 struct irdma_alloc_ucontext_req {
@@ -52,6 +53,8 @@ struct irdma_alloc_ucontext_resp {
 	__u8 hw_rev;
 	__u8 rsvd2;
 	__aligned_u64 comp_mask;
+	__u16 min_hw_wq_size;
+	__u8 rsvd3[6];
 };

 struct irdma_alloc_pd_resp {
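
A user-space provider that wants to honour the gen-specific minimum would read the new field from the extended alloc_ucontext response above. The sketch below is hypothetical (it is not the rdma-core irdma provider); the comp_mask bit and field names mirror the uapi hunk, while the fallback default for older kernels that do not report the attribute is an assumption:

/* Hypothetical user-space helper; only the names mirror the uapi above. */
#include <stdint.h>

#define IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE	(1 << 1)	/* from the uapi hunk */
#define ASSUMED_LEGACY_MIN_WQ_SIZE	8		/* provider fallback, assumed */

static uint16_t pick_min_wq_size(uint64_t comp_mask, uint16_t min_hw_wq_size)
{
	/* Trust the field only if the kernel set the corresponding bit;
	 * older kernels leave both the bit and the field clear.
	 */
	if (comp_mask & IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE)
		return min_hw_wq_size;
	return ASSUMED_LEGACY_MIN_WQ_SIZE;
}

Checking the comp_mask bit rather than the response length keeps the provider compatible with kernels on either side of this change.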