Commit 744b7bdf authored by Xi Wang, committed by Jason Gunthorpe

RDMA/hns: Support 0 hop addressing for CQE buffer

Add zero hop addressing support for the CQE buffer by using the mtr
interface, so that the hns driver can support hop numbers from 0 to 3
for CQE addressing.

Link: https://lore.kernel.org/r/1586779091-51410-7-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 6fd610c5
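The hns_roce_cq.c side of this patch is not shown in this view. As a rough sketch of what the commit message describes, allocating the CQE buffer through the mtr interface would look roughly like the following; hns_roce_buf_attr, hns_roce_mtr_create and HNS_HW_PAGE_SHIFT come from this patch series, and their exact signatures here are assumptions, not part of the visible diff:

/* Sketch only: CQE buffer allocation via the mtr interface. */
static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
			struct ib_udata *udata, unsigned long addr)
{
	struct hns_roce_buf_attr buf_attr = {};

	/* A single region holding all CQEs; cqe_hop_num may be 0..3. */
	buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT;
	buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
	buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
	buf_attr.region_count = 1;

	/* mtr covers kernel and user buffers plus the BA table in one call. */
	return hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
				   hr_dev->caps.cqe_ba_pg_sz + HNS_HW_PAGE_SHIFT,
				   udata, addr);
}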
@@ -503,14 +503,10 @@ struct hns_roce_db {
 struct hns_roce_cq {
 	struct ib_cq ib_cq;
-	struct hns_roce_buf buf;
-	struct hns_roce_mtt mtt;
+	struct hns_roce_mtr mtr;
 	struct hns_roce_db db;
 	u8 db_en;
 	spinlock_t lock;
-	struct ib_umem *umem;
-	u32 buf_size;
-	int page_shift;
 	u32 cq_depth;
 	u32 cons_index;
 	u32 *set_ci_db;
@@ -1294,8 +1290,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
 		       struct ib_udata *udata);
 void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
 int hns_roce_db_map_user(struct hns_roce_ucontext *context,
 			 struct ib_udata *udata, unsigned long virt,
 			 struct hns_roce_db *db);
...
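The struct change above collapses buf, mtt, umem, buf_size and page_shift into a single hns_roce_mtr. Only a few of its members are visible in the hunks below (mtr.kmem and mtr.hem_cfg.*_pg_shift); a sketch of the relevant part, with everything else assumed from the series:

/* Sketch: mtr members this patch relies on; other fields omitted. */
struct hns_roce_mtr {
	struct ib_umem *umem;		/* user-space buffer (assumed) */
	struct hns_roce_buf *kmem;	/* kernel-space buffer, see get_cqe() */
	struct {
		unsigned int ba_pg_shift;	/* BA table page shift */
		unsigned int buf_pg_shift;	/* buffer page shift */
	} hem_cfg;	/* hardware addressing config written into the CQC */
};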
@@ -1972,7 +1972,8 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
 {
-	return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+	return hns_roce_buf_offset(hr_cq->mtr.kmem,
+				   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
 }

 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -3644,8 +3645,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 	u32 cqe_cnt_cur;
 	int wait_time = 0;

-	hns_roce_free_cqc(hr_dev, hr_cq);
-
 	/*
 	 * Before freeing cq buffer, we need to ensure that the outstanding CQE
 	 * have been written by checking the CQE counter.
@@ -3668,14 +3667,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 		}
 		wait_time++;
 	}
-
-	hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-
-	ib_umem_release(hr_cq->umem);
-	if (!udata) {
-		/* Free the buff of stored cq */
-		hns_roce_buf_free(hr_dev, &hr_cq->buf);
-	}
 }

 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
...
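With hns_roce_free_cqc() and the buffer teardown removed from the v1 backend, release of the CQE buffer is expected to happen once in common code through the mtr interface. A plausible counterpart, assuming hns_roce_mtr_destroy from this series (the actual hns_roce_cq.c hunk is not shown here):

/* Sketch: common-code release, assuming hns_roce_mtr_destroy exists. */
static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{
	/* Frees kernel pages or releases the umem, and drops the BA table. */
	hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
}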
@@ -2680,7 +2680,8 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 {
-	return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+	return hns_roce_buf_offset(hr_cq->mtr.kmem,
+				   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
 }

 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2801,30 +2802,30 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
 		       V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);

-	cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+	cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));

 	roce_set_field(cq_context->byte_16_hop_addr,
 		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
 		       V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
-		       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+		       upper_32_bits(to_hr_hw_page_addr(mtts[0])));
 	roce_set_field(cq_context->byte_16_hop_addr,
 		       V2_CQC_BYTE_16_CQE_HOP_NUM_M,
 		       V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
 		       HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);

-	cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+	cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));

 	roce_set_field(cq_context->byte_24_pgsz_addr,
 		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
 		       V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
-		       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+		       upper_32_bits(to_hr_hw_page_addr(mtts[1])));
 	roce_set_field(cq_context->byte_24_pgsz_addr,
 		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
 		       V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
-		       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
+		       to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
 	roce_set_field(cq_context->byte_24_pgsz_addr,
 		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
 		       V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
-		       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
+		       to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));

 	cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
...
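The two helpers the v2 hunk switches to convert CPU-sized page values into the hardware's fixed page unit, replacing the open-coded PAGE_ADDR_SHIFT/PG_SHIFT_OFFSET arithmetic. Their defining hunk is not shown; a likely shape, assuming a 4K hardware page:

/* Sketch: hardware pages are assumed fixed at 4K regardless of PAGE_SIZE. */
#define HNS_HW_PAGE_SHIFT 12

static inline u64 to_hr_hw_page_addr(u64 addr)
{
	return addr >> HNS_HW_PAGE_SHIFT;	/* address in 4K HW pages */
}

static inline u32 to_hr_hw_page_shift(u32 page_shift)
{
	return page_shift - HNS_HW_PAGE_SHIFT;	/* shift relative to 4K */
}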