Commit 744b7bdf authored by Xi Wang, committed by Jason Gunthorpe

RDMA/hns: Support 0 hop addressing for CQE buffer

Add zero hop addressing support by using the mtr interface for the CQE
buffer, so that the hns driver can support a hop number between 0 and 3
for CQE addressing.

Link: https://lore.kernel.org/r/1586779091-51410-7-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 6fd610c5
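
For context before the diff: the patch moves the CQE buffer from the driver's
old mtt/umem handling onto the generic mtr (memory translate region)
interface, and buf_attr.region[0].hopnum may now carry anything from 0 to 3.
As a hedged illustration of what "0 hop" means in this scheme, the sketch
below requests a kernel-owned CQE buffer with an explicit zero hop count; the
hns_roce_buf_attr fields and the hns_roce_mtr_create() call are taken from
the new alloc_cq_buf() in this patch, while the wrapper function itself is
hypothetical:

        /* Hypothetical sketch, not part of this patch. */
        static int example_zero_hop_cqe_buf(struct hns_roce_dev *hr_dev,
                                            struct hns_roce_cq *hr_cq)
        {
                struct hns_roce_buf_attr buf_attr = {};

                /* CQE pages use the device's CQE buffer page size. */
                buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz +
                                      PAGE_ADDR_SHIFT;
                /* One region covering the whole CQE array. */
                buf_attr.region[0].size = hr_cq->cq_depth *
                                          hr_dev->caps.cq_entry_sz;
                /*
                 * HNS_ROCE_HOP_NUM_0 is the driver's sentinel for zero hop
                 * addressing: the hardware reads CQEs directly from the
                 * buffer's base address, with no intermediate BA (base
                 * address) pages, so the region must be one contiguous block.
                 */
                buf_attr.region[0].hopnum = HNS_ROCE_HOP_NUM_0;
                buf_attr.region_count = 1;
                buf_attr.fixed_page = true;

                /* Kernel-owned buffer: no udata and no user-space address. */
                return hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
                                           hr_dev->caps.cqe_ba_pg_sz +
                                           PAGE_ADDR_SHIFT,
                                           NULL, 0);
        }

With hop numbers 1 to 3 the hardware instead walks that many levels of BA
pages to reach scattered buffer pages; the actual alloc_cq_buf() below simply
passes hr_dev->caps.cqe_hop_num through, which is what lets one code path
serve all four cases.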
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -39,51 +39,40 @@
 #include <rdma/hns-abi.h>
 #include "hns_roce_common.h"
 
-static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
-                              struct hns_roce_cq *hr_cq)
+static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
         struct hns_roce_cmd_mailbox *mailbox;
-        struct hns_roce_hem_table *mtt_table;
         struct hns_roce_cq_table *cq_table;
-        struct device *dev = hr_dev->dev;
+        struct ib_device *ibdev = &hr_dev->ib_dev;
+        u64 mtts[MTT_MIN_COUNT] = { 0 };
         dma_addr_t dma_handle;
-        u64 *mtts;
         int ret;
 
-        cq_table = &hr_dev->cq_table;
-
-        /* Get the physical address of cq buf */
-        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-                mtt_table = &hr_dev->mr_table.mtt_cqe_table;
-        else
-                mtt_table = &hr_dev->mr_table.mtt_table;
-
-        mtts = hns_roce_table_find(hr_dev, mtt_table, hr_cq->mtt.first_seg,
-                                   &dma_handle);
-        if (!mtts) {
-                dev_err(dev, "Failed to find mtt for CQ buf.\n");
+        ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
+                                &dma_handle);
+        if (ret < 1) {
+                ibdev_err(ibdev, "Failed to find CQ mtr\n");
                 return -EINVAL;
         }
 
+        cq_table = &hr_dev->cq_table;
         ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);
         if (ret) {
-                dev_err(dev, "Num of CQ out of range.\n");
+                ibdev_err(ibdev, "Failed to alloc CQ bitmap, err %d\n", ret);
                 return ret;
         }
 
         /* Get CQC memory HEM(Hardware Entry Memory) table */
         ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);
         if (ret) {
-                dev_err(dev,
-                        "Get context mem failed(%d) when CQ(0x%lx) alloc.\n",
-                        ret, hr_cq->cqn);
+                ibdev_err(ibdev, "Failed to get CQ(0x%lx) context, err %d\n",
+                          hr_cq->cqn, ret);
                 goto err_out;
         }
 
         ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));
         if (ret) {
-                dev_err(dev, "Failed to xa_store CQ.\n");
+                ibdev_err(ibdev, "Failed to xa_store CQ\n");
                 goto err_put;
         }
 
@@ -101,9 +90,9 @@ static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
                                 HNS_ROCE_CMD_CREATE_CQC, HNS_ROCE_CMD_TIMEOUT_MSECS);
         hns_roce_free_cmd_mailbox(hr_dev, mailbox);
         if (ret) {
-                dev_err(dev,
-                        "Send cmd mailbox failed(%d) when CQ(0x%lx) alloc.\n",
-                        ret, hr_cq->cqn);
+                ibdev_err(ibdev,
+                          "Failed to send create cmd for CQ(0x%lx), err %d\n",
+                          hr_cq->cqn, ret);
                 goto err_xa;
         }
 
@@ -126,7 +115,7 @@ static int hns_roce_alloc_cqc(struct hns_roce_dev *hr_dev,
         return ret;
 }
 
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static void free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
         struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
         struct device *dev = hr_dev->dev;
@@ -153,190 +142,86 @@ void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
         hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
 }
 
-static int get_cq_umem(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
-                       struct hns_roce_ib_create_cq ucmd,
-                       struct ib_udata *udata)
-{
-        struct hns_roce_mtt *mtt = &hr_cq->mtt;
-        struct ib_umem **umem = &hr_cq->umem;
-        u32 npages;
-        int ret;
-
-        *umem = ib_umem_get(&hr_dev->ib_dev, ucmd.buf_addr, hr_cq->buf_size,
-                            IB_ACCESS_LOCAL_WRITE);
-        if (IS_ERR(*umem))
-                return PTR_ERR(*umem);
-
-        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-                mtt->mtt_type = MTT_TYPE_CQE;
-        else
-                mtt->mtt_type = MTT_TYPE_WQE;
-
-        npages = DIV_ROUND_UP(ib_umem_page_count(*umem),
-                              1 << hr_dev->caps.cqe_buf_pg_sz);
-        ret = hns_roce_mtt_init(hr_dev, npages, hr_cq->page_shift, mtt);
-        if (ret)
-                goto err_buf;
-
-        ret = hns_roce_ib_umem_write_mtt(hr_dev, mtt, *umem);
-        if (ret)
-                goto err_mtt;
-
-        return 0;
-
-err_mtt:
-        hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
-        ib_umem_release(*umem);
-        return ret;
-}
-
-static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+                        struct ib_udata *udata, unsigned long addr)
 {
-        struct hns_roce_buf *buf = &hr_cq->buf;
-        struct hns_roce_mtt *mtt = &hr_cq->mtt;
-        int ret;
-
-        ret = hns_roce_buf_alloc(hr_dev, hr_cq->buf_size,
-                                 (1 << hr_cq->page_shift) * 2,
-                                 buf, hr_cq->page_shift);
-        if (ret)
-                goto out;
-
-        if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
-                mtt->mtt_type = MTT_TYPE_CQE;
-        else
-                mtt->mtt_type = MTT_TYPE_WQE;
-
-        ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift, mtt);
-        if (ret)
-                goto err_buf;
+        struct ib_device *ibdev = &hr_dev->ib_dev;
+        struct hns_roce_buf_attr buf_attr = {};
+        int err;
 
-        ret = hns_roce_buf_write_mtt(hr_dev, mtt, buf);
-        if (ret)
-                goto err_mtt;
+        buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + PAGE_ADDR_SHIFT;
+        buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
+        buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num;
+        buf_attr.region_count = 1;
+        buf_attr.fixed_page = true;
 
-        return 0;
+        err = hns_roce_mtr_create(hr_dev, &hr_cq->mtr, &buf_attr,
+                                  hr_dev->caps.cqe_ba_pg_sz + PAGE_ADDR_SHIFT,
+                                  udata, addr);
+        if (err)
+                ibdev_err(ibdev, "Failed to alloc CQ mtr, err %d\n", err);
 
-err_mtt:
-        hns_roce_mtt_cleanup(hr_dev, mtt);
-
-err_buf:
-        hns_roce_buf_free(hr_dev, buf);
-out:
-        return ret;
+        return err;
 }
 
 static void free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
 {
-        hns_roce_buf_free(hr_dev, &hr_cq->buf);
+        hns_roce_mtr_destroy(hr_dev, &hr_cq->mtr);
 }
 
-static int create_user_cq(struct hns_roce_dev *hr_dev,
-                          struct hns_roce_cq *hr_cq,
-                          struct ib_udata *udata,
-                          struct hns_roce_ib_create_cq_resp *resp)
+static int alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+                       struct ib_udata *udata, unsigned long addr,
+                       struct hns_roce_ib_create_cq_resp *resp)
 {
-        struct hns_roce_ib_create_cq ucmd;
-        struct device *dev = hr_dev->dev;
-        int ret;
-        struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
-                                udata, struct hns_roce_ucontext, ibucontext);
-
-        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
-                dev_err(dev, "Failed to copy_from_udata.\n");
-                return -EFAULT;
-        }
-
-        /* Get user space address, write it into mtt table */
-        ret = get_cq_umem(hr_dev, hr_cq, ucmd, udata);
-        if (ret) {
-                dev_err(dev, "Failed to get_cq_umem.\n");
-                return ret;
-        }
+        bool has_db = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB;
+        struct hns_roce_ucontext *uctx;
+        int err;
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
-            udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
-                ret = hns_roce_db_map_user(context, udata, ucmd.db_addr,
-                                           &hr_cq->db);
-                if (ret) {
-                        dev_err(dev, "cq record doorbell map failed!\n");
-                        goto err_mtt;
+        if (udata) {
+                if (has_db &&
+                    udata->outlen >= offsetofend(typeof(*resp), cap_flags)) {
+                        uctx = rdma_udata_to_drv_context(udata,
+                                        struct hns_roce_ucontext, ibucontext);
+                        err = hns_roce_db_map_user(uctx, udata, addr,
+                                                   &hr_cq->db);
+                        if (err)
+                                return err;
+                        hr_cq->db_en = 1;
+                        resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
                 }
-                hr_cq->db_en = 1;
-                resp->cap_flags |= HNS_ROCE_SUPPORT_CQ_RECORD_DB;
-        }
-
-        return 0;
-
-err_mtt:
-        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-        ib_umem_release(hr_cq->umem);
-
-        return ret;
-}
-
-static int create_kernel_cq(struct hns_roce_dev *hr_dev,
-                            struct hns_roce_cq *hr_cq)
-{
-        struct device *dev = hr_dev->dev;
-        int ret;
-
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
-                ret = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
-                if (ret)
-                        return ret;
-
-                hr_cq->set_ci_db = hr_cq->db.db_record;
-                *hr_cq->set_ci_db = 0;
-                hr_cq->db_en = 1;
-        }
-
-        /* Init mtt table and write buff address to mtt table */
-        ret = alloc_cq_buf(hr_dev, hr_cq);
-        if (ret) {
-                dev_err(dev, "Failed to alloc_cq_buf.\n");
-                goto err_db;
+        } else {
+                if (has_db) {
+                        err = hns_roce_alloc_db(hr_dev, &hr_cq->db, 1);
+                        if (err)
+                                return err;
+                        hr_cq->set_ci_db = hr_cq->db.db_record;
+                        *hr_cq->set_ci_db = 0;
+                        hr_cq->db_en = 1;
+                }
+                hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
+                                 DB_REG_OFFSET * hr_dev->priv_uar.index;
         }
 
-        hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
-                         DB_REG_OFFSET * hr_dev->priv_uar.index;
-
         return 0;
-
-err_db:
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
-                hns_roce_free_db(hr_dev, &hr_cq->db);
-
-        return ret;
 }
 
-static void destroy_user_cq(struct hns_roce_dev *hr_dev,
-                            struct hns_roce_cq *hr_cq,
-                            struct ib_udata *udata,
-                            struct hns_roce_ib_create_cq_resp *resp)
+static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq,
+                       struct ib_udata *udata)
 {
-        struct hns_roce_ucontext *context = rdma_udata_to_drv_context(
-                                udata, struct hns_roce_ucontext, ibucontext);
+        struct hns_roce_ucontext *uctx;
 
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB &&
-            udata->outlen >= offsetofend(typeof(*resp), cap_flags))
-                hns_roce_db_unmap_user(context, &hr_cq->db);
+        if (!hr_cq->db_en)
+                return;
 
-        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-        ib_umem_release(hr_cq->umem);
-}
-
-static void destroy_kernel_cq(struct hns_roce_dev *hr_dev,
-                              struct hns_roce_cq *hr_cq)
-{
-        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-        free_cq_buf(hr_dev, hr_cq);
-
-        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
+        hr_cq->db_en = 0;
+        if (udata) {
+                uctx = rdma_udata_to_drv_context(udata,
+                                                 struct hns_roce_ucontext,
+                                                 ibucontext);
+                hns_roce_db_unmap_user(uctx, &hr_cq->db);
+        } else {
                 hns_roce_free_db(hr_dev, &hr_cq->db);
+        }
 }
 
 int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -345,20 +230,21 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
         struct hns_roce_ib_create_cq_resp resp = {};
         struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
-        struct device *dev = hr_dev->dev;
+        struct ib_device *ibdev = &hr_dev->ib_dev;
+        struct hns_roce_ib_create_cq ucmd = {};
         int vector = attr->comp_vector;
         u32 cq_entries = attr->cqe;
         int ret;
 
         if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
-                dev_err(dev, "Create CQ failed. entries=%d, max=%d\n",
-                        cq_entries, hr_dev->caps.max_cqes);
+                ibdev_err(ibdev, "Failed to check CQ count %d max=%d\n",
+                          cq_entries, hr_dev->caps.max_cqes);
                 return -EINVAL;
         }
 
         if (vector >= hr_dev->caps.num_comp_vectors) {
-                dev_err(dev, "Create CQ failed, vector=%d, max=%d\n",
-                        vector, hr_dev->caps.num_comp_vectors);
+                ibdev_err(ibdev, "Failed to check CQ vector=%d max=%d\n",
+                          vector, hr_dev->caps.num_comp_vectors);
                 return -EINVAL;
         }
 
@@ -367,30 +253,35 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
         hr_cq->ib_cq.cqe = cq_entries - 1; /* used as cqe index */
         hr_cq->cq_depth = cq_entries;
         hr_cq->vector = vector;
-        hr_cq->buf_size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz;
-        hr_cq->page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
         spin_lock_init(&hr_cq->lock);
         INIT_LIST_HEAD(&hr_cq->sq_list);
         INIT_LIST_HEAD(&hr_cq->rq_list);
 
         if (udata) {
-                ret = create_user_cq(hr_dev, hr_cq, udata, &resp);
+                ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
                 if (ret) {
-                        dev_err(dev, "Create cq failed in user mode!\n");
-                        goto err_cq;
-                }
-        } else {
-                ret = create_kernel_cq(hr_dev, hr_cq);
-                if (ret) {
-                        dev_err(dev, "Create cq failed in kernel mode!\n");
-                        goto err_cq;
+                        ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n",
+                                  ret);
+                        return ret;
                 }
         }
 
-        ret = hns_roce_alloc_cqc(hr_dev, hr_cq);
+        ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
+        if (ret) {
+                ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret);
+                return ret;
+        }
+
+        ret = alloc_cq_db(hr_dev, hr_cq, udata, ucmd.db_addr, &resp);
         if (ret) {
-                dev_err(dev, "Alloc CQ failed(%d).\n", ret);
-                goto err_dbmap;
+                ibdev_err(ibdev, "Failed to alloc CQ db, err %d\n", ret);
+                goto err_cq_buf;
+        }
+
+        ret = alloc_cqc(hr_dev, hr_cq);
+        if (ret) {
+                ibdev_err(ibdev, "Failed to alloc CQ context, err %d\n", ret);
+                goto err_cq_db;
         }
 
         /*
@@ -412,15 +303,11 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
         return 0;
 
 err_cqc:
-        hns_roce_free_cqc(hr_dev, hr_cq);
-
-err_dbmap:
-        if (udata)
-                destroy_user_cq(hr_dev, hr_cq, udata, &resp);
-        else
-                destroy_kernel_cq(hr_dev, hr_cq);
-
-err_cq:
+        free_cqc(hr_dev, hr_cq);
+err_cq_db:
+        free_cq_db(hr_dev, hr_cq, udata);
+err_cq_buf:
+        free_cq_buf(hr_dev, hr_cq);
         return ret;
 }
 
@@ -429,28 +316,12 @@ void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
         struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
         struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
 
-        if (hr_dev->hw->destroy_cq) {
+        if (hr_dev->hw->destroy_cq)
                 hr_dev->hw->destroy_cq(ib_cq, udata);
-                return;
-        }
-
-        hns_roce_free_cqc(hr_dev, hr_cq);
-        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-        ib_umem_release(hr_cq->umem);
 
-        if (udata) {
-                if (hr_cq->db_en == 1)
-                        hns_roce_db_unmap_user(rdma_udata_to_drv_context(
-                                                       udata,
-                                                       struct hns_roce_ucontext,
-                                                       ibucontext),
-                                               &hr_cq->db);
-        } else {
-                /* Free the buff of stored cq */
-                free_cq_buf(hr_dev, hr_cq);
-                if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB)
-                        hns_roce_free_db(hr_dev, &hr_cq->db);
-        }
+        free_cq_buf(hr_dev, hr_cq);
+        free_cq_db(hr_dev, hr_cq, udata);
+        free_cqc(hr_dev, hr_cq);
 }
 
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -503,14 +503,10 @@ struct hns_roce_db {
 
 struct hns_roce_cq {
         struct ib_cq                    ib_cq;
-        struct hns_roce_buf             buf;
-        struct hns_roce_mtt             mtt;
+        struct hns_roce_mtr             mtr;
         struct hns_roce_db              db;
         u8                              db_en;
         spinlock_t                      lock;
-        struct ib_umem                  *umem;
-        u32                             buf_size;
-        int                             page_shift;
         u32                             cq_depth;
         u32                             cons_index;
         u32                             *set_ci_db;
@@ -1294,8 +1290,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
                        struct ib_udata *udata);
 void hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
 
-void hns_roce_free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq);
-
 int hns_roce_db_map_user(struct hns_roce_ucontext *context,
                          struct ib_udata *udata, unsigned long virt,
                          struct hns_roce_db *db);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1972,7 +1972,8 @@ static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
 
 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
 {
-        return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
+        return hns_roce_buf_offset(hr_cq->mtr.kmem,
+                                   n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
 }
 
 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
@@ -3644,8 +3645,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
         u32 cqe_cnt_cur;
         int wait_time = 0;
 
-        hns_roce_free_cqc(hr_dev, hr_cq);
-
         /*
          * Before freeing cq buffer, we need to ensure that the outstanding CQE
          * have been written by checking the CQE counter.
@@ -3668,14 +3667,6 @@ static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
                 }
                 wait_time++;
         }
-
-        hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
-
-        ib_umem_release(hr_cq->umem);
-        if (!udata) {
-                /* Free the buff of stored cq */
-                hns_roce_buf_free(hr_dev, &hr_cq->buf);
-        }
 }
 
 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2680,7 +2680,8 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
 
 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
 {
-        return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
+        return hns_roce_buf_offset(hr_cq->mtr.kmem,
+                                   n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
 }
 
 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
@@ -2801,30 +2802,30 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
         roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
                        V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
 
-        cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
+        cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0]));
 
         roce_set_field(cq_context->byte_16_hop_addr,
                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
                        V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
-                       mtts[0] >> (32 + PAGE_ADDR_SHIFT));
+                       upper_32_bits(to_hr_hw_page_addr(mtts[0])));
         roce_set_field(cq_context->byte_16_hop_addr,
                        V2_CQC_BYTE_16_CQE_HOP_NUM_M,
                        V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
                        HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
 
-        cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
+        cq_context->cqe_nxt_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[1]));
 
         roce_set_field(cq_context->byte_24_pgsz_addr,
                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
                        V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
-                       mtts[1] >> (32 + PAGE_ADDR_SHIFT));
+                       upper_32_bits(to_hr_hw_page_addr(mtts[1])));
         roce_set_field(cq_context->byte_24_pgsz_addr,
                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
                        V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
-                       hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
+                       to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.ba_pg_shift));
         roce_set_field(cq_context->byte_24_pgsz_addr,
                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
                        V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
-                       hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
+                       to_hr_hw_page_shift(hr_cq->mtr.hem_cfg.buf_pg_shift));
 
         cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
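
A hedged note on the CQC changes just above: the old code shifted each buffer
address by PAGE_ADDR_SHIFT and biased the page-size fields with
PG_SHIFT_OFFSET by hand, while the new code reads the shifts the mtr layer
recorded in hr_cq->mtr.hem_cfg and converts them with to_hr_hw_page_addr() /
to_hr_hw_page_shift(). Those helpers are introduced elsewhere in this series
and are not shown in this hunk; plausible minimal definitions, consistent
with the old arithmetic (exactly so for the address fields, and for the
page-size fields on 4 KB-page kernels), would be:

        /*
         * Assumed definitions, for illustration only: the device's hardware
         * page unit is 4 KB (PAGE_ADDR_SHIFT == 12), independent of the CPU
         * PAGE_SIZE.
         */
        static inline u64 to_hr_hw_page_addr(u64 addr)
        {
                return addr >> PAGE_ADDR_SHIFT; /* bytes -> 4 KB page number */
        }

        static inline u32 to_hr_hw_page_shift(u32 page_shift)
        {
                return page_shift - PAGE_ADDR_SHIFT; /* re-bias to hw pages */
        }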