Commit 89b4b70b authored by chenglang, committed by Jason Gunthorpe

RDMA/hns: Optimize hns_roce_mhop_alloc function.

Move some lines for allocating multi-hop addressing into independent
functions in order to improve readability.

Link: https://lore.kernel.org/r/1562593285-8037-7-git-send-email-oulijun@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 99441ab5
...@@ -347,31 +347,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev, ...@@ -347,31 +347,11 @@ static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
mr->pbl_bt_l0 = NULL; mr->pbl_bt_l0 = NULL;
mr->pbl_l0_dma_addr = 0; mr->pbl_l0_dma_addr = 0;
} }
static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
/* PBL multi hop addressing */ struct hns_roce_mr *mr, u32 pbl_bt_sz)
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr)
{ {
struct device *dev = hr_dev->dev; struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
int i = 0, j = 0;
u32 pbl_bt_sz;
u32 mhop_num;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0;
/* hop_num = 1 */
if (mhop_num == 1) {
if (npages > pbl_bt_sz / 8) { if (npages > pbl_bt_sz / 8) {
dev_err(dev, "npages %d is larger than buf_pg_sz!", dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages); npages);
...@@ -385,45 +365,26 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -385,45 +365,26 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
mr->pbl_size = npages; mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr; mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = mhop_num; mr->pbl_hop_num = 1;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz; mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz; mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0; return 0;
}
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l1_dma_addr)
return -ENOMEM;
mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1), }
GFP_KERNEL);
if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1;
if (mhop_num == 3) {
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
goto err_kcalloc_l2_dma;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num, static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
sizeof(*mr->pbl_bt_l2), struct hns_roce_mr *mr, u32 pbl_bt_sz)
GFP_KERNEL); {
if (!mr->pbl_bt_l2) struct device *dev = hr_dev->dev;
goto err_kcalloc_bt_l2; int npages_allocated;
} u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 size;
int i;
/* alloc L0 BT */ pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_dma_alloc_l0;
if (mhop_num == 2) {
/* alloc L1 BT */ /* alloc L1 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) { for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) { if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
...@@ -437,7 +398,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -437,7 +398,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
GFP_KERNEL); GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) { if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0); hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0; return -ENOMEM;
} }
*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i]; *(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];
...@@ -446,7 +407,39 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -446,7 +407,39 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
if (pbl_bt_cnt >= pbl_last_bt_num) if (pbl_bt_cnt >= pbl_last_bt_num)
break; break;
} }
} else if (mhop_num == 3) {
mr->l0_chunk_last_num = i + 1;
return 0;
}
static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;
int i;
int j = 0;
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
return -ENOMEM;
mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_bt_l2),
GFP_KERNEL);
if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l2;
/* alloc L1, L2 BT */ /* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) { for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
...@@ -491,17 +484,10 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -491,17 +484,10 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
if (mr_alloc_done) if (mr_alloc_done)
break; break;
} }
}
mr->l0_chunk_last_num = i + 1; mr->l0_chunk_last_num = i + 1;
if (mhop_num == 3)
mr->l1_chunk_last_num = j + 1; mr->l1_chunk_last_num = j + 1;
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0; return 0;
...@@ -513,6 +499,65 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages, ...@@ -513,6 +499,65 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
kfree(mr->pbl_l2_dma_addr); kfree(mr->pbl_l2_dma_addr);
mr->pbl_l2_dma_addr = NULL; mr->pbl_l2_dma_addr = NULL;
return -ENOMEM;
}
/* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
u32 pbl_bt_sz;
u32 mhop_num;
mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0;
/* hop_num = 1 */
if (mhop_num == 1)
return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);
mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l1_dma_addr)
return -ENOMEM;
mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
GFP_KERNEL);
if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1;
/* alloc L0 BT */
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_kcalloc_l2_dma;
if (mhop_num == 2) {
if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
goto err_kcalloc_l2_dma;
}
if (mhop_num == 3) {
if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
goto err_kcalloc_l2_dma;
}
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
err_kcalloc_l2_dma: err_kcalloc_l2_dma:
kfree(mr->pbl_bt_l1); kfree(mr->pbl_bt_l1);
mr->pbl_bt_l1 = NULL; mr->pbl_bt_l1 = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment