Commit cc8997c9 authored by Sindhu Devale's avatar Sindhu Devale Committed by Leon Romanovsky

RDMA/irdma: Refactor PBLE functions

Refactor the PBLE functions to use a bit mask representing the desired
PBLE level, instead of the two parameters use_pble and lvl_one_only,
which make the code confusing.
Signed-off-by: Mustafa Ismail <mustafa.ismail@intel.com>
Signed-off-by: Sindhu Devale <sindhu.devale@intel.com>
Link: https://lore.kernel.org/r/20230315145305.955-5-shiraz.saleem@intel.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent 99f96b45
...@@ -423,15 +423,15 @@ static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -423,15 +423,15 @@ static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
* @pble_rsrc: pble resources * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr) * @palloc: contains all information regarding pble (idx + pble addr)
* @level1_only: flag for a level 1 PBLE * @lvl: Bitmask for requested pble level
*/ */
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, bool level1_only) struct irdma_pble_alloc *palloc, u8 lvl)
{ {
int status = 0; int status = 0;
status = get_lvl1_pble(pble_rsrc, palloc); status = get_lvl1_pble(pble_rsrc, palloc);
if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE) if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
return status; return status;
status = get_lvl2_pble(pble_rsrc, palloc); status = get_lvl2_pble(pble_rsrc, palloc);
...@@ -444,11 +444,11 @@ static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -444,11 +444,11 @@ static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
* @pble_rsrc: pble resources * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr) * @palloc: contains all information regarding pble (idx + pble addr)
* @pble_cnt: #of pbles requested * @pble_cnt: #of pbles requested
* @level1_only: true if only pble level 1 to acquire * @lvl: requested pble level mask
*/ */
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt, struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only) u8 lvl)
{ {
int status = 0; int status = 0;
int max_sds = 0; int max_sds = 0;
...@@ -462,7 +462,7 @@ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -462,7 +462,7 @@ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
/*check first to see if we can get pble's without acquiring /*check first to see if we can get pble's without acquiring
* additional sd's * additional sd's
*/ */
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
if (!status) if (!status)
goto exit; goto exit;
...@@ -472,9 +472,9 @@ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -472,9 +472,9 @@ int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
if (status) if (status)
break; break;
status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only); status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
/* if level1_only, only go through it once */ /* if the lvl mask is set, only go through it once */
if (!status || level1_only) if (!status || lvl)
break; break;
} }
......
...@@ -114,7 +114,7 @@ void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, ...@@ -114,7 +114,7 @@ void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc); struct irdma_pble_alloc *palloc);
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc, int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
struct irdma_pble_alloc *palloc, u32 pble_cnt, struct irdma_pble_alloc *palloc, u32 pble_cnt,
bool level1_only); u8 lvl);
int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm, int irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
struct irdma_chunk *pchunk); struct irdma_chunk *pchunk);
int irdma_prm_get_pbles(struct irdma_pble_prm *pprm, int irdma_prm_get_pbles(struct irdma_pble_prm *pprm,
......
...@@ -2325,11 +2325,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc, ...@@ -2325,11 +2325,10 @@ static bool irdma_check_mr_contiguous(struct irdma_pble_alloc *palloc,
* irdma_setup_pbles - copy user pg address to pble's * irdma_setup_pbles - copy user pg address to pble's
* @rf: RDMA PCI function * @rf: RDMA PCI function
* @iwmr: mr pointer for this memory registration * @iwmr: mr pointer for this memory registration
* @use_pbles: flag if to use pble's * @lvl: requested pble levels
* @lvl_1_only: request only level 1 pble if true
*/ */
static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
bool use_pbles, bool lvl_1_only) u8 lvl)
{ {
struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
...@@ -2338,9 +2337,9 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, ...@@ -2338,9 +2337,9 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
int status; int status;
enum irdma_pble_level level = PBLE_LEVEL_1; enum irdma_pble_level level = PBLE_LEVEL_1;
if (use_pbles) { if (lvl) {
status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt, status = irdma_get_pble(rf->pble_rsrc, palloc, iwmr->page_cnt,
lvl_1_only); lvl);
if (status) if (status)
return status; return status;
...@@ -2355,7 +2354,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, ...@@ -2355,7 +2354,7 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
irdma_copy_user_pgaddrs(iwmr, pbl, level); irdma_copy_user_pgaddrs(iwmr, pbl, level);
if (use_pbles) if (lvl)
iwmr->pgaddrmem[0] = *pbl; iwmr->pgaddrmem[0] = *pbl;
return 0; return 0;
...@@ -2366,11 +2365,11 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr, ...@@ -2366,11 +2365,11 @@ static int irdma_setup_pbles(struct irdma_pci_f *rf, struct irdma_mr *iwmr,
* @iwdev: irdma device * @iwdev: irdma device
* @req: information for q memory management * @req: information for q memory management
* @iwpbl: pble struct * @iwpbl: pble struct
* @use_pbles: flag to use pble * @lvl: pble level mask
*/ */
static int irdma_handle_q_mem(struct irdma_device *iwdev, static int irdma_handle_q_mem(struct irdma_device *iwdev,
struct irdma_mem_reg_req *req, struct irdma_mem_reg_req *req,
struct irdma_pbl *iwpbl, bool use_pbles) struct irdma_pbl *iwpbl, u8 lvl)
{ {
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc; struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_mr *iwmr = iwpbl->iwmr; struct irdma_mr *iwmr = iwpbl->iwmr;
...@@ -2383,11 +2382,11 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, ...@@ -2383,11 +2382,11 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
bool ret = true; bool ret = true;
pg_size = iwmr->page_size; pg_size = iwmr->page_size;
err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, true); err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
if (err) if (err)
return err; return err;
if (use_pbles) if (lvl)
arr = palloc->level1.addr; arr = palloc->level1.addr;
switch (iwmr->type) { switch (iwmr->type) {
...@@ -2396,7 +2395,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, ...@@ -2396,7 +2395,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
hmc_p = &qpmr->sq_pbl; hmc_p = &qpmr->sq_pbl;
qpmr->shadow = (dma_addr_t)arr[total]; qpmr->shadow = (dma_addr_t)arr[total];
if (use_pbles) { if (lvl) {
ret = irdma_check_mem_contiguous(arr, req->sq_pages, ret = irdma_check_mem_contiguous(arr, req->sq_pages,
pg_size); pg_size);
if (ret) if (ret)
...@@ -2421,7 +2420,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, ...@@ -2421,7 +2420,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
if (!cqmr->split) if (!cqmr->split)
cqmr->shadow = (dma_addr_t)arr[req->cq_pages]; cqmr->shadow = (dma_addr_t)arr[req->cq_pages];
if (use_pbles) if (lvl)
ret = irdma_check_mem_contiguous(arr, req->cq_pages, ret = irdma_check_mem_contiguous(arr, req->cq_pages,
pg_size); pg_size);
...@@ -2435,7 +2434,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev, ...@@ -2435,7 +2434,7 @@ static int irdma_handle_q_mem(struct irdma_device *iwdev,
err = -EINVAL; err = -EINVAL;
} }
if (use_pbles && ret) { if (lvl && ret) {
irdma_free_pble(iwdev->rf->pble_rsrc, palloc); irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
iwpbl->pbl_allocated = false; iwpbl->pbl_allocated = false;
} }
...@@ -2745,17 +2744,17 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access) ...@@ -2745,17 +2744,17 @@ static int irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
{ {
struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device); struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pbl *iwpbl = &iwmr->iwpbl;
bool use_pbles;
u32 stag; u32 stag;
u8 lvl;
int err; int err;
use_pbles = iwmr->page_cnt != 1; lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
err = irdma_setup_pbles(iwdev->rf, iwmr, use_pbles, false); err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
if (err) if (err)
return err; return err;
if (use_pbles) { if (lvl) {
err = irdma_check_mr_contiguous(&iwpbl->pble_alloc, err = irdma_check_mr_contiguous(&iwpbl->pble_alloc,
iwmr->page_size); iwmr->page_size);
if (err) { if (err) {
...@@ -2839,17 +2838,17 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req, ...@@ -2839,17 +2838,17 @@ static int irdma_reg_user_mr_type_qp(struct irdma_mem_reg_req req,
struct irdma_pbl *iwpbl = &iwmr->iwpbl; struct irdma_pbl *iwpbl = &iwmr->iwpbl;
struct irdma_ucontext *ucontext = NULL; struct irdma_ucontext *ucontext = NULL;
unsigned long flags; unsigned long flags;
bool use_pbles;
u32 total; u32 total;
int err; int err;
u8 lvl;
total = req.sq_pages + req.rq_pages + 1; total = req.sq_pages + req.rq_pages + 1;
if (total > iwmr->page_cnt) if (total > iwmr->page_cnt)
return -EINVAL; return -EINVAL;
total = req.sq_pages + req.rq_pages; total = req.sq_pages + req.rq_pages;
use_pbles = total > 2; lvl = total > 2 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles); err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
if (err) if (err)
return err; return err;
...@@ -2872,9 +2871,9 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, ...@@ -2872,9 +2871,9 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
struct irdma_ucontext *ucontext = NULL; struct irdma_ucontext *ucontext = NULL;
u8 shadow_pgcnt = 1; u8 shadow_pgcnt = 1;
unsigned long flags; unsigned long flags;
bool use_pbles;
u32 total; u32 total;
int err; int err;
u8 lvl;
if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE) if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_CQ_RESIZE)
shadow_pgcnt = 0; shadow_pgcnt = 0;
...@@ -2882,8 +2881,8 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req, ...@@ -2882,8 +2881,8 @@ static int irdma_reg_user_mr_type_cq(struct irdma_mem_reg_req req,
if (total > iwmr->page_cnt) if (total > iwmr->page_cnt)
return -EINVAL; return -EINVAL;
use_pbles = req.cq_pages > 1; lvl = req.cq_pages > 1 ? PBLE_LEVEL_1 : PBLE_LEVEL_0;
err = irdma_handle_q_mem(iwdev, &req, iwpbl, use_pbles); err = irdma_handle_q_mem(iwdev, &req, iwpbl, lvl);
if (err) if (err)
return err; return err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment