Commit ea222485 authored by Chandramohan Akula, committed by Jason Gunthorpe

RDMA/bnxt_re: Update alloc_page uapi for pacing

Update the alloc_page uapi to handle mapping of the doorbell
pacing shared page and BAR address.

Link: https://lore.kernel.org/r/1689742977-9128-6-git-send-email-selvin.xavier@broadcom.com
Signed-off-by: Chandramohan Akula <chandramohan.akula@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent fa8fad92
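
The diff below wires two new page types into the driver's mmap plumbing: userspace first calls the extended BNXT_RE_METHOD_ALLOC_PAGE method to obtain an mmap offset and length for the pacing shared page or BAR, then maps that offset through the ucontext command fd. A minimal sketch of the mapping step, assuming the alloc call already succeeded; the get_pacing_page() helper is hypothetical and stands in for the ioctl plumbing:

	#include <stddef.h>
	#include <stdint.h>
	#include <sys/mman.h>

	/* Hypothetical helper: issues BNXT_RE_METHOD_ALLOC_PAGE with type
	 * BNXT_RE_ALLOC_DBR_PAGE and returns the driver-generated mmap
	 * offset and length from the method's output attributes. */
	int get_pacing_page(int cmd_fd, uint64_t *offset, uint32_t *length);

	/* Map the doorbell pacing shared page. PROT_READ only: the driver
	 * rejects writable mappings of this page (see bnxt_re_mmap() below). */
	static const void *map_pacing_page(int cmd_fd)
	{
		uint64_t offset;
		uint32_t length;
		void *page;

		if (get_pacing_page(cmd_fd, &offset, &length))
			return NULL;

		page = mmap(NULL, length, PROT_READ, MAP_SHARED, cmd_fd, offset);
		return page == MAP_FAILED ? NULL : page;
	}
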
@@ -565,6 +565,8 @@ bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
 		break;
 	case BNXT_RE_MMAP_UC_DB:
 	case BNXT_RE_MMAP_WC_DB:
+	case BNXT_RE_MMAP_DBR_BAR:
+	case BNXT_RE_MMAP_DBR_PAGE:
 		ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
 						  &entry->rdma_entry, PAGE_SIZE);
 		break;
@@ -4149,6 +4151,19 @@ int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
 	case BNXT_RE_MMAP_SH_PAGE:
 		ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
 		break;
+	case BNXT_RE_MMAP_DBR_BAR:
+		pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
+		ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
+					pgprot_noncached(vma->vm_page_prot),
+					rdma_entry);
+		break;
+	case BNXT_RE_MMAP_DBR_PAGE:
+		/* Driver doesn't expect write access for user space */
+		if (vma->vm_flags & VM_WRITE)
+			return -EFAULT;
+		ret = vm_insert_page(vma, vma->vm_start,
+				     virt_to_page((void *)bnxt_entry->mem_offset));
+		break;
 	default:
 		ret = -EINVAL;
 		break;
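
The two new cases differ in kind: BNXT_RE_MMAP_DBR_BAR is device register space, remapped non-cached through rdma_user_mmap_io(), while BNXT_RE_MMAP_DBR_PAGE is ordinary kernel memory guarded against writable mappings. A hedged userspace sketch of the BAR mapping and of the failure mode the VM_WRITE check produces; cmd_fd and the offsets are assumed to come from earlier ALLOC_PAGE calls:

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <sys/mman.h>

	static int map_pacing_bar(int cmd_fd, uint64_t bar_offset, size_t pg_sz,
				  volatile void **bar_out)
	{
		/* The BAR mapping has no write restriction; the kernel applies
		 * pgprot_noncached() so stores reach the device uncached. */
		void *bar = mmap(NULL, pg_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
				 cmd_fd, bar_offset);
		if (bar == MAP_FAILED)
			return -errno;
		*bar_out = bar;
		return 0;
	}

	/* By contrast, asking for a writable mapping of the DBR pacing page
	 * trips the driver's VM_WRITE check and the mmap() fails:
	 *
	 *   mmap(NULL, pg_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
	 *        cmd_fd, page_offset);   // MAP_FAILED, errno == EFAULT
	 */
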
@@ -4180,7 +4195,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
 	u64 mmap_offset;
 	u32 length;
 	u32 dpi;
-	u64 dbr;
+	u64 addr;
 	int err;
 
 	uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
@@ -4202,19 +4217,30 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
 				return -ENOMEM;
 			length = PAGE_SIZE;
 			dpi = uctx->wcdpi.dpi;
-			dbr = (u64)uctx->wcdpi.umdbr;
+			addr = (u64)uctx->wcdpi.umdbr;
 			mmap_flag = BNXT_RE_MMAP_WC_DB;
 		} else {
 			return -EINVAL;
 		}
 		break;
+	case BNXT_RE_ALLOC_DBR_BAR_PAGE:
+		length = PAGE_SIZE;
+		addr = (u64)rdev->pacing.dbr_bar_addr;
+		mmap_flag = BNXT_RE_MMAP_DBR_BAR;
+		break;
+	case BNXT_RE_ALLOC_DBR_PAGE:
+		length = PAGE_SIZE;
+		addr = (u64)rdev->pacing.dbr_page;
+		mmap_flag = BNXT_RE_MMAP_DBR_PAGE;
+		break;
 	default:
 		return -EOPNOTSUPP;
 	}
 
-	entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
+	entry = bnxt_re_mmap_entry_insert(uctx, addr, mmap_flag, &mmap_offset);
 	if (!entry)
 		return -ENOMEM;
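
For illustration, the request side of this handler could be driven from userspace along the following lines. This is a sketch modeled on rdma-core's internal command-buffer helpers (DECLARE_COMMAND_BUFFER, fill_attr_*, execute_ioctl); the header paths, helper signatures, and BNXT_RE_ALLOC_PAGE_* attribute IDs are assumptions taken from bnxt_re-abi.h, not the provider's actual code:

	#include <stdint.h>
	#include <infiniband/verbs.h>
	#include <infiniband/cmd_ioctl.h>	/* rdma-core internal helpers */

	/* Sketch: request the pacing BAR mapping through the extended method. */
	static int alloc_dbr_bar_page(struct ibv_context *ctx,
				      uint64_t *mmap_offset, uint32_t *length)
	{
		DECLARE_COMMAND_BUFFER(cmd, BNXT_RE_OBJECT_ALLOC_PAGE,
				       BNXT_RE_METHOD_ALLOC_PAGE, 4);

		/* New uobject handle; torn down via BNXT_RE_METHOD_DESTROY_PAGE,
		 * which lands in alloc_page_obj_cleanup() below. */
		fill_attr_out_obj(cmd, BNXT_RE_ALLOC_PAGE_HANDLE);
		/* BNXT_RE_ALLOC_DBR_PAGE would select the shared page instead. */
		fill_attr_const_in(cmd, BNXT_RE_ALLOC_PAGE_TYPE,
				   BNXT_RE_ALLOC_DBR_BAR_PAGE);
		fill_attr_out_ptr(cmd, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET, mmap_offset);
		fill_attr_out_ptr(cmd, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH, length);

		return execute_ioctl(ctx, cmd);
	}

On success, the returned offset is the key that bnxt_re_mmap() resolves back to the BAR or page through the rdma_user_mmap_entry inserted above.
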
@@ -4254,6 +4280,9 @@ static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
 			uctx->wcdpi.dbr = NULL;
 		}
 		break;
+	case BNXT_RE_MMAP_DBR_BAR:
+	case BNXT_RE_MMAP_DBR_PAGE:
+		break;
 	default:
 		goto exit;
 	}
@@ -146,6 +146,8 @@ enum bnxt_re_mmap_flag {
 	BNXT_RE_MMAP_SH_PAGE,
 	BNXT_RE_MMAP_UC_DB,
 	BNXT_RE_MMAP_WC_DB,
+	BNXT_RE_MMAP_DBR_PAGE,
+	BNXT_RE_MMAP_DBR_BAR,
 };
 
 struct bnxt_re_user_mmap_entry {
@@ -136,6 +136,8 @@ enum bnxt_re_objects {
 enum bnxt_re_alloc_page_type {
 	BNXT_RE_ALLOC_WC_PAGE = 0,
+	BNXT_RE_ALLOC_DBR_BAR_PAGE,
+	BNXT_RE_ALLOC_DBR_PAGE,
 };
 
 enum bnxt_re_var_alloc_page_attrs {