Commit 87aebd3f authored by Jason Gunthorpe

RDMA/pvrdma: Use ib_umem_num_dma_blocks() instead of ib_umem_page_count()

This driver always uses PAGE_SIZE.

Link: https://lore.kernel.org/r/14-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent b8387f81
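
For reference, ib_umem_num_dma_blocks() counts how many aligned blocks of the requested size are needed to cover the umem's IOVA range, rather than counting system pages the way ib_umem_page_count() did; passing PAGE_SIZE keeps the old page-granular count for this driver. A minimal userspace sketch of that block-counting arithmetic follows (the ALIGN-based formula is an assumption modeled on the upstream helper, not a copy of the kernel code):

#include <stdio.h>
#include <stdint.h>

/* Round x down/up to a multiple of the (power-of-two) block size. */
#define ALIGN_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))
#define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/*
 * Sketch: how many pgsz-sized, pgsz-aligned blocks are needed to cover
 * the range [iova, iova + length)?  With pgsz == PAGE_SIZE this gives the
 * same page count the driver relied on before.
 */
static uint64_t num_dma_blocks(uint64_t iova, uint64_t length, uint64_t pgsz)
{
	return (ALIGN_UP(iova + length, pgsz) - ALIGN_DOWN(iova, pgsz)) / pgsz;
}

int main(void)
{
	const uint64_t page = 4096;

	/* 3 pages of data starting mid-page still spans 4 page-sized blocks. */
	printf("%llu\n",
	       (unsigned long long)num_dma_blocks(0x1800, 3 * page, page));
	return 0;
}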
@@ -142,7 +142,7 @@ int pvrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 			goto err_cq;
 		}
 
-		npages = ib_umem_page_count(cq->umem);
+		npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
 	} else {
 		/* One extra page for shared ring state */
 		npages = 1 + (entries * sizeof(struct pvrdma_cqe) +
@@ -298,9 +298,11 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
 				goto err_qp;
 			}
 
-			qp->npages_send = ib_umem_page_count(qp->sumem);
+			qp->npages_send =
+				ib_umem_num_dma_blocks(qp->sumem, PAGE_SIZE);
 			if (!is_srq)
-				qp->npages_recv = ib_umem_page_count(qp->rumem);
+				qp->npages_recv = ib_umem_num_dma_blocks(
+					qp->rumem, PAGE_SIZE);
 			else
 				qp->npages_recv = 0;
 			qp->npages = qp->npages_send + qp->npages_recv;
@@ -152,7 +152,7 @@ int pvrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
 			goto err_srq;
 		}
 
-		srq->npages = ib_umem_page_count(srq->umem);
+		srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE);
 
 		if (srq->npages < 0 || srq->npages > PVRDMA_PAGE_DIR_MAX_PAGES) {
 			dev_warn(&dev->pdev->dev,