Commit 8d7c7c0e authored by Jason Gunthorpe, committed by Leon Romanovsky

RDMA: Add ib_virt_dma_to_page()

Make it clearer what is going on by adding a function to go back from the
"virtual" dma_addr to a kva and another to a struct page. This is used in the
ib_uses_virt_dma() style drivers (siw, rxe, hfi, qib).

Call them instead of a naked cast and virt_to_page() when working with dma_addr
values encoded by the various ib_map functions.

This also fixes the virt_to_page() casting problem Linus Walleij has been
chasing.

Cc: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/0-v2-05ea785520ed+10-ib_virt_page_jgg@nvidia.com
Signed-off-by: Leon Romanovsky <leon@kernel.org>
parent b2b1ddc4
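
For orientation, a minimal sketch of the conversion pattern described above. The helper function below is hypothetical and for illustration only; the real call sites are in the hunks that follow, and the two new helpers are added in the last hunk of this diff.

#include <linux/mm.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical example (not part of this commit): in an ib_uses_virt_dma()
 * driver the u64 dma_addr produced by the ib_map_* helpers is simply the
 * kernel virtual address, so recovering the kva or the backing page is a
 * cast. The new helpers name that operation instead of open-coding it.
 */
static void example_unpack_virt_dma(u64 dma_addr)
{
	/* old style: naked casts at every call site */
	void *kva_old = (void *)(uintptr_t)dma_addr;
	struct page *page_old = virt_to_page((void *)(uintptr_t)dma_addr);

	/* new style: the helpers added by this commit */
	void *kva_new = ib_virt_dma_to_ptr(dma_addr);
	struct page *page_new = ib_virt_dma_to_page(dma_addr);

	(void)kva_old; (void)page_old; (void)kva_new; (void)page_new;
}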
@@ -210,10 +210,10 @@ int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr)
 	return err;
 }
 
-static int rxe_set_page(struct ib_mr *ibmr, u64 iova)
+static int rxe_set_page(struct ib_mr *ibmr, u64 dma_addr)
 {
 	struct rxe_mr *mr = to_rmr(ibmr);
-	struct page *page = virt_to_page(iova & mr->page_mask);
+	struct page *page = ib_virt_dma_to_page(dma_addr);
 	bool persistent = !!(mr->access & IB_ACCESS_FLUSH_PERSISTENT);
 	int err;
 
@@ -279,16 +279,16 @@ static int rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr,
 	return 0;
 }
 
-static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
+static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 dma_addr, void *addr,
 			    unsigned int length, enum rxe_mr_copy_dir dir)
 {
-	unsigned int page_offset = iova & (PAGE_SIZE - 1);
+	unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
 	unsigned int bytes;
 	struct page *page;
 	u8 *va;
 
 	while (length) {
-		page = virt_to_page(iova & mr->page_mask);
+		page = ib_virt_dma_to_page(dma_addr);
 		bytes = min_t(unsigned int, length,
 			      PAGE_SIZE - page_offset);
 		va = kmap_local_page(page);
@@ -300,7 +300,7 @@ static void rxe_mr_copy_dma(struct rxe_mr *mr, u64 iova, void *addr,
 		kunmap_local(va);
 
 		page_offset = 0;
-		iova += bytes;
+		dma_addr += bytes;
 		addr += bytes;
 		length -= bytes;
 	}
@@ -488,7 +488,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
@@ -545,7 +545,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
 	if (mr->ibmr.type == IB_MR_TYPE_DMA) {
 		page_offset = iova & (PAGE_SIZE - 1);
-		page = virt_to_page(iova & PAGE_MASK);
+		page = ib_virt_dma_to_page(iova);
 	} else {
 		unsigned long index;
 		int err;
...
@@ -760,7 +760,7 @@ static void copy_inline_data_to_wqe(struct rxe_send_wqe *wqe,
 	int i;
 
 	for (i = 0; i < ibwr->num_sge; i++, sge++) {
-		memcpy(p, (void *)(uintptr_t)sge->addr, sge->length);
+		memcpy(p, ib_virt_dma_to_ptr(sge->addr), sge->length);
 		p += sge->length;
 	}
 }
...
@@ -139,7 +139,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
 			break;
 
 		bytes = min(bytes, len);
-		if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+		if (siw_rx_kva(srx, ib_virt_dma_to_ptr(buf_addr), bytes) ==
 		    bytes) {
 			copied += bytes;
 			offset += bytes;
@@ -487,7 +487,7 @@ int siw_proc_send(struct siw_qp *qp)
 		mem_p = *mem;
 		if (mem_p->mem_obj == NULL)
 			rv = siw_rx_kva(srx,
-				(void *)(uintptr_t)(sge->laddr + frx->sge_off),
+				ib_virt_dma_to_ptr(sge->laddr + frx->sge_off),
 				sge_bytes);
 		else if (!mem_p->is_pbl)
 			rv = siw_rx_umem(srx, mem_p->umem,
@@ -852,7 +852,7 @@ int siw_proc_rresp(struct siw_qp *qp)
 	if (mem_p->mem_obj == NULL)
 		rv = siw_rx_kva(srx,
-			(void *)(uintptr_t)(sge->laddr + wqe->processed),
+			ib_virt_dma_to_ptr(sge->laddr + wqe->processed),
 			bytes);
 	else if (!mem_p->is_pbl)
 		rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
...
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
 	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
 
 	if (paddr)
-		return virt_to_page((void *)(uintptr_t)paddr);
+		return ib_virt_dma_to_page(paddr);
 
 	return NULL;
 }
@@ -56,8 +56,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
 		if (!mem->mem_obj) {
 			/* Kernel client using kva */
-			memcpy(paddr,
-			       (const void *)(uintptr_t)sge->laddr, bytes);
+			memcpy(paddr, ib_virt_dma_to_ptr(sge->laddr), bytes);
 		} else if (c_tx->in_syscall) {
 			if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
 					   bytes))
@@ -477,7 +476,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				 * or memory region with assigned kernel buffer
 				 */
 				iov[seg].iov_base =
-					(void *)(uintptr_t)(sge->laddr + sge_off);
+					ib_virt_dma_to_ptr(sge->laddr + sge_off);
 				iov[seg].iov_len = sge_len;
 
 				if (do_crc)
@@ -537,19 +536,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				 * Cast to an uintptr_t to preserve all 64 bits
 				 * in sge->laddr.
 				 */
-				uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
+				u64 va = sge->laddr + sge_off;
-				/*
-				 * virt_to_page() takes a (void *) pointer
-				 * so cast to a (void *) meaning it will be 64
-				 * bits on a 64 bit platform and 32 bits on a
-				 * 32 bit platform.
-				 */
-				page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
+				page_array[seg] = ib_virt_dma_to_page(va);
 				if (do_crc)
 					crypto_shash_update(
 						c_tx->mpa_crc_hd,
-						(void *)va,
+						ib_virt_dma_to_ptr(va),
 						plen);
 			}
...
@@ -660,7 +660,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 			bytes = -EINVAL;
 			break;
 		}
-		memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
+		memcpy(kbuf, ib_virt_dma_to_ptr(core_sge->addr),
 		       core_sge->length);
 
 		kbuf += core_sge->length;
@@ -1523,7 +1523,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 		}
 		siw_dbg_mem(mem,
 			"sge[%d], size %u, addr 0x%p, total %lu\n",
-			i, pble->size, (void *)(uintptr_t)pble->addr,
+			i, pble->size, ib_virt_dma_to_ptr(pble->addr),
 			pbl_size);
 	}
 	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
...
@@ -4035,6 +4035,31 @@ static inline bool ib_dma_pci_p2p_dma_supported(struct ib_device *dev)
 	return dma_pci_p2pdma_supported(dev->dma_device);
 }
 
+/**
+ * ib_virt_dma_to_ptr - Convert a dma_addr to a kernel pointer
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the kernel pointer after
+ * going through the dma_addr marshalling.
+ */
+static inline void *ib_virt_dma_to_ptr(u64 dma_addr)
+{
+	/* virt_dma mode maps the kva's directly into the dma addr */
+	return (void *)(uintptr_t)dma_addr;
+}
+
+/**
+ * ib_virt_dma_to_page - Convert a dma_addr to a struct page
+ * @dma_addr: The DMA address
+ *
+ * Used by ib_uses_virt_dma() devices to get back to the struct page after
+ * going through the dma_addr marshalling.
+ */
+static inline struct page *ib_virt_dma_to_page(u64 dma_addr)
+{
+	return virt_to_page(ib_virt_dma_to_ptr(dma_addr));
+}
+
 /**
  * ib_dma_mapping_error - check a DMA addr for error
  * @dev: The device for which the dma_addr was created
...
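
Usage note: a minimal sketch, modeled on the rxe_mr_copy_dma() hunk above, of how a driver in ib_uses_virt_dma() mode might combine the two helpers when copying data behind a "virtual" dma_addr. The function below is hypothetical and not part of the commit.

#include <linux/highmem.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper: walk a "virtual" dma_addr page by page, convert each
 * position to its backing struct page, and map it locally for the copy.
 */
static void example_copy_from_virt_dma(u8 *dst, u64 dma_addr, unsigned int length)
{
	while (length) {
		unsigned int page_offset = dma_addr & (PAGE_SIZE - 1);
		unsigned int bytes = min_t(unsigned int, length,
					   PAGE_SIZE - page_offset);
		struct page *page = ib_virt_dma_to_page(dma_addr);
		u8 *va = kmap_local_page(page);

		memcpy(dst, va + page_offset, bytes);
		kunmap_local(va);

		dma_addr += bytes;
		dst += bytes;
		length -= bytes;
	}
}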