Commit b1c15835 authored by Wei Hu (Xavier), committed by Jason Gunthorpe

RDMA/hns: Get rid of virt_to_page and vmap calls after dma_alloc_coherent

In general, dma_alloc_coherent() returns a CPU virtual address and
a DMA address, and we have no guarantee that the virtual address
lies in either the linear map or vmalloc space; it could be in some
other special place. We have no guarantee that the underlying memory
even has an associated struct page at all.

The current code contains an incorrect usage pattern:
dma_alloc_coherent() + virt_to_page() + vmap(). This can introduce
coherency problems. This patch gets rid of the virt_to_page() and
vmap() calls, following Leon's suggestion. Related
link: https://lkml.org/lkml/2017/11/7/34
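
For context, the removed pattern stitched per-chunk coherent allocations
into one contiguous kernel view. The sketch below is a condensed,
hypothetical reconstruction (the function name, 'chunks', and 'nbufs' are
illustrative, not the driver's real structures; error handling is
simplified), annotated with why each step is unsafe:

	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	/* Hypothetical sketch of the removed anti-pattern. */
	static void *bad_contiguous_view(struct device *dev, void **chunks,
					 dma_addr_t *dma, int nbufs)
	{
		struct page **pages;
		void *virt;
		int i;

		pages = kmalloc_array(nbufs, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;

		for (i = 0; i < nbufs; ++i) {
			chunks[i] = dma_alloc_coherent(dev, PAGE_SIZE,
						       &dma[i], GFP_KERNEL);
			/* WRONG: the CPU address returned by
			 * dma_alloc_coherent() may come from a special pool
			 * or remap area with no struct page behind it, so
			 * virt_to_page() here is undefined. */
			pages[i] = virt_to_page(chunks[i]);
		}

		/* WRONG: even where the pages do exist, vmap() creates a
		 * second, cacheable mapping that can defeat the attributes
		 * dma_alloc_coherent() established, causing the coherency
		 * problems described above. */
		virt = vmap(pages, nbufs, VM_MAP, PAGE_KERNEL);
		kfree(pages);
		return virt;
	}

The fix below drops this contiguous view entirely for the multi-chunk
case: direct.buf is no longer populated there, and callers always go
through page_list instead.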

Fixes: 9a443537 ("IB/hns: Add driver files for hns RoCE driver")
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
Signed-off-by: Shaobo Xu <xushaobo2@huawei.com>
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Xiping Zhang (Francis) <zhangxiping3@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent db270c41
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -162,14 +162,10 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
 {
 	int i;
 	struct device *dev = hr_dev->dev;
-	u32 bits_per_long = BITS_PER_LONG;
 
 	if (buf->nbufs == 1) {
 		dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
 	} else {
-		if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
-			vunmap(buf->direct.buf);
-
 		for (i = 0; i < buf->nbufs; ++i)
 			if (buf->page_list[i].buf)
 				dma_free_coherent(dev, 1 << buf->page_shift,
@@ -185,9 +181,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 {
 	int i = 0;
 	dma_addr_t t;
-	struct page **pages;
 	struct device *dev = hr_dev->dev;
-	u32 bits_per_long = BITS_PER_LONG;
 	u32 page_size = 1 << page_shift;
 	u32 order;
 
@@ -236,23 +230,6 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
 			buf->page_list[i].map = t;
 			memset(buf->page_list[i].buf, 0, page_size);
 		}
-		if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
-			pages = kmalloc_array(buf->nbufs, sizeof(*pages),
-					      GFP_KERNEL);
-			if (!pages)
-				goto err_free;
-
-			for (i = 0; i < buf->nbufs; ++i)
-				pages[i] = virt_to_page(buf->page_list[i].buf);
-
-			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP,
-					       PAGE_KERNEL);
-			kfree(pages);
-			if (!buf->direct.buf)
-				goto err_free;
-		} else {
-			buf->direct.buf = NULL;
-		}
 	}
 
 	return 0;
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -726,11 +726,9 @@ static inline struct hns_roce_qp
 static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
 {
-	u32 bits_per_long_val = BITS_PER_LONG;
 	u32 page_size = 1 << buf->page_shift;
 
-	if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
-	    buf->nbufs == 1)
+	if (buf->nbufs == 1)
 		return (char *)(buf->direct.buf) + offset;
 	else
 		return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
 			(offset & (page_size - 1));
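
After this change, a multi-chunk buffer is addressed only through
page_list, and hns_roce_buf_offset() splits a linear offset into a chunk
index and an intra-chunk offset. A tiny standalone demonstration of that
arithmetic (the values are arbitrary, not driver code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_shift = 12;              /* 4 KiB chunks */
		unsigned int page_size = 1U << page_shift;
		unsigned int offset = 0x3456;              /* linear byte offset */

		/* Same split as hns_roce_buf_offset(): high bits pick the
		 * chunk, low bits index within it. */
		printf("chunk index: %u\n", offset >> page_shift);          /* 3 */
		printf("intra-chunk offset: 0x%x\n",
		       offset & (page_size - 1));                           /* 0x456 */
		return 0;
	}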