Commit cbc9355a authored by Chuck Lever, committed by Doug Ledford

IB/mlx4: Prevent cross page boundary allocation

Prevent cross page boundary allocation by allocating a whole page for
the MR page list; ConnectX-3 HW requires that the list not straddle a
page boundary.

Not doing so might cause an "RDMA read local protection" error.

Fixes: 1b2cd0fc ('IB/mlx4: Support the new memory registration API')
Suggested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5b420d9c
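
Why a single page is always enough here: max_pages is capped at
MLX4_MAX_FAST_REG_PAGES, so the 64-byte-aligned page-list size can never
exceed PAGE_SIZE, and one get_zeroed_page() allocation can never straddle
a page boundary. Below is a minimal user-space sketch of that arithmetic,
assuming the era-appropriate values MLX4_MAX_FAST_REG_PAGES = 511,
MLX4_MR_PAGES_ALIGN = 0x40, and 4 KiB pages; the constants and the
roundup helper are restated for illustration only, not the driver's code.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Assumed driver constants (restated for illustration). */
#define MLX4_MAX_FAST_REG_PAGES	511
#define MLX4_MR_PAGES_ALIGN	0x40
#define PAGE_SIZE		4096

/* Round x up to the next multiple of a (a is a power of two here). */
static size_t roundup_pow2(size_t x, size_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	int max_pages;

	/* For every legal max_pages, the aligned page-list size fits in
	 * one page, so get_zeroed_page() can never hand back a list that
	 * crosses a page boundary. */
	for (max_pages = 1; max_pages <= MLX4_MAX_FAST_REG_PAGES; max_pages++) {
		size_t page_map_size = roundup_pow2(max_pages * sizeof(uint64_t),
						    MLX4_MR_PAGES_ALIGN);
		if (page_map_size > PAGE_SIZE) {
			printf("max_pages=%d would cross a page\n", max_pages);
			return 1;
		}
	}
	printf("worst case: %zu bytes, fits a %d-byte page\n",
	       roundup_pow2(MLX4_MAX_FAST_REG_PAGES * sizeof(uint64_t),
			    MLX4_MR_PAGES_ALIGN), PAGE_SIZE);
	return 0;
}
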
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
 	u32			max_pages;
 	struct mlx4_mr		mmr;
 	struct ib_umem	       *umem;
-	void			*pages_alloc;
+	size_t			page_map_size;
 };
 
 struct mlx4_ib_mw {
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 		      struct mlx4_ib_mr *mr,
 		      int max_pages)
 {
-	int size = max_pages * sizeof(u64);
-	int add_size;
 	int ret;
 
-	add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+	/* Ensure that size is aligned to DMA cacheline
+	 * requirements.
+	 * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+	 * so page_map_size will never cross PAGE_SIZE.
+	 */
+	mr->page_map_size = roundup(max_pages * sizeof(u64),
+				    MLX4_MR_PAGES_ALIGN);
 
-	mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-	if (!mr->pages_alloc)
+	/* Prevent cross page boundary allocation. */
+	mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+	if (!mr->pages)
 		return -ENOMEM;
 
-	mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
 	mr->page_map = dma_map_single(device->dma_device, mr->pages,
-				      size, DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	if (dma_mapping_error(device->dma_device, mr->page_map)) {
 		ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
 	}
 
 	return 0;
-err:
-	kfree(mr->pages_alloc);
 
+err:
+	free_page((unsigned long)mr->pages);
 	return ret;
 }
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
 	if (mr->pages) {
 		struct ib_device *device = mr->ibmr.device;
-		int size = mr->max_pages * sizeof(u64);
 
 		dma_unmap_single(device->dma_device, mr->page_map,
-				 size, DMA_TO_DEVICE);
-		kfree(mr->pages_alloc);
+				 mr->page_map_size, DMA_TO_DEVICE);
+		free_page((unsigned long)mr->pages);
 		mr->pages = NULL;
 	}
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
 	mr->npages = 0;
 
 	ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-				   sizeof(u64) * mr->max_pages,
-				   DMA_TO_DEVICE);
+				   mr->page_map_size, DMA_TO_DEVICE);
 
 	rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
 	ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-				      sizeof(u64) * mr->max_pages,
-				      DMA_TO_DEVICE);
+				      mr->page_map_size, DMA_TO_DEVICE);
 
 	return rc;
 }
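
For contrast, a sketch of the failure mode the old code permitted:
kzalloc() guarantees only ARCH_KMALLOC_MINALIGN alignment, not page
placement, so even after PTR_ALIGN() the 64-byte-aligned page list could
begin near the end of one page and spill into the next, which is what
the ConnectX-3 HW rejects. The starting address below is a hypothetical
kzalloc() result chosen to trigger the case, not anything the driver
computes.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* Hypothetical kzalloc() result: 512 bytes before a page boundary.
	 * kmalloc promises ARCH_KMALLOC_MINALIGN alignment, nothing more. */
	unsigned long start = 2 * PAGE_SIZE - 512;
	unsigned long size = 511 * sizeof(uint64_t);	/* 4088-byte page list */
	unsigned long aligned = (start + 0x3f) & ~0x3fUL;	/* PTR_ALIGN(p, 0x40) */

	if (aligned / PAGE_SIZE != (aligned + size - 1) / PAGE_SIZE)
		printf("page list spans pages %lu and %lu: HW may raise a "
		       "local protection error\n",
		       aligned / PAGE_SIZE, (aligned + size - 1) / PAGE_SIZE);
	return 0;
}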