Commit 83781384 authored by Gerd Bayer, committed by David S. Miller

s390/ism: Properly fix receive message buffer allocation

Since [1], dma_alloc_coherent() no longer accepts requests for __GFP_COMP,
even on archs that may be able to fulfill them. Functionality that
relied on the receive buffer being a compound page broke at that point:
the SMC-D protocol, which utilizes the ism device driver, passes receive
buffers to the splice processor in a struct splice_pipe_desc with a
single-entry list of struct pages. As the buffer is no longer a compound
page, the splice processor now rejects requests to handle more than a
page's worth of data.
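
For context, a compound allocation presents several physically contiguous
pages through a single head page, which is what allows one struct page
entry to describe more than PAGE_SIZE bytes. A minimal sketch of such an
allocation (illustration only, with a hypothetical helper name; not part
of this patch):

	#include <linux/gfp.h>
	#include <linux/mm.h>

	static void *rx_compound_alloc(void)	/* hypothetical */
	{
		/* order-2 request: 4 contiguous pages behind one head page */
		struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);

		if (!page)
			return NULL;
		/* PageCompound(page) is true: the head page stands for the
		 * whole 4 * PAGE_SIZE buffer, so one { page, offset, len }
		 * entry can describe all of it. */
		return page_address(page);
	}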

Replace dma_alloc_coherent(): allocate the buffer with folio_alloc() and
create a DMA mapping for it with dma_map_page(). Since only receive
buffers on ISM devices use DMA, qualify the mapping as DMA_FROM_DEVICE.
Since ISM devices are available on s390 only, and on that architecture
all DMA is coherent, there is no need to introduce and export some kind
of dma_sync_to_cpu() method for the SMC-D protocol layer to call.
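
As a self-contained illustration of that allocate-then-map pattern, here
is a hedged sketch with a hypothetical helper name (rxbuf_alloc is not
from the patch):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Hypothetical helper: allocate a physically contiguous receive
	 * buffer and map it for device-to-memory DMA. Returns the kernel
	 * address and fills *dma with the bus address. */
	static void *rxbuf_alloc(struct device *dev, size_t len,
				 dma_addr_t *dma)
	{
		struct folio *folio;

		folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN,
				    get_order(len));
		if (!folio)
			return NULL;

		*dma = dma_map_page(dev, folio_page(folio, 0), 0, len,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *dma)) {
			folio_put(folio);
			return NULL;
		}
		return folio_address(folio);
	}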

Analogously, replace dma_free_coherent() with a two-step sequence,
dma_unmap_page() followed by folio_put(), to free the receive buffer.
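
And the matching teardown, again as a sketch with the same hypothetical
name:

	/* Hypothetical counterpart to rxbuf_alloc(): unmap first, then
	 * drop the folio reference, which frees the pages. */
	static void rxbuf_free(struct device *dev, void *cpu_addr,
			       dma_addr_t dma, size_t len)
	{
		dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
		folio_put(virt_to_folio(cpu_addr));
	}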

[1] https://lore.kernel.org/all/20221113163535.884299-1-hch@lst.de/

Fixes: c08004ee ("s390/ism: don't pass bogus GFP_ flags to dma_alloc_coherent")
Signed-off-by: Gerd Bayer <gbayer@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent cb178ccb
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -292,13 +292,16 @@ static int ism_read_local_gid(struct ism_dev *ism)
 static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
 	clear_bit(dmb->sba_idx, ism->sba_bitmap);
-	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
-			  dmb->cpu_addr, dmb->dma_addr);
+	dma_unmap_page(&ism->pdev->dev, dmb->dma_addr, dmb->dmb_len,
+		       DMA_FROM_DEVICE);
+	folio_put(virt_to_folio(dmb->cpu_addr));
 }
 
 static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 {
+	struct folio *folio;
 	unsigned long bit;
+	int rc;
 
 	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
 		return -EINVAL;
@@ -315,14 +318,30 @@ static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
 	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
 		return -EINVAL;
 
-	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
-					   &dmb->dma_addr,
-					   GFP_KERNEL | __GFP_NOWARN |
-					   __GFP_NOMEMALLOC | __GFP_NORETRY);
-	if (!dmb->cpu_addr)
-		clear_bit(dmb->sba_idx, ism->sba_bitmap);
+	folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
+			    __GFP_NORETRY, get_order(dmb->dmb_len));
 
-	return dmb->cpu_addr ? 0 : -ENOMEM;
+	if (!folio) {
+		rc = -ENOMEM;
+		goto out_bit;
+	}
+
+	dmb->cpu_addr = folio_address(folio);
+	dmb->dma_addr = dma_map_page(&ism->pdev->dev,
+				     virt_to_page(dmb->cpu_addr), 0,
+				     dmb->dmb_len, DMA_FROM_DEVICE);
+	if (dma_mapping_error(&ism->pdev->dev, dmb->dma_addr)) {
+		rc = -ENOMEM;
+		goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	kfree(dmb->cpu_addr);
+out_bit:
+	clear_bit(dmb->sba_idx, ism->sba_bitmap);
+	return rc;
 }
 
 int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
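
One pairing rule worth noting when adapting this pattern: memory obtained
from folio_alloc() comes from the page allocator and must be released by
dropping the folio reference; kfree() is only valid for slab allocations.
A hedged sketch of an error path that keeps that pairing consistent,
reusing the patch's names:

	out_free:
		/* the buffer came from folio_alloc(), so return it with
		 * folio_put() rather than a slab free */
		folio_put(virt_to_folio(dmb->cpu_addr));
	out_bit:
		clear_bit(dmb->sba_idx, ism->sba_bitmap);
		return rc;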