Commit 334304ac authored by Christoph Hellwig

dma-mapping: don't return errors from dma_set_max_seg_size

A NULL dev->dma_parms indicates either a bus that is not DMA capable or
a grave bug in the implementation of the bus code.

There isn't much the driver can do in terms of error handling for either
case, so just warn and continue, as DMA operations will fail anyway.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Acked-by: Ulf Hansson <ulf.hansson@linaro.org> # For MMC
parent 560a861a
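
For illustration, the driver-side pattern after this change looks like the
sketch below. foo_probe() is a hypothetical probe function, not part of this
commit; the calls mirror the qaic hunk that follows.

#include <linux/dma-mapping.h>
#include <linux/limits.h>
#include <linux/pci.h>

/*
 * Hypothetical driver probe: dma_set_max_seg_size() now returns void,
 * so the caller no longer checks it. A NULL dev->dma_parms only
 * triggers a one-time WARN in the new inline implementation.
 */
static int foo_probe(struct pci_dev *pdev)
{
	int ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	return 0;
}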
@@ -447,9 +447,7 @@ static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
 	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (ret)
 		return ret;
-	ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-	if (ret)
-		return ret;
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

 	qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
 	if (IS_ERR(qdev->bar_0))
@@ -598,9 +598,7 @@ static int idma64_probe(struct idma64_chip *chip)
 	idma64->dma.dev = chip->sysdev;

-	ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
-	if (ret)
-		return ret;
+	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);

 	ret = dma_async_device_register(&idma64->dma);
 	if (ret)
@@ -3163,10 +3163,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 	 * This is the limit for transfers with a buswidth of 1, larger
 	 * buswidths will have larger limits.
 	 */
-	ret = dma_set_max_seg_size(&adev->dev, 1900800);
-	if (ret)
-		dev_err(&adev->dev, "unable to set the seg size\n");
-
+	dma_set_max_seg_size(&adev->dev, 1900800);

 	init_pl330_debugfs(pl330);
 	dev_info(&adev->dev,
@@ -1325,11 +1325,7 @@ static int bam_dma_probe(struct platform_device *pdev)
 	/* set max dma segment size */
 	bdev->common.dev = bdev->dev;
-	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
-	if (ret) {
-		dev_err(bdev->dev, "cannot set maximum segment size\n");
-		goto err_bam_channel_exit;
-	}
+	dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);

 	platform_set_drvdata(pdev, bdev);
@@ -1868,9 +1868,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 	dmac->dev = &pdev->dev;
 	platform_set_drvdata(pdev, dmac);
-	ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
-	if (ret)
-		return ret;
+	dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);

 	ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
 	if (ret)
@@ -3632,11 +3632,7 @@ static int __init d40_probe(struct platform_device *pdev)
 	if (ret)
 		goto destroy_cache;
-	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
-	if (ret) {
-		d40_err(dev, "Failed to set dma max seg size\n");
-		goto destroy_cache;
-	}
+	dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);

 	d40_hw_init(base);
@@ -559,11 +559,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
 	 * Configure the DMA segment size to make sure we get contiguous IOVA
 	 * when importing PRIME buffers.
 	 */
-	ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
-	if (ret) {
-		dev_err(dma_dev, "Failed to set DMA segment size\n");
-		goto err_component_unbind;
-	}
+	dma_set_max_seg_size(dma_dev, UINT_MAX);

 	ret = drm_vblank_init(drm, MAX_CRTC);
 	if (ret < 0)
@@ -854,8 +854,7 @@ int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
 		return -ENODEV;
 	}
 	if (dma_get_max_seg_size(dev) < size)
-		return dma_set_max_seg_size(dev, size);
-
+		dma_set_max_seg_size(dev, size);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
@@ -576,9 +576,7 @@ static int ipu6_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (ret)
 		return dev_err_probe(dev, ret, "Failed to set DMA mask\n");

-	ret = dma_set_max_seg_size(dev, UINT_MAX);
-	if (ret)
-		return dev_err_probe(dev, ret, "Failed to set max_seg_size\n");
+	dma_set_max_seg_size(dev, UINT_MAX);

 	ret = ipu6_pci_config_setup(pdev, isp->hw_ver);
 	if (ret)
@@ -213,7 +213,8 @@ static int sdmmc_idma_setup(struct mmci_host *host)
 		host->mmc->max_seg_size = host->mmc->max_req_size;
 	}

-	return dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+	dma_set_max_seg_size(dev, host->mmc->max_seg_size);
+	return 0;
 }

 static int sdmmc_idma_start(struct mmci_host *host, unsigned int *datactrl)
@@ -1496,11 +1496,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto release_region;

-	err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
-	if (err) {
-		dev_err(&pdev->dev, "Failed to set dma device segment size\n");
-		goto release_region;
-	}
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

 	err = -ENOMEM;
 	gc = vzalloc(sizeof(*gc));
@@ -13861,12 +13861,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
-	rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
-	if (unlikely(rc)) {
-		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-				"6400 Can't set dma maximum segment size\n");
-		return rc;
-	}
+	dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);

 	/*
 	 * Check whether the adapter supports an embedded copy of the
@@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev)
 	return SZ_64K;
 }

-static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
+static inline void dma_set_max_seg_size(struct device *dev, unsigned int size)
 {
-	if (dev->dma_parms) {
-		dev->dma_parms->max_segment_size = size;
-		return 0;
-	}
-	return -EIO;
+	if (WARN_ON_ONCE(!dev->dma_parms))
+		return;
+	dev->dma_parms->max_segment_size = size;
 }

 static inline unsigned long dma_get_seg_boundary(struct device *dev)
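
The new WARN_ON_ONCE assumes that any DMA-capable bus provides
dev->dma_parms before drivers probe. A rough sketch of that bus-side
responsibility follows; foo_bus_device_init() and its placement are
hypothetical, not from this commit.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int foo_bus_device_init(struct device *dev)
{
	/*
	 * Allocate dma_parms when the device is set up so that drivers
	 * may later set segment limits; devm_kzalloc() ties the
	 * allocation's lifetime to the device.
	 */
	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
				      GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;

	return 0;
}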