Commit 10df3509 authored by Liam Girdwood, committed by Mark Brown

ASoC: Intel: Fix Audio DSP usage when IOMMU is enabled.

The Intel IOMMU requires that the ACPI device is used to allocate all DMA
memory buffers. This means we need to pass the DMA device pointer into the
child component devices that allocate DMA memory.

We now set the DMA mask only for the ACPI device instead of for each
component device.
Signed-off-by: Liam Girdwood <liam.r.girdwood@linux.intel.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
parent 0b708c87
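
As an illustration of the allocation pattern this change moves to, here is a
minimal hypothetical sketch. The names example_pdata, example_dsp_init and
example_alloc_buf are invented for the sketch and are not part of the driver;
only dma_coerce_mask_and_coherent(), dma_alloc_coherent(), DMA_BIT_MASK(31)
and the dma_dev field mirror the diff below.

/*
 * Hypothetical sketch only: the device the IOMMU is programmed for (the
 * ACPI platform device) is published through the platform data, its DMA
 * mask is set once, and child components allocate coherent DMA memory
 * against that device instead of their own struct device.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

struct example_pdata {
	struct device *dma_dev;		/* ACPI device used for all DMA */
};

static int example_dsp_init(struct device *acpi_dev,
			    struct example_pdata *pdata)
{
	pdata->dma_dev = acpi_dev;
	/* one DMA mask, set on the ACPI device only (31 bits, as in hsw_init) */
	return dma_coerce_mask_and_coherent(pdata->dma_dev, DMA_BIT_MASK(31));
}

static void *example_alloc_buf(struct example_pdata *pdata, size_t size,
			       dma_addr_t *paddr)
{
	/* allocate against the ACPI device so the IOMMU mapping is valid */
	return dma_alloc_coherent(pdata->dma_dev, size, paddr, GFP_KERNEL);
}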
@@ -138,6 +138,7 @@ static int sst_acpi_probe(struct platform_device *pdev)
 	sst_pdata = &sst_acpi->sst_pdata;
 	sst_pdata->id = desc->sst_id;
+	sst_pdata->dma_dev = dev;
 	sst_acpi->desc = desc;
 	sst_acpi->mach = mach;
...
@@ -228,6 +228,7 @@ struct sst_dsp {
 	spinlock_t spinlock;	/* IPC locking */
 	struct mutex mutex;	/* DSP FW lock */
 	struct device *dev;
+	struct device *dma_dev;
 	void *thread_context;
 	int irq;
 	u32 id;
...
@@ -337,6 +337,7 @@ struct sst_dsp *sst_dsp_new(struct device *dev,
 	spin_lock_init(&sst->spinlock);
 	mutex_init(&sst->mutex);
 	sst->dev = dev;
+	sst->dma_dev = pdata->dma_dev;
 	sst->thread_context = sst_dev->thread_context;
 	sst->sst_dev = sst_dev;
 	sst->id = pdata->id;
...
@@ -169,6 +169,7 @@ struct sst_pdata {
 	u32 dma_base;
 	u32 dma_size;
 	int dma_engine;
+	struct device *dma_dev;
 	/* DSP */
 	u32 id;
...
@@ -57,14 +57,8 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
 	sst_fw->private = private;
 	sst_fw->size = fw->size;
-	err = dma_coerce_mask_and_coherent(dsp->dev, DMA_BIT_MASK(32));
-	if (err < 0) {
-		kfree(sst_fw);
-		return NULL;
-	}
 	/* allocate DMA buffer to store FW data */
-	sst_fw->dma_buf = dma_alloc_coherent(dsp->dev, sst_fw->size,
+	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
 				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
 	if (!sst_fw->dma_buf) {
 		dev_err(dsp->dev, "error: DMA alloc failed\n");
@@ -106,7 +100,7 @@ void sst_fw_free(struct sst_fw *sst_fw)
 	list_del(&sst_fw->list);
 	mutex_unlock(&dsp->mutex);
-	dma_free_coherent(dsp->dev, sst_fw->size, sst_fw->dma_buf,
+	dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
 			sst_fw->dmable_fw_paddr);
 	kfree(sst_fw);
 }
...
@@ -433,7 +433,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
 	int ret = -ENODEV, i, j, region_count;
 	u32 offset, size;
-	dev = sst->dev;
+	dev = sst->dma_dev;
 	switch (sst->id) {
 	case SST_DEV_ID_LYNX_POINT:
@@ -466,7 +466,7 @@ static int hsw_init(struct sst_dsp *sst, struct sst_pdata *pdata)
 		return ret;
 	}
-	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
+	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
 	if (ret)
 		return ret;
...
@@ -633,17 +633,16 @@ static void hsw_pcm_free(struct snd_pcm *pcm)
 static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
 {
 	struct snd_pcm *pcm = rtd->pcm;
+	struct snd_soc_platform *platform = rtd->platform;
+	struct sst_pdata *pdata = dev_get_platdata(platform->dev);
+	struct device *dev = pdata->dma_dev;
 	int ret = 0;
-	ret = dma_coerce_mask_and_coherent(rtd->card->dev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
 	if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream ||
 			pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) {
 		ret = snd_pcm_lib_preallocate_pages_for_all(pcm,
 			SNDRV_DMA_TYPE_DEV_SG,
-			rtd->card->dev,
+			dev,
 			hsw_pcm_hardware.buffer_bytes_max,
 			hsw_pcm_hardware.buffer_bytes_max);
 		if (ret) {
...