Commit c32a5fbc authored by Takashi Iwai

Merge branch 'topic/dma-fix2' into for-next

Pull the fix for potential PCM SG-buffer problems.

Link: https://lore.kernel.org/r/20200615160045.2703-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parents 67539867 3ad796cb
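
For orientation, here is a minimal, hypothetical sketch (not part of this commit; the function name example_query_buffer is made up) of the situation these fixes harden: the SG-aware helpers snd_sgbuf_get_addr() and snd_sgbuf_get_chunk_size() may now be called on a buffer that was actually allocated as a continuous SNDRV_DMA_TYPE_DEV region, which is what the PCM core falls back to when the device has no direct DMA mapping. Instead of dereferencing a NULL private_data, the helpers return the plain dmab->addr / dmab->area values.

/* Hypothetical usage sketch, not from this commit */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <sound/memalloc.h>

static int example_query_buffer(struct device *dev, size_t size)
{
	struct snd_dma_buffer dmab;
	dma_addr_t addr;
	int err;

	/* a continuous buffer: dmab.private_data stays NULL */
	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, &dmab);
	if (err < 0)
		return err;

	/* before this series, calling the SG helpers on such a buffer
	 * dereferenced the NULL private_data; now they fall back to
	 * dmab.addr, dmab.area and the full requested size
	 */
	addr = snd_sgbuf_get_addr(&dmab, 0);
	dev_dbg(dev, "buffer at %pad, contiguous chunk of %u bytes\n",
		&addr, snd_sgbuf_get_chunk_size(&dmab, 0, size));

	snd_dma_free_pages(&dmab);
	return 0;
}
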
@@ -94,7 +94,11 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
 					    size_t offset)
 {
 	struct snd_sg_buf *sgbuf = dmab->private_data;
-	dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+	dma_addr_t addr;
+
+	if (!sgbuf)
+		return dmab->addr + offset;
+	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
 	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
 	return addr + offset % PAGE_SIZE;
 }
@@ -106,6 +110,9 @@ static inline void *snd_sgbuf_get_ptr(struct snd_dma_buffer *dmab,
 				      size_t offset)
 {
 	struct snd_sg_buf *sgbuf = dmab->private_data;
+
+	if (!sgbuf)
+		return dmab->area + offset;
 	return sgbuf->table[offset >> PAGE_SHIFT].buf + offset % PAGE_SIZE;
 }
@@ -135,16 +135,17 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 	dmab->dev.type = type;
 	dmab->dev.dev = device;
 	dmab->bytes = 0;
+	dmab->area = NULL;
+	dmab->addr = 0;
+	dmab->private_data = NULL;
 	switch (type) {
 	case SNDRV_DMA_TYPE_CONTINUOUS:
 		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
 		dmab->area = alloc_pages_exact(size, gfp);
-		dmab->addr = 0;
 		break;
 	case SNDRV_DMA_TYPE_VMALLOC:
 		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
 		dmab->area = __vmalloc(size, gfp);
-		dmab->addr = 0;
 		break;
 #ifdef CONFIG_HAS_DMA
 #ifdef CONFIG_GENERIC_ALLOCATOR
@@ -171,8 +172,6 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
 #endif
 	default:
 		pr_err("snd-malloc: invalid device type %d\n", type);
-		dmab->area = NULL;
-		dmab->addr = 0;
 		return -ENXIO;
 	}
 	if (! dmab->area)
@@ -11,6 +11,7 @@
 #include <linux/moduleparam.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
+#include <linux/dma-mapping.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/info.h>
@@ -39,6 +40,18 @@ static int do_alloc_pages(struct snd_card *card, int type, struct device *dev,
 	if (max_alloc_per_card &&
 	    card->total_pcm_alloc_bytes + size > max_alloc_per_card)
 		return -ENOMEM;
+
+	if (IS_ENABLED(CONFIG_SND_DMA_SGBUF) &&
+	    (type == SNDRV_DMA_TYPE_DEV_SG || type == SNDRV_DMA_TYPE_DEV_UC_SG) &&
+	    !dma_is_direct(get_dma_ops(dev))) {
+		/* mutate to continuous page allocation */
+		dev_dbg(dev, "Use continuous page allocator\n");
+		if (type == SNDRV_DMA_TYPE_DEV_SG)
+			type = SNDRV_DMA_TYPE_DEV;
+		else
+			type = SNDRV_DMA_TYPE_DEV_UC;
+	}
+
 	err = snd_dma_alloc_pages(type, dev, size, dmab);
 	if (!err) {
 		mutex_lock(&card->memory_mutex);
@@ -3713,7 +3713,6 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
 					 area->vm_end - area->vm_start, area->vm_page_prot);
 	}
 #endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
 	if (IS_ENABLED(CONFIG_HAS_DMA) && !substream->ops->page &&
 	    (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV ||
 	     substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_UC))
@@ -3722,7 +3721,6 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
 					     substream->runtime->dma_area,
 					     substream->runtime->dma_addr,
 					     substream->runtime->dma_bytes);
-#endif /* CONFIG_X86 */
 	/* mmap with fault handler */
 	area->vm_ops = &snd_pcm_vm_ops_data_fault;
 	return 0;
@@ -142,6 +142,9 @@ unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
 	struct snd_sg_buf *sg = dmab->private_data;
 	unsigned int start, end, pg;
 
+	if (!sg)
+		return size;
+
 	start = ofs >> PAGE_SHIFT;
 	end = (ofs + size - 1) >> PAGE_SHIFT;
 	/* check page continuity */