Commit 9882d63b authored by Takashi Iwai

ALSA: memalloc: Drop x86-specific hack for WC allocations

The recent report for a crash on Haswell machines implied that the
x86-specific (rather hackish) implementation for write-cache memory
buffer allocation in ALSA core is buggy with the recent kernel in some
corner cases.  This patch drops the x86-specific implementation and
uses the standard dma_alloc_wc() & co generically for avoiding the bug
and also for simplification.

BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=216112
Cc: <stable@vger.kernel.org> # v5.18+
Link: https://lore.kernel.org/r/20220620073440.7514-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent d4995121
@@ -431,33 +431,17 @@ static const struct snd_malloc_ops snd_dma_iram_ops = {
  */
 static void *snd_dma_dev_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
-	void *p;
-
-	p = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
-#ifdef CONFIG_X86
-	if (p && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
-		set_memory_wc((unsigned long)p, PAGE_ALIGN(size) >> PAGE_SHIFT);
-#endif
-	return p;
+	return dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
 }
 
 static void snd_dma_dev_free(struct snd_dma_buffer *dmab)
 {
-#ifdef CONFIG_X86
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
-		set_memory_wb((unsigned long)dmab->area,
-			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
-#endif
 	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
 }
 
 static int snd_dma_dev_mmap(struct snd_dma_buffer *dmab,
 			    struct vm_area_struct *area)
 {
-#ifdef CONFIG_X86
-	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC)
-		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-#endif
 	return dma_mmap_coherent(dmab->dev.dev, area,
 				 dmab->area, dmab->addr, dmab->bytes);
 }
 
@@ -471,10 +455,6 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
 /*
  * Write-combined pages
  */
-#ifdef CONFIG_X86
-/* On x86, share the same ops as the standard dev ops */
-#define snd_dma_wc_ops	snd_dma_dev_ops
-#else /* CONFIG_X86 */
 static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
 {
 	return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
@@ -497,7 +477,6 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
 	.free = snd_dma_wc_free,
 	.mmap = snd_dma_wc_mmap,
 };
-#endif /* CONFIG_X86 */
 
 #ifdef CONFIG_SND_DMA_SGBUF
 static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment