Commit a5003fc0 authored by Takashi Iwai

[ALSA] emu10k1 - simplify page allocation for synth

Simplify the page allocation of the emu10k1 driver for emux synth support.
Since these pages do not need to be coherent, we can avoid the
expensive DMA-coherent allocation routines.
Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent 2621f033
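For context, the patch below replaces the per-page snd_dma_alloc_pages()/snd_dma_free_pages() calls with plain alloc_page()/free_page(): it first tries a page below 4GB, checks that the page frame number fits the chip's DMA mask, and otherwise falls back to the <16MB DMA zone. The following is a minimal, hypothetical sketch of that fallback pattern only, not the driver code itself; dma_mask stands in for emu->dma_mask, and unlike the patch the sketch explicitly frees an out-of-range page before retrying.

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate one page the hardware can address (illustrative sketch only). */
static struct page *synth_alloc_one_page(u64 dma_mask)
{
	/* first try a normal allocation from the <4GB zone */
	struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_NOWARN);

	/* the page is unusable if its PFN has bits above the device's DMA mask */
	if (p && (page_to_pfn(p) & ~(dma_mask >> PAGE_SHIFT))) {
		__free_page(p);
		p = NULL;
	}

	/* fall back to the <16MB ZONE_DMA; don't trigger the OOM killer */
	if (!p)
		p = alloc_page(GFP_DMA | __GFP_NORETRY | __GFP_NOWARN);

	return p;	/* may be NULL; the caller unwinds pages allocated so far */
}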
@@ -437,43 +437,46 @@ static void get_single_page_range(struct snd_util_memhdr *hdr,
 	*last_page_ret = last_page;
 }
 
+/* release allocated pages */
+static void __synth_free_pages(struct snd_emu10k1 *emu, int first_page,
+			       int last_page)
+{
+	int page;
+
+	for (page = first_page; page <= last_page; page++) {
+		free_page((unsigned long)emu->page_ptr_table[page]);
+		emu->page_addr_table[page] = 0;
+		emu->page_ptr_table[page] = NULL;
+	}
+}
+
 /*
  * allocate kernel pages
  */
 static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
 {
 	int page, first_page, last_page;
-	struct snd_dma_buffer dmab;
 
 	emu10k1_memblk_init(blk);
 	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
 	/* allocate kernel pages */
 	for (page = first_page; page <= last_page; page++) {
-		if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci),
-					PAGE_SIZE, &dmab) < 0)
-			goto __fail;
-		if (! is_valid_page(emu, dmab.addr)) {
-			snd_dma_free_pages(&dmab);
-			goto __fail;
+		/* first try to allocate from <4GB zone */
+		struct page *p = alloc_page(GFP_KERNEL | GFP_DMA32 |
+					    __GFP_NOWARN);
+		if (!p || (page_to_pfn(p) & ~(emu->dma_mask >> PAGE_SHIFT)))
+			/* try to allocate from <16MB zone */
+			p = alloc_page(GFP_DMA |
+				       __GFP_NORETRY | /* no OOM-killer */
+				       __GFP_NOWARN);
+		if (!p) {
+			__synth_free_pages(emu, first_page, page - 1);
+			return -ENOMEM;
 		}
-		emu->page_addr_table[page] = dmab.addr;
-		emu->page_ptr_table[page] = dmab.area;
+		emu->page_addr_table[page] = page_to_phys(p);
+		emu->page_ptr_table[page] = page_address(p);
 	}
 	return 0;
-
-__fail:
-	/* release allocated pages */
-	last_page = page - 1;
-	for (page = first_page; page <= last_page; page++) {
-		dmab.area = emu->page_ptr_table[page];
-		dmab.addr = emu->page_addr_table[page];
-		dmab.bytes = PAGE_SIZE;
-		snd_dma_free_pages(&dmab);
-		emu->page_addr_table[page] = 0;
-		emu->page_ptr_table[page] = NULL;
-	}
-
-	return -ENOMEM;
 }
 
 /*
@@ -481,23 +484,10 @@ static int synth_alloc_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk
  */
 static int synth_free_pages(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
 {
-	int page, first_page, last_page;
-	struct snd_dma_buffer dmab;
+	int first_page, last_page;
 
 	get_single_page_range(emu->memhdr, blk, &first_page, &last_page);
-	dmab.dev.type = SNDRV_DMA_TYPE_DEV;
-	dmab.dev.dev = snd_dma_pci_data(emu->pci);
-	for (page = first_page; page <= last_page; page++) {
-		if (emu->page_ptr_table[page] == NULL)
-			continue;
-		dmab.area = emu->page_ptr_table[page];
-		dmab.addr = emu->page_addr_table[page];
-		dmab.bytes = PAGE_SIZE;
-		snd_dma_free_pages(&dmab);
-		emu->page_addr_table[page] = 0;
-		emu->page_ptr_table[page] = NULL;
-	}
-
+	__synth_free_pages(emu, first_page, last_page);
 	return 0;
 }