Commit 27bc944c authored by Peter Ujfalusi, committed by Vinod Koul

dmaengine: bcm2835-dma: Convert to use DMA pool

f9317829 ("dmaengine: bcm2835-dma: Fix memory leak when stopping a
running transfer") fixed the memory leak, but introduced another issue:
the terminate_all callback may be called with interrupts disabled, and
dma_free_coherent() must not be called while IRQs are disabled.
Convert the driver to use dma_pool_* for managing the list of control
blocks for the transfer.

Fixes: f9317829 ("dmaengine: bcm2835-dma: Fix memory leak when stopping a running transfer")
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Tested-by: Matthias Reichl <hias@horus.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent ef10b0b2
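The conversion replaces the single coherent buffer that held all control blocks with a per-channel struct dma_pool from which one control block is allocated per frame. A minimal sketch of that dma_pool_* lifecycle is shown below, assuming a hypothetical example_dma_cb layout, example_cb_entry, and helper example_setup_cbs(); only the dma_pool_create()/dma_pool_zalloc()/dma_pool_free()/dma_pool_destroy() calls and the unwind-on-failure shape mirror what the patch below does.

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/types.h>

/* Hypothetical 8-word layout standing in for the driver's bcm2835_dma_cb. */
struct example_dma_cb {
        u32 words[8];
};

/* Mirrors struct bcm2835_cb_entry: CPU pointer plus bus address per block. */
struct example_cb_entry {
        struct example_dma_cb *cb;
        dma_addr_t paddr;
};

/* Hypothetical helper: create a per-channel pool, grab one block per frame. */
static int example_setup_cbs(struct device *dev, struct dma_pool **pool,
                             struct example_cb_entry *list, int frames)
{
        int i;

        /* One pool object per control block; no extra alignment or boundary. */
        *pool = dma_pool_create(dev_name(dev), dev,
                                sizeof(struct example_dma_cb), 0, 0);
        if (!*pool)
                return -ENOMEM;

        for (i = 0; i < frames; i++) {
                /* Zeroed, DMA-coherent block and the bus address to chain to. */
                list[i].cb = dma_pool_zalloc(*pool, GFP_ATOMIC, &list[i].paddr);
                if (!list[i].cb)
                        goto err_unwind;
        }
        return 0;

err_unwind:
        /* Give back what was allocated so far, like the patch's error_cb path. */
        while (--i >= 0)
                dma_pool_free(*pool, list[i].cb, list[i].paddr);
        dma_pool_destroy(*pool);
        *pool = NULL;
        return -ENOMEM;
}

Per the commit message, the point of this pattern is that returning a block with dma_pool_free() is safe where dma_free_coherent() is not, namely when a descriptor is torn down with interrupts disabled; the pool's backing pages are only released by dma_pool_destroy() when the channel resources are freed.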
@@ -31,6 +31,7 @@
  */
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
         uint32_t pad[2];
 };
 
+struct bcm2835_cb_entry {
+        struct bcm2835_dma_cb *cb;
+        dma_addr_t paddr;
+};
+
 struct bcm2835_chan {
         struct virt_dma_chan vc;
         struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
 
         int ch;
         struct bcm2835_desc *desc;
+        struct dma_pool *cb_pool;
 
         void __iomem *chan_base;
         int irq_number;
 };
 
 struct bcm2835_desc {
+        struct bcm2835_chan *c;
         struct virt_dma_desc vd;
         enum dma_transfer_direction dir;
 
-        unsigned int control_block_size;
-        struct bcm2835_dma_cb *control_block_base;
-        dma_addr_t control_block_base_phys;
+        struct bcm2835_cb_entry *cb_list;
 
         unsigned int frames;
         size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
 static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
 {
         struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
-        dma_free_coherent(desc->vd.tx.chan->device->dev,
-                        desc->control_block_size,
-                        desc->control_block_base,
-                        desc->control_block_base_phys);
+        int i;
+
+        for (i = 0; i < desc->frames; i++)
+                dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
+                              desc->cb_list[i].paddr);
+
+        kfree(desc->cb_list);
         kfree(desc);
 }
 
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
 
         c->desc = d = to_bcm2835_dma_desc(&vd->tx);
 
-        writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+        writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
         writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
 }
 
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 {
         struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+        struct device *dev = c->vc.chan.device->dev;
+
+        dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
 
-        dev_dbg(c->vc.chan.device->dev,
-                        "Allocating DMA channel %d\n", c->ch);
+        c->cb_pool = dma_pool_create(dev_name(dev), dev,
+                                     sizeof(struct bcm2835_dma_cb), 0, 0);
+        if (!c->cb_pool) {
+                dev_err(dev, "unable to allocate descriptor pool\n");
+                return -ENOMEM;
+        }
 
         return request_irq(c->irq_number,
                         bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
 
         vchan_free_chan_resources(&c->vc);
         free_irq(c->irq_number, c);
+        dma_pool_destroy(c->cb_pool);
 
         dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
 }
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
         size_t size;
 
         for (size = i = 0; i < d->frames; i++) {
-                struct bcm2835_dma_cb *control_block =
-                        &d->control_block_base[i];
+                struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
                 size_t this_size = control_block->length;
                 dma_addr_t dma;
 
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
         dma_addr_t dev_addr;
         unsigned int es, sync_type;
         unsigned int frame;
+        int i;
 
         /* Grab configuration */
         if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
         if (!d)
                 return NULL;
 
+        d->c = c;
         d->dir = direction;
         d->frames = buf_len / period_len;
 
-        /* Allocate memory for control blocks */
-        d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
-        d->control_block_base = dma_zalloc_coherent(chan->device->dev,
-                        d->control_block_size, &d->control_block_base_phys,
-                        GFP_NOWAIT);
-
-        if (!d->control_block_base) {
+        d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
+        if (!d->cb_list) {
                 kfree(d);
                 return NULL;
         }
+        /* Allocate memory for control blocks */
+        for (i = 0; i < d->frames; i++) {
+                struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+                cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
+                                               &cb_entry->paddr);
+                if (!cb_entry->cb)
+                        goto error_cb;
+        }
 
         /*
          * Iterate over all frames, create a control block
          * for each frame and link them together.
          */
         for (frame = 0; frame < d->frames; frame++) {
-                struct bcm2835_dma_cb *control_block =
-                        &d->control_block_base[frame];
+                struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
 
                 /* Setup adresses */
                 if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
                  * This DMA engine driver currently only supports cyclic DMA.
                  * Therefore, wrap around at number of frames.
                  */
-                control_block->next = d->control_block_base_phys +
-                        sizeof(struct bcm2835_dma_cb)
-                        * ((frame + 1) % d->frames);
+                control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
         }
 
         return vchan_tx_prep(&c->vc, &d->vd, flags);
+error_cb:
+        i--;
+        for (; i >= 0; i--) {
+                struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+                dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
+        }
+
+        kfree(d->cb_list);
+        kfree(d);
+        return NULL;
 }
 
 static int bcm2835_dma_slave_config(struct dma_chan *chan,