Commit 9bb67696 authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (28 commits)
  ioat: cleanup ->timer_fn() and ->cleanup_fn() prototypes
  ioat3: interrupt coalescing
  ioat: close potential BUG_ON race in the descriptor cleanup path
  ioat2: kill pending flag
  ioat3: use ioat2_quiesce()
  ioat3: cleanup, don't enable DCA completion writes
  DMAENGINE: COH 901 318 lli sg offset fix
  DMAENGINE: COH 901 318 configure channel direction
  DMAENGINE: COH 901 318 remove irq counting
  DMAENGINE: COH 901 318 descriptor pool refactoring
  DMAENGINE: COH 901 318 cleanups
  dma: Add MPC512x DMA driver
  Debugging options for the DMA engine subsystem
  iop-adma: redundant/wrong tests in iop_*_count()?
  dmatest: fix handling of an even number of xor_sources
  dmatest: correct raid6 PQ test
  fsldma: Fix cookie issues
  dma: cases IPU_PIX_FMT_BGRA32, BGR32 and ABGR32 are the same in ipu_ch_param_set_size()
  dma: make Open Firmware device id constant
  ...
parents 0f2cc4ec dd58ffcf
@@ -44,21 +44,29 @@ Example:
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <0>;
 		reg = <0 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@80 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <1>;
 		reg = <0x80 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@100 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <2>;
 		reg = <0x100 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 	dma-channel@180 {
 		compatible = "fsl,mpc8349-dma-channel", "fsl,elo-dma-channel";
 		cell-index = <3>;
 		reg = <0x180 0x80>;
+		interrupt-parent = <&ipic>;
+		interrupts = <71 8>;
 	};
 };
...
@@ -366,7 +366,6 @@ static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
-	if (len)
 	slot_cnt += *slots_per_op;
 	return slot_cnt;
@@ -389,7 +388,6 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
 		slot_cnt += *slots_per_op;
 	}
-	if (len)
 	slot_cnt += *slots_per_op;
 	return slot_cnt;
@@ -737,11 +735,9 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
 			i += slots_per_op;
 		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);
-		if (len) {
 		iter = iop_hw_desc_slot_idx(hw_desc, i);
 		iter->byte_count = len;
 		}
-	}
 }

 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
...
@@ -53,7 +53,7 @@ struct coh901318_params {
  * struct coh_dma_channel - dma channel base
  * @name: ascii name of dma channel
  * @number: channel id number
- * @desc_nbr_max: number of preallocated descriptortors
+ * @desc_nbr_max: number of preallocated descriptors
  * @priority_high: prio of channel, 0 low otherwise high.
  * @param: configuration parameters
  * @dev_addr: physical address of periphal connected to channel
...
@@ -13,6 +13,22 @@ menuconfig DMADEVICES
 	  DMA Device drivers supported by the configured arch, it may
 	  be empty in some cases.

+config DMADEVICES_DEBUG
+	bool "DMA Engine debugging"
+	depends on DMADEVICES != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here. This enables DMA engine core and driver debugging.
+
+config DMADEVICES_VDEBUG
+	bool "DMA Engine verbose debugging"
+	depends on DMADEVICES_DEBUG != n
+	help
+	  This is an option for use by developers; most people should
+	  say N here. This enables deeper (more verbose) debugging of
+	  the DMA engine core and drivers.
+
 if DMADEVICES

 comment "DMA Devices"

@@ -69,6 +85,13 @@ config FSL_DMA
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
 	  Elo Plus is the DMA controller on 85xx and 86xx parts.

+config MPC512X_DMA
+	tristate "Freescale MPC512x built-in DMA engine support"
+	depends on PPC_MPC512x
+	select DMA_ENGINE
+	---help---
+	  Enable support for the Freescale MPC512x built-in DMA engine.
+
 config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
...
+ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
+EXTRA_CFLAGS += -DVERBOSE_DEBUG
+endif
+
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
 obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
 obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
 obj-$(CONFIG_MV_XOR) += mv_xor.o
 obj-$(CONFIG_DW_DMAC) += dw_dmac.o
 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
...
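The two Kconfig symbols added above only inject -DDEBUG / -DVERBOSE_DEBUG into the compiler flags, which is what turns the dev_dbg()/dev_vdbg() calls sprinkled through these drivers into real output when dynamic debug is not in use. Below is a minimal user-space sketch of the same gating pattern; the drv_dbg/drv_vdbg macro names are local to this example, not kernel API.

#include <stdio.h>

/* Compile with -DDEBUG and/or -DVERBOSE_DEBUG to see the messages,
 * mirroring what CONFIG_DMADEVICES_DEBUG/VDEBUG do for the drivers. */
#ifdef DEBUG
#define drv_dbg(fmt, ...)  fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__)
#else
#define drv_dbg(fmt, ...)  do { } while (0)
#endif

#ifdef VERBOSE_DEBUG
#define drv_vdbg(fmt, ...) drv_dbg(fmt, ##__VA_ARGS__)
#else
#define drv_vdbg(fmt, ...) do { } while (0)
#endif

int main(void)
{
	drv_dbg("channel 0: descriptor submitted\n");   /* needs -DDEBUG */
	drv_vdbg("channel 0: full descriptor dump\n");  /* needs -DVERBOSE_DEBUG */
	return 0;
}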
@@ -39,7 +39,6 @@ struct coh901318_desc {
 	unsigned int sg_len;
 	struct coh901318_lli *data;
 	enum dma_data_direction dir;
-	int pending_irqs;
 	unsigned long flags;
 };

@@ -72,7 +71,6 @@ struct coh901318_chan {
 	unsigned long nbr_active_done;
 	unsigned long busy;
-	int pending_irqs;
 	struct coh901318_base *base;
 };
@@ -80,18 +78,16 @@ struct coh901318_chan {
 static void coh901318_list_print(struct coh901318_chan *cohc,
 				 struct coh901318_lli *lli)
 {
-	struct coh901318_lli *l;
-	dma_addr_t addr = virt_to_phys(lli);
+	struct coh901318_lli *l = lli;
 	int i = 0;

-	while (addr) {
-		l = phys_to_virt(addr);
+	while (l) {
 		dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
-			 ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+			 ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
 			 i, l, l->control, l->src_addr, l->dst_addr,
-			 l->link_addr, phys_to_virt(l->link_addr));
+			 l->link_addr, l->virt_link_addr);
 		i++;
-		addr = l->link_addr;
+		l = l->virt_link_addr;
 	}
 }
@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
 		goto err_kmalloc;
 	tmp = dev_buf;

-	tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+	tmp += sprintf(tmp, "DMA -- enabled dma channels\n");

 	for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
 		if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
 		 * TODO: alloc a pile of descs instead of just one,
 		 * avoid many small allocations.
 		 */
-		desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+		desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
 		if (desc == NULL)
 			goto out;
 		INIT_LIST_HEAD(&desc->node);
+		dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
 	} else {
 		/* Reuse an old desc. */
 		desc = list_first_entry(&cohc->free,
 					struct coh901318_desc,
 					node);
 		list_del(&desc->node);
+		/* Initialize it a bit so it's not insane */
+		desc->sg = NULL;
+		desc->sg_len = 0;
+		desc->desc.callback = NULL;
+		desc->desc.callback_param = NULL;
 	}

 out:
@@ -364,10 +366,6 @@ static void
 coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
 {
 	list_add_tail(&desc->node, &cohc->active);
-
-	BUG_ON(cohc->pending_irqs != 0);
-
-	cohc->pending_irqs = desc->pending_irqs;
 }
 static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
 	return cohd_que;
 }

+/*
+ * This tasklet is called from the interrupt handler to
+ * handle each descriptor (DMA job) that is sent to a channel.
+ */
 static void dma_tasklet(unsigned long data)
 {
 	struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data)
 	dma_async_tx_callback callback;
 	void *callback_param;

+	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
+		 " nbr_active_done %ld\n", __func__,
+		 cohc->id, cohc->nbr_active_done);
+
 	spin_lock_irqsave(&cohc->lock, flags);

-	/* get first active entry from list */
+	/* get first active descriptor entry from list */
 	cohd_fin = coh901318_first_active_get(cohc);

-	BUG_ON(cohd_fin->pending_irqs == 0);
-
 	if (cohd_fin == NULL)
 		goto err;

-	cohd_fin->pending_irqs--;
-	cohc->completed = cohd_fin->desc.cookie;
-
-	if (cohc->nbr_active_done == 0)
-		return;
+	/* locate callback to client */
+	callback = cohd_fin->desc.callback;
+	callback_param = cohd_fin->desc.callback_param;

-	if (!cohd_fin->pending_irqs) {
-		/* release the lli allocation*/
-		coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
-	}
+	/* sign this job as completed on the channel */
+	cohc->completed = cohd_fin->desc.cookie;

-	dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
-		 " nbr_active_done %ld\n", __func__,
-		 cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+	/* release the lli allocation and remove the descriptor */
+	coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);

-	/* callback to client */
-	callback = cohd_fin->desc.callback;
-	callback_param = cohd_fin->desc.callback_param;
+	/* return desc to free-list */
+	coh901318_desc_remove(cohd_fin);
+	coh901318_desc_free(cohc, cohd_fin);

-	if (!cohd_fin->pending_irqs) {
-		coh901318_desc_remove(cohd_fin);
-
-		/* return desc to free-list */
-		coh901318_desc_free(cohc, cohd_fin);
-	}
+	spin_unlock_irqrestore(&cohc->lock, flags);

-	if (cohc->nbr_active_done)
-		cohc->nbr_active_done--;
+	/* Call the callback when we're done */
+	if (callback)
+		callback(callback_param);
+
+	spin_lock_irqsave(&cohc->lock, flags);
+
+	/*
+	 * If another interrupt fired while the tasklet was scheduling,
+	 * we don't get called twice, so we have this number of active
+	 * counter that keep track of the number of IRQs expected to
+	 * be handled for this channel. If there happen to be more than
+	 * one IRQ to be ack:ed, we simply schedule this tasklet again.
+	 */
+	cohc->nbr_active_done--;

 	if (cohc->nbr_active_done) {
+		dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
+			"came in while we were scheduling this tasklet\n");
 		if (cohc_chan_conf(cohc)->priority_high)
 			tasklet_hi_schedule(&cohc->tasklet);
 		else
 			tasklet_schedule(&cohc->tasklet);
 	}
-	spin_unlock_irqrestore(&cohc->lock, flags);

-	if (callback)
-		callback(callback_param);
+	spin_unlock_irqrestore(&cohc->lock, flags);

 	return;
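The rewritten tasklet above leans on the nbr_active_done counter instead of the old per-descriptor pending_irqs bookkeeping: the interrupt handler only bumps the counter and schedules the tasklet, and because tasklet_schedule() coalesces back-to-back requests, the tasklet reschedules itself while the counter stays non-zero. A hedged sketch of that pattern with made-up names follows; it is not the coh901318 code itself.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct my_chan {
	spinlock_t lock;
	unsigned long nbr_active_done;	/* completions not yet processed */
	struct tasklet_struct tasklet;
};

/* interrupt context: account for one completion and kick the tasklet */
static void my_chan_irq(struct my_chan *c)
{
	spin_lock(&c->lock);
	c->nbr_active_done++;
	spin_unlock(&c->lock);

	tasklet_schedule(&c->tasklet);	/* repeated calls may collapse into one run */
}

/* tasklet: finish one descriptor, reschedule if more IRQs arrived meanwhile */
static void my_chan_tasklet(unsigned long data)
{
	struct my_chan *c = (struct my_chan *)data;
	unsigned long flags;

	/* ... complete one descriptor and invoke its callback here ... */

	spin_lock_irqsave(&c->lock, flags);
	c->nbr_active_done--;
	if (c->nbr_active_done)
		tasklet_schedule(&c->tasklet);
	spin_unlock_irqrestore(&c->lock, flags);
}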
@@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
 	if (!cohc->allocated)
 		return;

-	BUG_ON(cohc->pending_irqs == 0);
+	spin_lock(&cohc->lock);

-	cohc->pending_irqs--;
 	cohc->nbr_active_done++;

-	if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+	if (coh901318_queue_start(cohc) == NULL)
 		cohc->busy = 0;

 	BUG_ON(list_empty(&cohc->active));

+	spin_unlock(&cohc->lock);
+
 	if (cohc_chan_conf(cohc)->priority_high)
 		tasklet_hi_schedule(&cohc->tasklet);
 	else
@@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	int lli_len;
 	u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+	int ret;

 	spin_lock_irqsave(&cohc->lock, flg);

@@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 	if (data == NULL)
 		goto err;

-	cohd = coh901318_desc_get(cohc);
-	cohd->sg = NULL;
-	cohd->sg_len = 0;
-	cohd->data = data;
-
-	cohd->pending_irqs =
-		coh901318_lli_fill_memcpy(
-			&cohc->base->pool, data, src, size, dest,
-			cohc_chan_param(cohc)->ctrl_lli_chained,
-			ctrl_last);
-	cohd->flags = flags;
+	ret = coh901318_lli_fill_memcpy(
+		&cohc->base->pool, data, src, size, dest,
+		cohc_chan_param(cohc)->ctrl_lli_chained,
+		ctrl_last);
+	if (ret)
+		goto err;

 	COH_DBG(coh901318_list_print(cohc, data));

-	dma_async_tx_descriptor_init(&cohd->desc, chan);
-
+	/* Pick a descriptor to handle this transfer */
+	cohd = coh901318_desc_get(cohc);
+	cohd->data = data;
+	cohd->flags = flags;
 	cohd->desc.tx_submit = coh901318_tx_submit;

 	spin_unlock_irqrestore(&cohc->lock, flg);
...@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct coh901318_chan *cohc = to_coh901318_chan(chan); struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_lli *data; struct coh901318_lli *data;
struct coh901318_desc *cohd; struct coh901318_desc *cohd;
const struct coh901318_params *params;
struct scatterlist *sg; struct scatterlist *sg;
int len = 0; int len = 0;
int size; int size;
...@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
u32 config;
unsigned long flg; unsigned long flg;
int ret;
if (!sgl) if (!sgl)
goto out; goto out;
...@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* Trigger interrupt after last lli */ /* Trigger interrupt after last lli */
ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
cohd = coh901318_desc_get(cohc); params = cohc_chan_param(cohc);
cohd->sg = NULL; config = params->config;
cohd->sg_len = 0;
cohd->dir = direction;
if (direction == DMA_TO_DEVICE) { if (direction == DMA_TO_DEVICE) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
ctrl_chained |= tx_flags; ctrl_chained |= tx_flags;
ctrl_last |= tx_flags; ctrl_last |= tx_flags;
ctrl |= tx_flags; ctrl |= tx_flags;
...@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
ctrl_chained |= rx_flags; ctrl_chained |= rx_flags;
ctrl_last |= rx_flags; ctrl_last |= rx_flags;
ctrl |= rx_flags; ctrl |= rx_flags;
} else } else
goto err_direction; goto err_direction;
dma_async_tx_descriptor_init(&cohd->desc, chan); coh901318_set_conf(cohc, config);
cohd->desc.tx_submit = coh901318_tx_submit;
/* The dma only supports transmitting packages up to /* The dma only supports transmitting packages up to
* MAX_DMA_PACKET_SIZE. Calculate to total number of * MAX_DMA_PACKET_SIZE. Calculate to total number of
...@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
len += factor; len += factor;
} }
pr_debug("Allocate %d lli:s for this transfer\n", len);
data = coh901318_lli_alloc(&cohc->base->pool, len); data = coh901318_lli_alloc(&cohc->base->pool, len);
if (data == NULL) if (data == NULL)
goto err_dma_alloc; goto err_dma_alloc;
/* initiate allocated data list */ /* initiate allocated data list */
cohd->pending_irqs = ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
cohc_dev_addr(cohc), cohc_dev_addr(cohc),
ctrl_chained, ctrl_chained,
ctrl, ctrl,
ctrl_last, ctrl_last,
direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
cohd->data = data; if (ret)
goto err_lli_fill;
cohd->flags = flags;
COH_DBG(coh901318_list_print(cohc, data)); COH_DBG(coh901318_list_print(cohc, data));
/* Pick a descriptor to handle this transfer */
cohd = coh901318_desc_get(cohc);
cohd->dir = direction;
cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
cohd->data = data;
spin_unlock_irqrestore(&cohc->lock, flg); spin_unlock_irqrestore(&cohc->lock, flg);
return &cohd->desc; return &cohd->desc;
err_lli_fill:
err_dma_alloc: err_dma_alloc:
err_direction: err_direction:
coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
spin_unlock_irqrestore(&cohc->lock, flg); spin_unlock_irqrestore(&cohc->lock, flg);
out: out:
return NULL; return NULL;
...@@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan) ...@@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/ /* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data); coh901318_lli_free(&cohc->base->pool, &cohd->data);
coh901318_desc_remove(cohd);
/* return desc to free-list */ /* return desc to free-list */
coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd); coh901318_desc_free(cohc, cohd);
} }
...@@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan) ...@@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/ /* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data); coh901318_lli_free(&cohc->base->pool, &cohd->data);
coh901318_desc_remove(cohd);
/* return desc to free-list */ /* return desc to free-list */
coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd); coh901318_desc_free(cohc, cohd);
} }
cohc->nbr_active_done = 0; cohc->nbr_active_done = 0;
cohc->busy = 0; cohc->busy = 0;
cohc->pending_irqs = 0;
spin_unlock_irqrestore(&cohc->lock, flags); spin_unlock_irqrestore(&cohc->lock, flags);
} }
...@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans, ...@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
spin_lock_init(&cohc->lock); spin_lock_init(&cohc->lock);
cohc->pending_irqs = 0;
cohc->nbr_active_done = 0; cohc->nbr_active_done = 0;
cohc->busy = 0; cohc->busy = 0;
INIT_LIST_HEAD(&cohc->free); INIT_LIST_HEAD(&cohc->free);
@@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
 	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
 	base->dma_memcpy.dev = &pdev->dev;
+	/*
+	 * This controller can only access address at even 32bit boundaries,
+	 * i.e. 2^2
+	 */
+	base->dma_memcpy.copy_align = 2;
 	err = dma_async_device_register(&base->dma_memcpy);
 	if (err)
 		goto err_register_memcpy;

-	dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+	dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
 		(u32) base->virtbase);
 	return err;
...
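copy_align in struct dma_device is a power-of-two shift, so the copy_align = 2 set in the probe hunk above advertises 4-byte (32-bit) alignment for memcpy transfers. A hedged sketch of how a client could check a request against it; the helper name is mine, only IS_ALIGNED() and the copy_align field are real kernel pieces.

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Illustrative helper, not a kernel API: reject memcpy parameters that
 * violate the alignment the DMA device advertises (1 << copy_align). */
static bool my_memcpy_params_ok(struct dma_device *dma,
				dma_addr_t dst, dma_addr_t src, size_t len)
{
	size_t align = 1UL << dma->copy_align;

	return IS_ALIGNED(dst, align) &&
	       IS_ALIGNED(src, align) &&
	       IS_ALIGNED(len, align);
}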
...@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) ...@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
lli = head; lli = head;
lli->phy_this = phy; lli->phy_this = phy;
lli->link_addr = 0x00000000;
lli->virt_link_addr = 0x00000000U;
for (i = 1; i < len; i++) { for (i = 1; i < len; i++) {
lli_prev = lli; lli_prev = lli;
...@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len) ...@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
DEBUGFS_POOL_COUNTER_ADD(pool, 1); DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy; lli->phy_this = phy;
lli->link_addr = 0x00000000;
lli->virt_link_addr = 0x00000000U;
lli_prev->link_addr = phy; lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli; lli_prev->virt_link_addr = lli;
} }
lli->link_addr = 0x00000000U;
spin_unlock(&pool->lock); spin_unlock(&pool->lock);
return head; return head;
...@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool, ...@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
lli->src_addr = src; lli->src_addr = src;
lli->dst_addr = dst; lli->dst_addr = dst;
/* One irq per single transfer */ return 0;
return 1;
} }
int int
...@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool, ...@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli->src_addr = src; lli->src_addr = src;
lli->dst_addr = dst; lli->dst_addr = dst;
/* One irq per single transfer */ return 0;
return 1;
} }
int int
...@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, ...@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl_sg; u32 ctrl_sg;
dma_addr_t src = 0; dma_addr_t src = 0;
dma_addr_t dst = 0; dma_addr_t dst = 0;
int nbr_of_irq = 0;
u32 bytes_to_transfer; u32 bytes_to_transfer;
u32 elem_size; u32 elem_size;
...@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, ...@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last; ctrl_sg = ctrl ? ctrl : ctrl_last;
if ((ctrl_sg & ctrl_irq_mask))
nbr_of_irq++;
if (dir == DMA_TO_DEVICE) if (dir == DMA_TO_DEVICE)
/* increment source address */ /* increment source address */
src = sg_dma_address(sg); src = sg_phys(sg);
else else
/* increment destination address */ /* increment destination address */
dst = sg_dma_address(sg); dst = sg_phys(sg);
bytes_to_transfer = sg_dma_len(sg); bytes_to_transfer = sg_dma_len(sg);
...@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool, ...@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
} }
spin_unlock(&pool->lock); spin_unlock(&pool->lock);
/* There can be many IRQs per sg transfer */ return 0;
return nbr_of_irq;
err: err:
spin_unlock(&pool->lock); spin_unlock(&pool->lock);
return -EINVAL; return -EINVAL;
......
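The scatter-gather hunks above (and the MAX_DMA_PACKET_SIZE comment in coh901318_prep_slave_sg earlier) boil down to one calculation: every scatterlist entry longer than the controller's maximum packet size needs several linked-list items. A hedged sketch of that count, using the real for_each_sg()/sg_dma_len()/DIV_ROUND_UP() helpers but an invented function name and parameter.

#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Illustrative only: number of LLIs needed when each sg entry is split
 * into chunks of at most max_packet bytes. */
static unsigned int my_count_llis(struct scatterlist *sgl, unsigned int sg_len,
				  unsigned int max_packet)
{
	struct scatterlist *sg;
	unsigned int nbr_llis = 0;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		nbr_llis += DIV_ROUND_UP(sg_dma_len(sg), max_packet);

	return nbr_llis;	/* how many items to take from the lli pool */
}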
@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
 	dma_cookie_t cookie;
 	enum dma_status status;
 	enum dma_ctrl_flags flags;
-	u8 pq_coefs[pq_sources];
+	u8 pq_coefs[pq_sources + 1];
 	int ret;
 	int src_cnt;
 	int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
 	} else if (thread->type == DMA_PQ) {
 		src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
 		dst_cnt = 2;
-		for (i = 0; i < pq_sources; i++)
+		for (i = 0; i < src_cnt; i++)
 			pq_coefs[i] = 1;
 	} else
 		goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
 		else if (thread->type == DMA_XOR)
 			tx = dev->device_prep_dma_xor(chan,
 						      dma_dsts[0] + dst_off,
-						      dma_srcs, xor_sources,
+						      dma_srcs, src_cnt,
 						      len, flags);
 		else if (thread->type == DMA_PQ) {
 			dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
 			for (i = 0; i < dst_cnt; i++)
 				dma_pq[i] = dma_dsts[i] + dst_off;
 			tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
-						     pq_sources, pq_coefs,
+						     src_cnt, pq_coefs,
 						     len, flags);
 		}
...
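The dmatest fixes above are an off-by-one pair: src_cnt = pq_sources | 1 forces an odd source count, so an even pq_sources module parameter really drives pq_sources + 1 sources, and pq_coefs must have room for that extra coefficient. A standalone arithmetic check (plain C, nothing here is dmatest code):

#include <stdio.h>

int main(void)
{
	for (unsigned int pq_sources = 1; pq_sources <= 8; pq_sources++) {
		unsigned int src_cnt = pq_sources | 1;	/* force odd */

		printf("pq_sources=%u -> src_cnt=%u, pq_coefs[] slots needed=%u\n",
		       pq_sources, src_cnt, src_cnt);
	}
	return 0;
}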
@@ -37,19 +37,19 @@
 #include <asm/fsldma.h>
 #include "fsldma.h"

-static void dma_init(struct fsl_dma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *chan)
 {
 	/* Reset the channel */
-	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+	DMA_OUT(chan, &chan->regs->mr, 0, 32);

-	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+	switch (chan->feature & FSL_DMA_IP_MASK) {
 	case FSL_DMA_IP_85XX:
 		/* Set the channel to below modes:
 		 * EIE - Error interrupt enable
 		 * EOSIE - End of segments interrupt enable (basic mode)
 		 * EOLNIE - End of links interrupt enable
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
-				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
+		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
+				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
 		break;
 	case FSL_DMA_IP_83XX:
@@ -57,170 +57,146 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
 		 * EOTIE - End-of-transfer interrupt enable
 		 * PRC_RM - PCI read multiple
 		 */
-		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
-				| FSL_DMA_MR_PRC_RM, 32);
+		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
+				| FSL_DMA_MR_PRC_RM, 32);
 		break;
 	}
 }
static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) static void set_sr(struct fsldma_chan *chan, u32 val)
{ {
DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); DMA_OUT(chan, &chan->regs->sr, val, 32);
} }
static u32 get_sr(struct fsl_dma_chan *fsl_chan) static u32 get_sr(struct fsldma_chan *chan)
{ {
return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); return DMA_IN(chan, &chan->regs->sr, 32);
} }
static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, static void set_desc_cnt(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, u32 count) struct fsl_dma_ld_hw *hw, u32 count)
{ {
hw->count = CPU_TO_DMA(fsl_chan, count, 32); hw->count = CPU_TO_DMA(chan, count, 32);
} }
static void set_desc_src(struct fsl_dma_chan *fsl_chan, static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src) struct fsl_dma_ld_hw *hw, dma_addr_t src)
{ {
u64 snoop_bits; u64 snoop_bits;
snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
} }
static void set_desc_dest(struct fsl_dma_chan *fsl_chan, static void set_desc_dst(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t dest) struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{ {
u64 snoop_bits; u64 snoop_bits;
snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
} }
static void set_desc_next(struct fsl_dma_chan *fsl_chan, static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next) struct fsl_dma_ld_hw *hw, dma_addr_t next)
{ {
u64 snoop_bits; u64 snoop_bits;
snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0; ? FSL_DMA_SNEN : 0;
hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}
static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
} }
static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{ {
return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
} }
static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) static dma_addr_t get_cdar(struct fsldma_chan *chan)
{ {
DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
} }
static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) static dma_addr_t get_ndar(struct fsldma_chan *chan)
{ {
return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); return DMA_IN(chan, &chan->regs->ndar, 64);
} }
static u32 get_bcr(struct fsl_dma_chan *fsl_chan) static u32 get_bcr(struct fsldma_chan *chan)
{ {
return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); return DMA_IN(chan, &chan->regs->bcr, 32);
} }
static int dma_is_idle(struct fsl_dma_chan *fsl_chan) static int dma_is_idle(struct fsldma_chan *chan)
{ {
u32 sr = get_sr(fsl_chan); u32 sr = get_sr(chan);
return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
} }
static void dma_start(struct fsl_dma_chan *fsl_chan) static void dma_start(struct fsldma_chan *chan)
{ {
u32 mr_set = 0; u32 mode;
if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { mode = DMA_IN(chan, &chan->regs->mr, 32);
DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
mr_set |= FSL_DMA_MR_EMP_EN; if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
} else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, DMA_OUT(chan, &chan->regs->bcr, 0, 32);
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) mode |= FSL_DMA_MR_EMP_EN;
& ~FSL_DMA_MR_EMP_EN, 32); } else {
mode &= ~FSL_DMA_MR_EMP_EN;
}
} }
if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) if (chan->feature & FSL_DMA_CHAN_START_EXT)
mr_set |= FSL_DMA_MR_EMS_EN; mode |= FSL_DMA_MR_EMS_EN;
else else
mr_set |= FSL_DMA_MR_CS; mode |= FSL_DMA_MR_CS;
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, DMA_OUT(chan, &chan->regs->mr, mode, 32);
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
| mr_set, 32);
} }
static void dma_halt(struct fsl_dma_chan *fsl_chan) static void dma_halt(struct fsldma_chan *chan)
{ {
u32 mode;
int i; int i;
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode = DMA_IN(chan, &chan->regs->mr, 32);
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, mode |= FSL_DMA_MR_CA;
32); DMA_OUT(chan, &chan->regs->mr, mode, 32);
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); DMA_OUT(chan, &chan->regs->mr, mode, 32);
for (i = 0; i < 100; i++) { for (i = 0; i < 100; i++) {
if (dma_is_idle(fsl_chan)) if (dma_is_idle(chan))
break; return;
udelay(10); udelay(10);
} }
if (i >= 100 && !dma_is_idle(fsl_chan))
dev_err(fsl_chan->dev, "DMA halt timeout!\n"); if (!dma_is_idle(chan))
dev_err(chan->dev, "DMA halt timeout!\n");
} }
static void set_ld_eol(struct fsl_dma_chan *fsl_chan, static void set_ld_eol(struct fsldma_chan *chan,
struct fsl_desc_sw *desc) struct fsl_desc_sw *desc)
{ {
u64 snoop_bits; u64 snoop_bits;
snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0; ? FSL_DMA_SNEN : 0;
desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, desc->hw.next_ln_addr = CPU_TO_DMA(chan,
DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
| snoop_bits, 64); | snoop_bits, 64);
} }
static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
struct fsl_desc_sw *new_desc)
{
struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
if (list_empty(&fsl_chan->ld_queue))
return;
/* Link to the new descriptor physical address and
* Enable End-of-segment interrupt for
* the last link descriptor.
* (the previous node's next link descriptor)
*
* For FSL_DMA_IP_83xx, the snoop enable bit need be set.
*/
queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
new_desc->async_tx.phys | FSL_DMA_EOSIE |
(((fsl_chan->feature & FSL_DMA_IP_MASK)
== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}
/** /**
* fsl_chan_set_src_loop_size - Set source address hold transfer size * fsl_chan_set_src_loop_size - Set source address hold transfer size
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
* @size : Address loop size, 0 for disable loop * @size : Address loop size, 0 for disable loop
* *
* The set source address hold transfer size. The source * The set source address hold transfer size. The source
...@@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan, ...@@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
* read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
* SA + 1 ... and so on. * SA + 1 ... and so on.
*/ */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{ {
u32 mode;
mode = DMA_IN(chan, &chan->regs->mr, 32);
switch (size) { switch (size) {
case 0: case 0:
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode &= ~FSL_DMA_MR_SAHE;
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
(~FSL_DMA_MR_SAHE), 32);
break; break;
case 1: case 1:
case 2: case 2:
case 4: case 4:
case 8: case 8:
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
32);
break; break;
} }
DMA_OUT(chan, &chan->regs->mr, mode, 32);
} }
/** /**
* fsl_chan_set_dest_loop_size - Set destination address hold transfer size * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
* @size : Address loop size, 0 for disable loop * @size : Address loop size, 0 for disable loop
* *
* The set destination address hold transfer size. The destination * The set destination address hold transfer size. The destination
...@@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) ...@@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
* write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
* TA + 1 ... and so on. * TA + 1 ... and so on.
*/ */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{ {
u32 mode;
mode = DMA_IN(chan, &chan->regs->mr, 32);
switch (size) { switch (size) {
case 0: case 0:
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode &= ~FSL_DMA_MR_DAHE;
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
(~FSL_DMA_MR_DAHE), 32);
break; break;
case 1: case 1:
case 2: case 2:
case 4: case 4:
case 8: case 8:
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
32);
break; break;
} }
DMA_OUT(chan, &chan->regs->mr, mode, 32);
} }
/** /**
* fsl_chan_set_request_count - Set DMA Request Count for external control * fsl_chan_set_request_count - Set DMA Request Count for external control
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
* @size : Number of bytes to transfer in a single request * @size : Number of bytes to transfer in a single request
* *
* The Freescale DMA channel can be controlled by the external signal DREQ#. * The Freescale DMA channel can be controlled by the external signal DREQ#.
...@@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) ...@@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
* *
* A size of 0 disables external pause control. The maximum size is 1024. * A size of 0 disables external pause control. The maximum size is 1024.
*/ */
static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{ {
u32 mode;
BUG_ON(size > 1024); BUG_ON(size > 1024);
DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) mode = DMA_IN(chan, &chan->regs->mr, 32);
| ((__ilog2(size) << 24) & 0x0f000000), mode |= (__ilog2(size) << 24) & 0x0f000000;
32);
DMA_OUT(chan, &chan->regs->mr, mode, 32);
} }
/** /**
* fsl_chan_toggle_ext_pause - Toggle channel external pause status * fsl_chan_toggle_ext_pause - Toggle channel external pause status
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled. * @enable : 0 is disabled, 1 is enabled.
* *
* The Freescale DMA channel can be controlled by the external signal DREQ#. * The Freescale DMA channel can be controlled by the external signal DREQ#.
* The DMA Request Count feature should be used in addition to this feature * The DMA Request Count feature should be used in addition to this feature
* to set the number of bytes to transfer before pausing the channel. * to set the number of bytes to transfer before pausing the channel.
*/ */
static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{ {
if (enable) if (enable)
fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
else else
fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
} }
/** /**
* fsl_chan_toggle_ext_start - Toggle channel external start status * fsl_chan_toggle_ext_start - Toggle channel external start status
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled. * @enable : 0 is disabled, 1 is enabled.
* *
* If enable the external start, the channel can be started by an * If enable the external start, the channel can be started by an
...@@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) ...@@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
* transfer immediately. The DMA channel will wait for the * transfer immediately. The DMA channel will wait for the
* control pin asserted. * control pin asserted.
*/ */
static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{ {
if (enable) if (enable)
fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; chan->feature |= FSL_DMA_CHAN_START_EXT;
else else
fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; chan->feature &= ~FSL_DMA_CHAN_START_EXT;
}
static void append_ld_queue(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
if (list_empty(&chan->ld_pending))
goto out_splice;
/*
* Add the hardware descriptor to the chain of hardware descriptors
* that already exists in memory.
*
* This will un-set the EOL bit of the existing transaction, and the
* last link in this transaction will become the EOL descriptor.
*/
set_desc_next(chan, &tail->hw, desc->async_tx.phys);
/*
* Add the software descriptor and all children to the list
* of pending transactions
*/
out_splice:
list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
} }
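The new append_ld_queue() above joins a transaction to the pending chain in two steps: rewrite the current tail's hardware next pointer (which clears its EOL marking) and then splice the software descriptors onto the pending list. A hedged, generic sketch of the same idea follows; the types and field names below are invented, not the fsldma ones.

#include <linux/list.h>
#include <linux/types.h>

struct hw_desc {
	u64 next;			/* bus address of the next hw descriptor */
};

struct sw_desc {
	struct list_head node;
	struct list_head tx_list;	/* all sw descriptors of this transaction */
	struct hw_desc hw;
	dma_addr_t phys;		/* bus address of 'hw' */
};

static void append_pending(struct list_head *pending, struct sw_desc *desc)
{
	struct sw_desc *tail;

	if (!list_empty(pending)) {
		/* link the chains in hardware; the old tail stops being EOL */
		tail = list_entry(pending->prev, struct sw_desc, node);
		tail->hw.next = desc->phys;
	}

	/* queue every software descriptor of the new transaction */
	list_splice_tail_init(&desc->tx_list, pending);
}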
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{ {
struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child; struct fsl_desc_sw *child;
unsigned long flags; unsigned long flags;
dma_cookie_t cookie; dma_cookie_t cookie;
/* cookie increment and adding to ld_queue must be atomic */ spin_lock_irqsave(&chan->desc_lock, flags);
spin_lock_irqsave(&fsl_chan->desc_lock, flags);
cookie = fsl_chan->common.cookie; /*
* assign cookies to all of the software descriptors
* that make up this transaction
*/
cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) { list_for_each_entry(child, &desc->tx_list, node) {
cookie++; cookie++;
if (cookie < 0) if (cookie < 0)
cookie = 1; cookie = 1;
desc->async_tx.cookie = cookie; child->async_tx.cookie = cookie;
} }
fsl_chan->common.cookie = cookie; chan->common.cookie = cookie;
append_ld_queue(fsl_chan, desc);
list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); /* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc);
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie; return cookie;
} }
/** /**
* fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
* *
* Return - The descriptor allocated. NULL for failed. * Return - The descriptor allocated. NULL for failed.
*/ */
static struct fsl_desc_sw *fsl_dma_alloc_descriptor( static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
struct fsl_dma_chan *fsl_chan) struct fsldma_chan *chan)
{ {
struct fsl_desc_sw *desc;
dma_addr_t pdesc; dma_addr_t pdesc;
struct fsl_desc_sw *desc_sw;
desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); if (!desc) {
if (desc_sw) { dev_dbg(chan->dev, "out of memory for link desc\n");
memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); return NULL;
INIT_LIST_HEAD(&desc_sw->tx_list);
dma_async_tx_descriptor_init(&desc_sw->async_tx,
&fsl_chan->common);
desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
desc_sw->async_tx.phys = pdesc;
} }
return desc_sw; memset(desc, 0, sizeof(*desc));
INIT_LIST_HEAD(&desc->tx_list);
dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
desc->async_tx.tx_submit = fsl_dma_tx_submit;
desc->async_tx.phys = pdesc;
return desc;
} }
/** /**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
* *
* This function will create a dma pool for descriptor allocation. * This function will create a dma pool for descriptor allocation.
* *
* Return - The number of descriptors allocated. * Return - The number of descriptors allocated.
*/ */
static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{ {
struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); struct fsldma_chan *chan = to_fsl_chan(dchan);
/* Has this channel already been allocated? */ /* Has this channel already been allocated? */
if (fsl_chan->desc_pool) if (chan->desc_pool)
return 1; return 1;
/* We need the descriptor to be aligned to 32bytes /*
* We need the descriptor to be aligned to 32bytes
* for meeting FSL DMA specification requirement. * for meeting FSL DMA specification requirement.
*/ */
fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
fsl_chan->dev, sizeof(struct fsl_desc_sw), chan->dev,
32, 0); sizeof(struct fsl_desc_sw),
if (!fsl_chan->desc_pool) { __alignof__(struct fsl_desc_sw), 0);
dev_err(fsl_chan->dev, "No memory for channel %d " if (!chan->desc_pool) {
"descriptor dma pool.\n", fsl_chan->id); dev_err(chan->dev, "unable to allocate channel %d "
return 0; "descriptor pool\n", chan->id);
return -ENOMEM;
} }
/* there is at least one descriptor free to be allocated */
return 1; return 1;
} }
/** /**
* fsl_dma_free_chan_resources - Free all resources of the channel. * fsldma_free_desc_list - Free all descriptors in a queue
* @fsl_chan : Freescale DMA channel * @chan: Freescae DMA channel
* @list: the list to free
*
* LOCKING: must hold chan->desc_lock
*/ */
static void fsl_dma_free_chan_resources(struct dma_chan *chan) static void fsldma_free_desc_list(struct fsldma_chan *chan,
struct list_head *list)
{ {
struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
struct fsl_desc_sw *desc, *_desc; struct fsl_desc_sw *desc, *_desc;
unsigned long flags;
dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); list_for_each_entry_safe(desc, _desc, list, node) {
spin_lock_irqsave(&fsl_chan->desc_lock, flags); list_del(&desc->node);
list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
#ifdef FSL_DMA_LD_DEBUG }
dev_dbg(fsl_chan->dev, }
"LD %p will be released.\n", desc);
#endif static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
struct list_head *list)
{
struct fsl_desc_sw *desc, *_desc;
list_for_each_entry_safe_reverse(desc, _desc, list, node) {
list_del(&desc->node); list_del(&desc->node);
/* free link descriptor */ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
} }
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); }
dma_pool_destroy(fsl_chan->desc_pool);
fsl_chan->desc_pool = NULL; /**
* fsl_dma_free_chan_resources - Free all resources of the channel.
* @chan : Freescale DMA channel
*/
static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
{
struct fsldma_chan *chan = to_fsl_chan(dchan);
unsigned long flags;
dev_dbg(chan->dev, "Free all channel resources.\n");
spin_lock_irqsave(&chan->desc_lock, flags);
fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running);
spin_unlock_irqrestore(&chan->desc_lock, flags);
dma_pool_destroy(chan->desc_pool);
chan->desc_pool = NULL;
} }
static struct dma_async_tx_descriptor * static struct dma_async_tx_descriptor *
fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{ {
struct fsl_dma_chan *fsl_chan; struct fsldma_chan *chan;
struct fsl_desc_sw *new; struct fsl_desc_sw *new;
if (!chan) if (!dchan)
return NULL; return NULL;
fsl_chan = to_fsl_chan(chan); chan = to_fsl_chan(dchan);
new = fsl_dma_alloc_descriptor(fsl_chan); new = fsl_dma_alloc_descriptor(chan);
if (!new) { if (!new) {
dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); dev_err(chan->dev, "No free memory for link descriptor\n");
return NULL; return NULL;
} }
...@@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) ...@@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
list_add_tail(&new->node, &new->tx_list); list_add_tail(&new->node, &new->tx_list);
/* Set End-of-link to the last link descriptor of new list*/ /* Set End-of-link to the last link descriptor of new list*/
set_ld_eol(fsl_chan, new); set_ld_eol(chan, new);
return &new->async_tx; return &new->async_tx;
} }
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags) size_t len, unsigned long flags)
{ {
struct fsl_dma_chan *fsl_chan; struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new; struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
struct list_head *list;
size_t copy; size_t copy;
if (!chan) if (!dchan)
return NULL; return NULL;
if (!len) if (!len)
return NULL; return NULL;
fsl_chan = to_fsl_chan(chan); chan = to_fsl_chan(dchan);
do { do {
/* Allocate the link descriptor from DMA pool */ /* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(fsl_chan); new = fsl_dma_alloc_descriptor(chan);
if (!new) { if (!new) {
dev_err(fsl_chan->dev, dev_err(chan->dev,
"No free memory for link descriptor\n"); "No free memory for link descriptor\n");
goto fail; goto fail;
} }
#ifdef FSL_DMA_LD_DEBUG #ifdef FSL_DMA_LD_DEBUG
dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif #endif
copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
set_desc_cnt(fsl_chan, &new->hw, copy); set_desc_cnt(chan, &new->hw, copy);
set_desc_src(fsl_chan, &new->hw, dma_src); set_desc_src(chan, &new->hw, dma_src);
set_desc_dest(fsl_chan, &new->hw, dma_dest); set_desc_dst(chan, &new->hw, dma_dst);
if (!first) if (!first)
first = new; first = new;
else else
set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); set_desc_next(chan, &prev->hw, new->async_tx.phys);
new->async_tx.cookie = 0; new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx); async_tx_ack(&new->async_tx);
...@@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( ...@@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
prev = new; prev = new;
len -= copy; len -= copy;
dma_src += copy; dma_src += copy;
dma_dest += copy; dma_dst += copy;
/* Insert the link descriptor to the LD ring */ /* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &first->tx_list); list_add_tail(&new->node, &first->tx_list);
...@@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( ...@@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
new->async_tx.cookie = -EBUSY; new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list*/ /* Set End-of-link to the last link descriptor of new list*/
set_ld_eol(fsl_chan, new); set_ld_eol(chan, new);
return &first->async_tx; return &first->async_tx;
...@@ -543,12 +578,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( ...@@ -543,12 +578,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
if (!first) if (!first)
return NULL; return NULL;
list = &first->tx_list; fsldma_free_desc_list_reverse(chan, &first->tx_list);
list_for_each_entry_safe_reverse(new, prev, list, node) {
list_del(&new->node);
dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
}
return NULL; return NULL;
} }
...@@ -565,13 +595,12 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( ...@@ -565,13 +595,12 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
* chan->private variable. * chan->private variable.
*/ */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_data_direction direction, unsigned long flags) enum dma_data_direction direction, unsigned long flags)
{ {
struct fsl_dma_chan *fsl_chan; struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
struct fsl_dma_slave *slave; struct fsl_dma_slave *slave;
struct list_head *tx_list;
size_t copy; size_t copy;
int i; int i;
...@@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( ...@@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct fsl_dma_hw_addr *hw; struct fsl_dma_hw_addr *hw;
dma_addr_t dma_dst, dma_src; dma_addr_t dma_dst, dma_src;
if (!chan) if (!dchan)
return NULL; return NULL;
if (!chan->private) if (!dchan->private)
return NULL; return NULL;
fsl_chan = to_fsl_chan(chan); chan = to_fsl_chan(dchan);
slave = chan->private; slave = dchan->private;
if (list_empty(&slave->addresses)) if (list_empty(&slave->addresses))
return NULL; return NULL;
@@ -637,14 +666,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 		}

 		/* Allocate the link descriptor from DMA pool */
-		new = fsl_dma_alloc_descriptor(fsl_chan);
+		new = fsl_dma_alloc_descriptor(chan);
 		if (!new) {
-			dev_err(fsl_chan->dev, "No free memory for "
+			dev_err(chan->dev, "No free memory for "
 					"link descriptor\n");
 			goto fail;
 		}
 #ifdef FSL_DMA_LD_DEBUG
-		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+		dev_dbg(chan->dev, "new link desc alloc %p\n", new);
 #endif
 		/*
@@ -671,9 +700,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 		}

 		/* Fill in the descriptor */
-		set_desc_cnt(fsl_chan, &new->hw, copy);
-		set_desc_src(fsl_chan, &new->hw, dma_src);
-		set_desc_dest(fsl_chan, &new->hw, dma_dst);
+		set_desc_cnt(chan, &new->hw, copy);
+		set_desc_src(chan, &new->hw, dma_src);
+		set_desc_dst(chan, &new->hw, dma_dst);

 		/*
 		 * If this is not the first descriptor, chain the
@@ -682,7 +711,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 		if (!first) {
 			first = new;
 		} else {
-			set_desc_next(fsl_chan, &prev->hw,
+			set_desc_next(chan, &prev->hw,
 					new->async_tx.phys);
 		}
@@ -708,23 +737,23 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	new->async_tx.cookie = -EBUSY;

 	/* Set End-of-link to the last link descriptor of new list */
-	set_ld_eol(fsl_chan, new);
+	set_ld_eol(chan, new);

 	/* Enable extra controller features */
-	if (fsl_chan->set_src_loop_size)
-		fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+	if (chan->set_src_loop_size)
+		chan->set_src_loop_size(chan, slave->src_loop_size);

-	if (fsl_chan->set_dest_loop_size)
-		fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+	if (chan->set_dst_loop_size)
+		chan->set_dst_loop_size(chan, slave->dst_loop_size);

-	if (fsl_chan->toggle_ext_start)
-		fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+	if (chan->toggle_ext_start)
+		chan->toggle_ext_start(chan, slave->external_start);

-	if (fsl_chan->toggle_ext_pause)
-		fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+	if (chan->toggle_ext_pause)
+		chan->toggle_ext_pause(chan, slave->external_pause);

-	if (fsl_chan->set_request_count)
-		fsl_chan->set_request_count(fsl_chan, slave->request_count);
+	if (chan->set_request_count)
+		chan->set_request_count(chan, slave->request_count);

 	return &first->async_tx;
@@ -741,215 +770,216 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	 *
 	 * We're re-using variables for the loop, oh well
 	 */
-	tx_list = &first->tx_list;
-	list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
-		list_del_init(&new->node);
-		dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
-	}
+	fsldma_free_desc_list_reverse(chan, &first->tx_list);

 	return NULL;
 }
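/*
 * Editor's aside (not part of the patch): a minimal sketch of how a slave
 * client would drive fsl_dma_prep_slave_sg() through the generic dmaengine
 * API, assuming <linux/dmaengine.h>. The driver-specific configuration is
 * handed over via dchan->private as a struct fsl_dma_slave; how that
 * structure's address list gets filled in is driver-specific and not shown.
 */
static int example_fsldma_slave_xfer(struct dma_chan *dchan,
				     struct fsl_dma_slave *slave,
				     struct scatterlist *sgl,
				     unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	/* the driver reads the slave configuration from dchan->private */
	dchan->private = slave;

	tx = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
						 DMA_TO_DEVICE, 0);
	if (!tx)
		return -ENOMEM;

	/* queue the descriptor, then kick the channel */
	cookie = tx->tx_submit(tx);
	if (cookie < 0)
		return cookie;

	dma_async_issue_pending(dchan);
	return 0;
}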
-static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 {
-	struct fsl_dma_chan *fsl_chan;
-	struct fsl_desc_sw *desc, *tmp;
+	struct fsldma_chan *chan;
 	unsigned long flags;

-	if (!chan)
+	if (!dchan)
 		return;

-	fsl_chan = to_fsl_chan(chan);
+	chan = to_fsl_chan(dchan);

 	/* Halt the DMA engine */
-	dma_halt(fsl_chan);
+	dma_halt(chan);

-	spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+	spin_lock_irqsave(&chan->desc_lock, flags);

 	/* Remove and free all of the descriptors in the LD queue */
-	list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
-		list_del(&desc->node);
-		dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
-	}
+	fsldma_free_desc_list(chan, &chan->ld_pending);
+	fsldma_free_desc_list(chan, &chan->ld_running);

-	spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 /**
  * fsl_dma_update_completed_cookie - Update the completed cookie.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
+ *
+ * CONTEXT: hardirq
  */
-static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
 {
-	struct fsl_desc_sw *cur_desc, *desc;
-	dma_addr_t ld_phy;
-
-	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
-
-	if (ld_phy) {
-		cur_desc = NULL;
-		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
-			if (desc->async_tx.phys == ld_phy) {
-				cur_desc = desc;
-				break;
-			}
-
-		if (cur_desc && cur_desc->async_tx.cookie) {
-			if (dma_is_idle(fsl_chan))
-				fsl_chan->completed_cookie =
-					cur_desc->async_tx.cookie;
-			else
-				fsl_chan->completed_cookie =
-					cur_desc->async_tx.cookie - 1;
-		}
-	}
+	struct fsl_desc_sw *desc;
+	unsigned long flags;
+	dma_cookie_t cookie;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	if (list_empty(&chan->ld_running)) {
+		dev_dbg(chan->dev, "no running descriptors\n");
+		goto out_unlock;
+	}
+
+	/* Get the last descriptor, update the cookie to that */
+	desc = to_fsl_desc(chan->ld_running.prev);
+	if (dma_is_idle(chan))
+		cookie = desc->async_tx.cookie;
+	else {
+		cookie = desc->async_tx.cookie - 1;
+		if (unlikely(cookie < DMA_MIN_COOKIE))
+			cookie = DMA_MAX_COOKIE;
+	}
+
+	chan->completed_cookie = cookie;
+
+out_unlock:
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+/**
+ * fsldma_desc_status - Check the status of a descriptor
+ * @chan: Freescale DMA channel
+ * @desc: DMA SW descriptor
+ *
+ * This function will return the status of the given descriptor
+ */
+static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
+					  struct fsl_desc_sw *desc)
+{
+	return dma_async_is_complete(desc->async_tx.cookie,
+				     chan->completed_cookie,
+				     chan->common.cookie);
 }
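/*
 * Editor's aside (not part of the patch): fsldma_desc_status() above is the
 * driver-internal counterpart of what a client does with the cookie it got
 * back at submit time. A hedged sketch of the client-side poll, using the
 * standard dmaengine helpers:
 */
static bool example_poll_for_completion(struct dma_chan *dchan,
					dma_cookie_t cookie)
{
	dma_cookie_t last_complete, last_used;

	/* ends up in fsl_dma_is_complete(), which runs cleanup first */
	return dma_async_is_tx_complete(dchan, cookie, &last_complete,
					&last_used) == DMA_SUCCESS;
}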
 /**
  * fsl_chan_ld_cleanup - Clean up link descriptors
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
  *
  * This function cleans up the ld_queue of the DMA channel.
- * If 'in_intr' is set, the function will move the link descriptor to
- * the recycle list. Otherwise, free it directly.
  */
static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{ {
struct fsl_desc_sw *desc, *_desc; struct fsl_desc_sw *desc, *_desc;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&fsl_chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
fsl_chan->completed_cookie); list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
dma_async_tx_callback callback; dma_async_tx_callback callback;
void *callback_param; void *callback_param;
if (dma_async_is_complete(desc->async_tx.cookie, if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
fsl_chan->completed_cookie, fsl_chan->common.cookie)
== DMA_IN_PROGRESS)
break; break;
callback = desc->async_tx.callback; /* Remove from the list of running transactions */
callback_param = desc->async_tx.callback_param;
/* Remove from ld_queue list */
list_del(&desc->node); list_del(&desc->node);
dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
desc);
dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
/* Run the link descriptor callback function */ /* Run the link descriptor callback function */
callback = desc->async_tx.callback;
callback_param = desc->async_tx.callback_param;
if (callback) { if (callback) {
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); spin_unlock_irqrestore(&chan->desc_lock, flags);
dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", dev_dbg(chan->dev, "LD %p callback\n", desc);
desc);
callback(callback_param); callback(callback_param);
spin_lock_irqsave(&fsl_chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
} }
/* Run any dependencies, then free the descriptor */
dma_run_dependencies(&desc->async_tx);
dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
} }
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
spin_unlock_irqrestore(&chan->desc_lock, flags);
} }
 /**
- * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
+ * @chan : Freescale DMA channel
+ *
+ * This will make sure that any pending transactions will be run.
+ * If the DMA controller is idle, it will be started. Otherwise,
+ * the DMA controller's interrupt handler will start any pending
+ * transactions when it becomes idle.
  */
static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{ {
struct list_head *ld_node; struct fsl_desc_sw *desc;
dma_addr_t next_dest_addr;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&fsl_chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
if (!dma_is_idle(fsl_chan)) /*
* If the list of pending descriptors is empty, then we
* don't need to do any work at all
*/
if (list_empty(&chan->ld_pending)) {
dev_dbg(chan->dev, "no pending LDs\n");
goto out_unlock;
}
/*
* The DMA controller is not idle, which means the interrupt
* handler will start any queued transactions when it runs
* at the end of the current transaction
*/
if (!dma_is_idle(chan)) {
dev_dbg(chan->dev, "DMA controller still busy\n");
goto out_unlock; goto out_unlock;
}
dma_halt(fsl_chan); /*
* TODO:
* make sure the dma_halt() function really un-wedges the
* controller as much as possible
*/
dma_halt(chan);
/* If there are some link descriptors /*
* not transfered in queue. We need to start it. * If there are some link descriptors which have not been
* transferred, we need to start the controller
*/ */
/* Find the first un-transfer desciptor */ /*
for (ld_node = fsl_chan->ld_queue.next; * Move all elements from the queue of pending transactions
(ld_node != &fsl_chan->ld_queue) * onto the list of running transactions
&& (dma_async_is_complete( */
to_fsl_desc(ld_node)->async_tx.cookie, desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
fsl_chan->completed_cookie, list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
fsl_chan->common.cookie) == DMA_SUCCESS);
ld_node = ld_node->next); /*
* Program the descriptor's address into the DMA controller,
if (ld_node != &fsl_chan->ld_queue) { * then start the DMA transaction
/* Get the ld start address from ld_queue */ */
next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; set_cdar(chan, desc->async_tx.phys);
dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", dma_start(chan);
(unsigned long long)next_dest_addr);
set_cdar(fsl_chan, next_dest_addr);
dma_start(fsl_chan);
} else {
set_cdar(fsl_chan, 0);
set_ndar(fsl_chan, 0);
}
out_unlock: out_unlock:
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); spin_unlock_irqrestore(&chan->desc_lock, flags);
} }
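/*
 * Editor's aside (not part of the patch): the function above is the heart of
 * the new two-list scheme. The sketch below isolates that bookkeeping;
 * example_start_hw() is a hypothetical stand-in for set_cdar() + dma_start(),
 * and the caller is assumed to hold chan->desc_lock.
 */
static void example_submit_pending(struct fsldma_chan *chan,
				   void (*example_start_hw)(struct fsldma_chan *,
							    dma_addr_t))
{
	struct fsl_desc_sw *first;

	if (list_empty(&chan->ld_pending))
		return;

	/* remember the head of the batch before the splice empties the list */
	first = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);

	/* move the whole batch in O(1); ld_pending ends up empty */
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/* point the hardware at the first descriptor and go */
	example_start_hw(chan, first->async_tx.phys);
}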
/** /**
* fsl_dma_memcpy_issue_pending - Issue the DMA start command * fsl_dma_memcpy_issue_pending - Issue the DMA start command
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
*/ */
static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{ {
struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); struct fsldma_chan *chan = to_fsl_chan(dchan);
fsl_chan_xfer_ld_queue(chan);
#ifdef FSL_DMA_LD_DEBUG
struct fsl_desc_sw *ld;
unsigned long flags;
spin_lock_irqsave(&fsl_chan->desc_lock, flags);
if (list_empty(&fsl_chan->ld_queue)) {
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
return;
}
dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
int i;
dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
fsl_chan->id, ld->async_tx.phys);
for (i = 0; i < 8; i++)
dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
i, *(((u32 *)&ld->hw) + i));
}
dev_dbg(fsl_chan->dev, "----------------\n");
spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
#endif
fsl_chan_xfer_ld_queue(fsl_chan);
} }
/** /**
* fsl_dma_is_complete - Determine the DMA status * fsl_dma_is_complete - Determine the DMA status
* @fsl_chan : Freescale DMA channel * @chan : Freescale DMA channel
*/ */
static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
dma_cookie_t cookie, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *done,
dma_cookie_t *used) dma_cookie_t *used)
{ {
struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used; dma_cookie_t last_used;
dma_cookie_t last_complete; dma_cookie_t last_complete;
fsl_chan_ld_cleanup(fsl_chan); fsl_chan_ld_cleanup(chan);
last_used = chan->cookie; last_used = dchan->cookie;
last_complete = fsl_chan->completed_cookie; last_complete = chan->completed_cookie;
if (done) if (done)
*done = last_complete; *done = last_complete;
...@@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, ...@@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
return dma_async_is_complete(cookie, last_complete, last_used); return dma_async_is_complete(cookie, last_complete, last_used);
} }
static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) /*----------------------------------------------------------------------------*/
/* Interrupt Handling */
/*----------------------------------------------------------------------------*/
static irqreturn_t fsldma_chan_irq(int irq, void *data)
{ {
struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; struct fsldma_chan *chan = data;
u32 stat;
int update_cookie = 0; int update_cookie = 0;
int xfer_ld_q = 0; int xfer_ld_q = 0;
u32 stat;
stat = get_sr(fsl_chan); /* save and clear the status register */
dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n", stat = get_sr(chan);
fsl_chan->id, stat); set_sr(chan, stat);
set_sr(fsl_chan, stat); /* Clear the event register */ dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat) if (!stat)
return IRQ_NONE; return IRQ_NONE;
if (stat & FSL_DMA_SR_TE) if (stat & FSL_DMA_SR_TE)
dev_err(fsl_chan->dev, "Transfer Error!\n"); dev_err(chan->dev, "Transfer Error!\n");
-	/* Programming Error
+	/*
+	 * Programming Error
 	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
 	 * trigger a PE interrupt.
 	 */
if (stat & FSL_DMA_SR_PE) { if (stat & FSL_DMA_SR_PE) {
dev_dbg(fsl_chan->dev, "event: Programming Error INT\n"); dev_dbg(chan->dev, "irq: Programming Error INT\n");
if (get_bcr(fsl_chan) == 0) { if (get_bcr(chan) == 0) {
/* BCR register is 0, this is a DMA_INTERRUPT async_tx. /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
* Now, update the completed cookie, and continue the * Now, update the completed cookie, and continue the
* next uncompleted transfer. * next uncompleted transfer.
...@@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data) ...@@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
stat &= ~FSL_DMA_SR_PE; stat &= ~FSL_DMA_SR_PE;
} }
/* If the link descriptor segment transfer finishes, /*
* If the link descriptor segment transfer finishes,
* we will recycle the used descriptor. * we will recycle the used descriptor.
*/ */
if (stat & FSL_DMA_SR_EOSI) { if (stat & FSL_DMA_SR_EOSI) {
dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n"); dev_dbg(chan->dev, "irq: End-of-segments INT\n");
dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n", dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
(unsigned long long)get_cdar(fsl_chan), (unsigned long long)get_cdar(chan),
(unsigned long long)get_ndar(fsl_chan)); (unsigned long long)get_ndar(chan));
stat &= ~FSL_DMA_SR_EOSI; stat &= ~FSL_DMA_SR_EOSI;
update_cookie = 1; update_cookie = 1;
} }
-	/* For MPC8349, EOCDI event need to update cookie
+	/*
+	 * For MPC8349, the EOCDI event needs to update the cookie
 	 * and start the next transfer if it exists.
 	 */
if (stat & FSL_DMA_SR_EOCDI) { if (stat & FSL_DMA_SR_EOCDI) {
dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n"); dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
stat &= ~FSL_DMA_SR_EOCDI; stat &= ~FSL_DMA_SR_EOCDI;
update_cookie = 1; update_cookie = 1;
xfer_ld_q = 1; xfer_ld_q = 1;
} }
-	/* If it current transfer is the end-of-transfer,
+	/*
+	 * If the current transfer is the end-of-transfer,
 	 * we should clear the Channel Start bit to
 	 * prepare for the next transfer.
 	 */
if (stat & FSL_DMA_SR_EOLNI) { if (stat & FSL_DMA_SR_EOLNI) {
dev_dbg(fsl_chan->dev, "event: End-of-link INT\n"); dev_dbg(chan->dev, "irq: End-of-link INT\n");
stat &= ~FSL_DMA_SR_EOLNI; stat &= ~FSL_DMA_SR_EOLNI;
xfer_ld_q = 1; xfer_ld_q = 1;
} }
if (update_cookie) if (update_cookie)
fsl_dma_update_completed_cookie(fsl_chan); fsl_dma_update_completed_cookie(chan);
if (xfer_ld_q) if (xfer_ld_q)
fsl_chan_xfer_ld_queue(fsl_chan); fsl_chan_xfer_ld_queue(chan);
if (stat) if (stat)
dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n", dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
stat);
dev_dbg(fsl_chan->dev, "event: Exit\n"); dev_dbg(chan->dev, "irq: Exit\n");
tasklet_schedule(&fsl_chan->tasklet); tasklet_schedule(&chan->tasklet);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) static void dma_do_tasklet(unsigned long data)
{
struct fsldma_chan *chan = (struct fsldma_chan *)data;
fsl_chan_ld_cleanup(chan);
}
static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
{ {
struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; struct fsldma_device *fdev = data;
u32 gsr; struct fsldma_chan *chan;
int ch_nr; unsigned int handled = 0;
u32 gsr, mask;
int i;
gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
: in_le32(fdev->regs);
mask = 0xff000000;
dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
if (!chan)
continue;
if (gsr & mask) {
dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
fsldma_chan_irq(irq, chan);
handled++;
}
-	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
-			: in_le32(fdev->reg_base);
-	ch_nr = (32 - ffs(gsr)) / 8;
-
-	return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
-			fdev->chan[ch_nr]) : IRQ_NONE;
+		gsr &= ~mask;
+		mask >>= 8;
+	}
+
+	return IRQ_RETVAL(handled);
 }
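/*
 * Editor's aside (not part of the patch): both the old and the new controller
 * interrupt handler decode the shared DGSR register, in which each channel
 * owns one byte starting from the most significant byte. The helper below
 * just restates that mapping; it is illustrative, not driver code.
 */
static u32 example_dgsr_mask_for_channel(int id)
{
	/* channel 0 -> bits 31..24, channel 1 -> bits 23..16, and so on */
	return 0xff000000u >> (8 * id);
}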
static void dma_do_tasklet(unsigned long data) static void fsldma_free_irqs(struct fsldma_device *fdev)
{
struct fsldma_chan *chan;
int i;
if (fdev->irq != NO_IRQ) {
dev_dbg(fdev->dev, "free per-controller IRQ\n");
free_irq(fdev->irq, fdev);
return;
}
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
if (chan && chan->irq != NO_IRQ) {
dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
free_irq(chan->irq, chan);
}
}
}
static int fsldma_request_irqs(struct fsldma_device *fdev)
{ {
struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; struct fsldma_chan *chan;
fsl_chan_ld_cleanup(fsl_chan); int ret;
int i;
/* if we have a per-controller IRQ, use that */
if (fdev->irq != NO_IRQ) {
dev_dbg(fdev->dev, "request per-controller IRQ\n");
ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
"fsldma-controller", fdev);
return ret;
}
/* no per-controller IRQ, use the per-channel IRQs */
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i];
if (!chan)
continue;
if (chan->irq == NO_IRQ) {
dev_err(fdev->dev, "no interrupts property defined for "
"DMA channel %d. Please fix your "
"device tree\n", chan->id);
ret = -ENODEV;
goto out_unwind;
}
dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
"fsldma-chan", chan);
if (ret) {
dev_err(fdev->dev, "unable to request IRQ for DMA "
"channel %d\n", chan->id);
goto out_unwind;
}
}
return 0;
out_unwind:
for (/* none */; i >= 0; i--) {
chan = fdev->chan[i];
if (!chan)
continue;
if (chan->irq == NO_IRQ)
continue;
free_irq(chan->irq, chan);
}
return ret;
} }
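/*
 * Editor's aside (not part of the patch): fsldma_request_irqs() walks back
 * down the channel array when a request fails. The same acquire-then-unwind
 * pattern in isolation (the array and count are hypothetical):
 */
static int example_request_many(unsigned int *example_irqs, int count,
				irq_handler_t handler, void *dev_id)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = request_irq(example_irqs[i], handler, IRQF_SHARED,
				  "example", dev_id);
		if (ret)
			goto out_unwind;
	}
	return 0;

out_unwind:
	/* free only the IRQs that were successfully requested */
	while (--i >= 0)
		free_irq(example_irqs[i], dev_id);
	return ret;
}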
static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, /*----------------------------------------------------------------------------*/
/* OpenFirmware Subsystem */
/*----------------------------------------------------------------------------*/
static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
struct device_node *node, u32 feature, const char *compatible) struct device_node *node, u32 feature, const char *compatible)
{ {
struct fsl_dma_chan *new_fsl_chan; struct fsldma_chan *chan;
struct resource res;
int err; int err;
/* alloc channel */ /* alloc channel */
new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL); chan = kzalloc(sizeof(*chan), GFP_KERNEL);
if (!new_fsl_chan) { if (!chan) {
dev_err(fdev->dev, "No free memory for allocating " dev_err(fdev->dev, "no free memory for DMA channels!\n");
"dma channels!\n"); err = -ENOMEM;
return -ENOMEM; goto out_return;
} }
/* get dma channel register base */ /* ioremap registers for use */
err = of_address_to_resource(node, 0, &new_fsl_chan->reg); chan->regs = of_iomap(node, 0);
if (err) { if (!chan->regs) {
dev_err(fdev->dev, "Can't get %s property 'reg'\n", dev_err(fdev->dev, "unable to ioremap registers\n");
node->full_name); err = -ENOMEM;
goto err_no_reg; goto out_free_chan;
} }
new_fsl_chan->feature = feature; err = of_address_to_resource(node, 0, &res);
if (err) {
dev_err(fdev->dev, "unable to find 'reg' property\n");
goto out_iounmap_regs;
}
chan->feature = feature;
if (!fdev->feature) if (!fdev->feature)
fdev->feature = new_fsl_chan->feature; fdev->feature = chan->feature;
/* If the DMA device's feature is different than its channels', /*
* report the bug. * If the DMA device's feature is different than the feature
* of its channels, report the bug
*/ */
WARN_ON(fdev->feature != new_fsl_chan->feature); WARN_ON(fdev->feature != chan->feature);
new_fsl_chan->dev = fdev->dev;
new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7; chan->dev = fdev->dev;
if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { chan->id = ((res.start - 0x100) & 0xfff) >> 7;
dev_err(fdev->dev, "There is no %d channel!\n", if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
new_fsl_chan->id); dev_err(fdev->dev, "too many channels for device\n");
err = -EINVAL; err = -EINVAL;
goto err_no_chan; goto out_iounmap_regs;
} }
fdev->chan[new_fsl_chan->id] = new_fsl_chan;
tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
(unsigned long)new_fsl_chan);
/* Init the channel */ fdev->chan[chan->id] = chan;
dma_init(new_fsl_chan); tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
/* Initialize the channel */
dma_init(chan);
/* Clear cdar registers */ /* Clear cdar registers */
set_cdar(new_fsl_chan, 0); set_cdar(chan, 0);
switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) { switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX: case FSL_DMA_IP_85XX:
new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
case FSL_DMA_IP_83XX: case FSL_DMA_IP_83XX:
new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; chan->toggle_ext_start = fsl_chan_toggle_ext_start;
new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; chan->set_src_loop_size = fsl_chan_set_src_loop_size;
new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
new_fsl_chan->set_request_count = fsl_chan_set_request_count; chan->set_request_count = fsl_chan_set_request_count;
} }
spin_lock_init(&new_fsl_chan->desc_lock); spin_lock_init(&chan->desc_lock);
INIT_LIST_HEAD(&new_fsl_chan->ld_queue); INIT_LIST_HEAD(&chan->ld_pending);
INIT_LIST_HEAD(&chan->ld_running);
chan->common.device = &fdev->common;
new_fsl_chan->common.device = &fdev->common; /* find the IRQ line, if it exists in the device tree */
chan->irq = irq_of_parse_and_map(node, 0);
/* Add the channel to DMA device channel list */ /* Add the channel to DMA device channel list */
list_add_tail(&new_fsl_chan->common.device_node, list_add_tail(&chan->common.device_node, &fdev->common.channels);
&fdev->common.channels);
fdev->common.chancnt++; fdev->common.chancnt++;
new_fsl_chan->irq = irq_of_parse_and_map(node, 0); dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
if (new_fsl_chan->irq != NO_IRQ) { chan->irq != NO_IRQ ? chan->irq : fdev->irq);
err = request_irq(new_fsl_chan->irq,
&fsl_dma_chan_do_interrupt, IRQF_SHARED,
"fsldma-channel", new_fsl_chan);
if (err) {
dev_err(fdev->dev, "DMA channel %s request_irq error "
"with return %d\n", node->full_name, err);
goto err_no_irq;
}
}
dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
compatible,
new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
return 0; return 0;
err_no_irq: out_iounmap_regs:
list_del(&new_fsl_chan->common.device_node); iounmap(chan->regs);
err_no_chan: out_free_chan:
iounmap(new_fsl_chan->reg_base); kfree(chan);
err_no_reg: out_return:
kfree(new_fsl_chan);
return err; return err;
} }
static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{ {
if (fchan->irq != NO_IRQ) irq_dispose_mapping(chan->irq);
free_irq(fchan->irq, fchan); list_del(&chan->common.device_node);
list_del(&fchan->common.device_node); iounmap(chan->regs);
iounmap(fchan->reg_base); kfree(chan);
kfree(fchan);
} }
static int __devinit of_fsl_dma_probe(struct of_device *dev, static int __devinit fsldma_of_probe(struct of_device *op,
const struct of_device_id *match) const struct of_device_id *match)
{ {
int err; struct fsldma_device *fdev;
struct fsl_dma_device *fdev;
struct device_node *child; struct device_node *child;
int err;
-	fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
 	if (!fdev) {
-		dev_err(&dev->dev, "No enough memory for 'priv'\n");
-		return -ENOMEM;
+		dev_err(&op->dev, "Not enough memory for 'priv'\n");
+		err = -ENOMEM;
+		goto out_return;
 	}
fdev->dev = &dev->dev;
fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels); INIT_LIST_HEAD(&fdev->common.channels);
/* get DMA controller register base */ /* ioremap the registers for use */
err = of_address_to_resource(dev->node, 0, &fdev->reg); fdev->regs = of_iomap(op->node, 0);
if (err) { if (!fdev->regs) {
dev_err(&dev->dev, "Can't get %s property 'reg'\n", dev_err(&op->dev, "unable to ioremap registers\n");
dev->node->full_name); err = -ENOMEM;
goto err_no_reg; goto out_free_fdev;
} }
dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " /* map the channel IRQ if it exists, but don't hookup the handler yet */
"controller at 0x%llx...\n", fdev->irq = irq_of_parse_and_map(op->node, 0);
match->compatible, (unsigned long long)fdev->reg.start);
fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
- fdev->reg.start + 1);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
...@@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, ...@@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
fdev->common.device_terminate_all = fsl_dma_device_terminate_all; fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
fdev->common.dev = &dev->dev; fdev->common.dev = &op->dev;
fdev->irq = irq_of_parse_and_map(dev->node, 0);
if (fdev->irq != NO_IRQ) {
err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
"fsldma-device", fdev);
if (err) {
dev_err(&dev->dev, "DMA device request_irq error "
"with return %d\n", err);
goto err;
}
}
dev_set_drvdata(&(dev->dev), fdev); dev_set_drvdata(&op->dev, fdev);
/* We cannot use of_platform_bus_probe() because there is no /*
* of_platform_bus_remove. Instead, we manually instantiate every DMA * We cannot use of_platform_bus_probe() because there is no
* of_platform_bus_remove(). Instead, we manually instantiate every DMA
* channel object. * channel object.
*/ */
for_each_child_of_node(dev->node, child) { for_each_child_of_node(op->node, child) {
if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
fsl_dma_chan_probe(fdev, child, fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
"fsl,eloplus-dma-channel"); "fsl,eloplus-dma-channel");
if (of_device_is_compatible(child, "fsl,elo-dma-channel")) }
if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
fsl_dma_chan_probe(fdev, child, fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
"fsl,elo-dma-channel"); "fsl,elo-dma-channel");
} }
}
/*
* Hookup the IRQ handler(s)
*
* If we have a per-controller interrupt, we prefer that to the
* per-channel interrupts to reduce the number of shared interrupt
* handlers on the same IRQ line
*/
err = fsldma_request_irqs(fdev);
if (err) {
dev_err(fdev->dev, "unable to request IRQs\n");
goto out_free_fdev;
}
dma_async_device_register(&fdev->common); dma_async_device_register(&fdev->common);
return 0; return 0;
err: out_free_fdev:
iounmap(fdev->reg_base); irq_dispose_mapping(fdev->irq);
err_no_reg:
kfree(fdev); kfree(fdev);
out_return:
return err; return err;
} }
static int of_fsl_dma_remove(struct of_device *of_dev) static int fsldma_of_remove(struct of_device *op)
{ {
struct fsl_dma_device *fdev; struct fsldma_device *fdev;
unsigned int i; unsigned int i;
fdev = dev_get_drvdata(&of_dev->dev); fdev = dev_get_drvdata(&op->dev);
dma_async_device_unregister(&fdev->common); dma_async_device_unregister(&fdev->common);
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) fsldma_free_irqs(fdev);
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
if (fdev->chan[i]) if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]); fsl_dma_chan_remove(fdev->chan[i]);
}
if (fdev->irq != NO_IRQ) iounmap(fdev->regs);
free_irq(fdev->irq, fdev); dev_set_drvdata(&op->dev, NULL);
iounmap(fdev->reg_base);
kfree(fdev); kfree(fdev);
dev_set_drvdata(&of_dev->dev, NULL);
return 0; return 0;
} }
static struct of_device_id of_fsl_dma_ids[] = { static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,eloplus-dma", }, { .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", }, { .compatible = "fsl,elo-dma", },
{} {}
}; };
static struct of_platform_driver of_fsl_dma_driver = { static struct of_platform_driver fsldma_of_driver = {
.name = "fsl-elo-dma", .name = "fsl-elo-dma",
.match_table = of_fsl_dma_ids, .match_table = fsldma_of_ids,
.probe = of_fsl_dma_probe, .probe = fsldma_of_probe,
.remove = of_fsl_dma_remove, .remove = fsldma_of_remove,
}; };
static __init int of_fsl_dma_init(void) /*----------------------------------------------------------------------------*/
/* Module Init / Exit */
/*----------------------------------------------------------------------------*/
static __init int fsldma_init(void)
{ {
int ret; int ret;
pr_info("Freescale Elo / Elo Plus DMA driver\n"); pr_info("Freescale Elo / Elo Plus DMA driver\n");
ret = of_register_platform_driver(&of_fsl_dma_driver); ret = of_register_platform_driver(&fsldma_of_driver);
if (ret) if (ret)
pr_err("fsldma: failed to register platform driver\n"); pr_err("fsldma: failed to register platform driver\n");
return ret; return ret;
} }
static void __exit of_fsl_dma_exit(void) static void __exit fsldma_exit(void)
{ {
of_unregister_platform_driver(&of_fsl_dma_driver); of_unregister_platform_driver(&fsldma_of_driver);
} }
subsys_initcall(of_fsl_dma_init); subsys_initcall(fsldma_init);
module_exit(of_fsl_dma_exit); module_exit(fsldma_exit);
MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
...@@ -92,11 +92,9 @@ struct fsl_desc_sw { ...@@ -92,11 +92,9 @@ struct fsl_desc_sw {
struct list_head node; struct list_head node;
struct list_head tx_list; struct list_head tx_list;
struct dma_async_tx_descriptor async_tx; struct dma_async_tx_descriptor async_tx;
struct list_head *ld;
void *priv;
} __attribute__((aligned(32))); } __attribute__((aligned(32)));
struct fsl_dma_chan_regs { struct fsldma_chan_regs {
u32 mr; /* 0x00 - Mode Register */ u32 mr; /* 0x00 - Mode Register */
u32 sr; /* 0x04 - Status Register */ u32 sr; /* 0x04 - Status Register */
u64 cdar; /* 0x08 - Current descriptor address register */ u64 cdar; /* 0x08 - Current descriptor address register */
...@@ -106,20 +104,19 @@ struct fsl_dma_chan_regs { ...@@ -106,20 +104,19 @@ struct fsl_dma_chan_regs {
u64 ndar; /* 0x24 - Next Descriptor Address Register */ u64 ndar; /* 0x24 - Next Descriptor Address Register */
}; };
struct fsl_dma_chan; struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 4 #define FSL_DMA_MAX_CHANS_PER_DEVICE 4
struct fsl_dma_device { struct fsldma_device {
void __iomem *reg_base; /* DGSR register base */ void __iomem *regs; /* DGSR register base */
struct resource reg; /* Resource for register */
struct device *dev; struct device *dev;
struct dma_device common; struct dma_device common;
struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE]; struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */ u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */ int irq; /* Channel IRQ */
}; };
/* Define macros for fsl_dma_chan->feature property */ /* Define macros for fsldma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN 0x00000000 #define FSL_DMA_LITTLE_ENDIAN 0x00000000
#define FSL_DMA_BIG_ENDIAN 0x00000001 #define FSL_DMA_BIG_ENDIAN 0x00000001
...@@ -130,28 +127,28 @@ struct fsl_dma_device { ...@@ -130,28 +127,28 @@ struct fsl_dma_device {
#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
#define FSL_DMA_CHAN_START_EXT 0x00002000 #define FSL_DMA_CHAN_START_EXT 0x00002000
struct fsl_dma_chan { struct fsldma_chan {
struct fsl_dma_chan_regs __iomem *reg_base; struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */ dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */ spinlock_t desc_lock; /* Descriptor operation lock */
struct list_head ld_queue; /* Link descriptors queue */ struct list_head ld_pending; /* Link descriptors queue */
struct list_head ld_running; /* Link descriptors queue */
struct dma_chan common; /* DMA common channel */ struct dma_chan common; /* DMA common channel */
struct dma_pool *desc_pool; /* Descriptors pool */ struct dma_pool *desc_pool; /* Descriptors pool */
struct device *dev; /* Channel device */ struct device *dev; /* Channel device */
struct resource reg; /* Resource for register */
int irq; /* Channel IRQ */ int irq; /* Channel IRQ */
int id; /* Raw id of this channel */ int id; /* Raw id of this channel */
struct tasklet_struct tasklet; struct tasklet_struct tasklet;
u32 feature; u32 feature;
void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
}; };
#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) #define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node) #define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx) #define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
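/*
 * Editor's aside (not part of the patch): to_fsl_chan() and friends rely on
 * container_of() to step from the embedded member back to the enclosing
 * structure. A minimal illustration:
 */
static struct fsldma_chan *example_to_fsl_chan(struct dma_chan *dchan)
{
	/* only valid when dchan really is &some_fsldma_chan->common */
	return container_of(dchan, struct fsldma_chan, common);
}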
......
@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
 	return IRQ_HANDLED;
 }

-static void ioat1_cleanup_tasklet(unsigned long data);
-
 /* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
-		       struct ioat_chan_common *chan, int idx,
-		       void (*timer_fn)(unsigned long),
-		       void (*tasklet)(unsigned long),
-		       unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
 {
 	struct dma_device *dma = &device->common;
+	struct dma_chan *c = &chan->common;
+	unsigned long data = (unsigned long) c;

 	chan->device = device;
 	chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
 	list_add_tail(&chan->common.device_node, &dma->channels);
 	device->idx[idx] = chan;
 	init_timer(&chan->timer);
-	chan->timer.function = timer_fn;
-	chan->timer.data = ioat;
-	tasklet_init(&chan->cleanup_task, tasklet, ioat);
+	chan->timer.function = device->timer_fn;
+	chan->timer.data = data;
+	tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
 	tasklet_disable(&chan->cleanup_task);
 }
-
-static void ioat1_timer_event(unsigned long data);
/** /**
* ioat1_dma_enumerate_channels - find and initialize the device's channels * ioat1_dma_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated * @device: the device to be enumerated
...@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) ...@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
if (!ioat) if (!ioat)
break; break;
ioat_init_channel(device, &ioat->base, i, ioat_init_channel(device, &ioat->base, i);
ioat1_timer_event,
ioat1_cleanup_tasklet,
(unsigned long) ioat);
ioat->xfercap = xfercap; ioat->xfercap = xfercap;
spin_lock_init(&ioat->desc_lock); spin_lock_init(&ioat->desc_lock);
INIT_LIST_HEAD(&ioat->free_desc); INIT_LIST_HEAD(&ioat->free_desc);
...@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, ...@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
return &desc->txd; return &desc->txd;
} }
-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
 {
-	struct ioat_dma_chan *chan = (void *)data;
+	struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);

-	ioat1_cleanup(chan);
-	writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+	ioat1_cleanup(ioat);
+	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
 }
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
...@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) ...@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
static void ioat1_timer_event(unsigned long data) static void ioat1_timer_event(unsigned long data)
{ {
struct ioat_dma_chan *ioat = (void *) data; struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base; struct ioat_chan_common *chan = &ioat->base;
dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
...@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data) ...@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
spin_unlock_bh(&chan->cleanup_lock); spin_unlock_bh(&chan->cleanup_lock);
} }
-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
-		      dma_cookie_t *done, dma_cookie_t *used)
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+		     dma_cookie_t *done, dma_cookie_t *used)
 {
-	struct ioat_dma_chan *ioat = to_ioat_chan(c);
+	struct ioat_chan_common *chan = to_chan_common(c);
+	struct ioatdma_device *device = chan->device;

 	if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
 		return DMA_SUCCESS;

-	ioat1_cleanup(ioat);
+	device->cleanup_fn((unsigned long) c);

 	return ioat_is_complete(c, cookie, done, used);
 }
...@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) ...@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
device->intr_quirk = ioat1_intr_quirk; device->intr_quirk = ioat1_intr_quirk;
device->enumerate_channels = ioat1_enumerate_channels; device->enumerate_channels = ioat1_enumerate_channels;
device->self_test = ioat_dma_self_test; device->self_test = ioat_dma_self_test;
device->timer_fn = ioat1_timer_event;
device->cleanup_fn = ioat1_cleanup_event;
dma = &device->common; dma = &device->common;
dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources; dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
dma->device_is_tx_complete = ioat1_dma_is_complete; dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device); err = ioat_probe(device);
if (err) if (err)
......
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices) * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration * @enumerate_channels: hw version specific channel enumeration
* @reset_hw: hw version specific channel (re)initialization * @reset_hw: hw version specific channel (re)initialization
* @cleanup_tasklet: select between the v2 and v3 cleanup routines * @cleanup_fn: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines * @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type * @self_test: hardware version specific self test for each supported op type
* *
...@@ -80,7 +80,7 @@ struct ioatdma_device { ...@@ -80,7 +80,7 @@ struct ioatdma_device {
void (*intr_quirk)(struct ioatdma_device *device); void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device); int (*enumerate_channels)(struct ioatdma_device *device);
int (*reset_hw)(struct ioat_chan_common *chan); int (*reset_hw)(struct ioat_chan_common *chan);
void (*cleanup_tasklet)(unsigned long data); void (*cleanup_fn)(unsigned long data);
void (*timer_fn)(unsigned long data); void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device); int (*self_test)(struct ioatdma_device *device);
}; };
...@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, ...@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase); void __iomem *iobase);
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device, void ioat_init_channel(struct ioatdma_device *device,
struct ioat_chan_common *chan, int idx, struct ioat_chan_common *chan, int idx);
void (*timer_fn)(unsigned long), enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
void (*tasklet)(unsigned long), dma_cookie_t *done, dma_cookie_t *used);
unsigned long ioat);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw); size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan, bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
......
@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
 void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
 {
-	void * __iomem reg_base = ioat->base.reg_base;
+	struct ioat_chan_common *chan = &ioat->base;

-	ioat->pending = 0;
 	ioat->dmacount += ioat2_ring_pending(ioat);
 	ioat->issued = ioat->head;
 	/* make descriptor updates globally visible before notifying channel */
 	wmb();
-	writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
-	dev_dbg(to_dev(&ioat->base),
+	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+	dev_dbg(to_dev(chan),
 		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
 		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
 }

-void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *c)
 {
-	struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

-	spin_lock_bh(&ioat->ring_lock);
-	if (ioat->pending == 1)
+	if (ioat2_ring_pending(ioat)) {
+		spin_lock_bh(&ioat->ring_lock);
 		__ioat2_issue_pending(ioat);
-	spin_unlock_bh(&ioat->ring_lock);
+		spin_unlock_bh(&ioat->ring_lock);
+	}
 }
 /**
  * ioat2_update_pending - log pending descriptors
  * @ioat: ioat2+ channel
  *
- * set pending to '1' unless pending is already set to '2', pending == 2
- * indicates that submission is temporarily blocked due to an in-flight
- * reset. If we are already above the ioat_pending_level threshold then
- * just issue pending.
- *
- * called with ring_lock held
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark. Called with ring_lock held
  */
 static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
 {
-	if (unlikely(ioat->pending == 2))
-		return;
-	else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+	if (ioat2_ring_pending(ioat) > ioat_pending_level)
 		__ioat2_issue_pending(ioat);
-	else
-		ioat->pending = 1;
 }
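/*
 * Editor's aside (not part of the patch): with the 'pending' flag gone, the
 * submit path only compares the count of filled-but-unissued descriptors
 * against ioat_pending_level. A hypothetical submit-side caller would look
 * roughly like this (the increment is a simplification of the real
 * tx_submit path):
 */
static void example_submit_one_locked(struct ioat2_dma_chan *ioat)
{
	/* caller holds ioat->ring_lock, as the comment above requires */
	ioat->head += 1;		/* one more descriptor filled in */
	ioat2_update_pending(ioat);	/* kick hardware past the watermark */
}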
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
...@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) ...@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
seen_current = true; seen_current = true;
} }
ioat->tail += i; ioat->tail += i;
BUG_ON(!seen_current); /* no active descs have written a completion? */ BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete; chan->last_completion = phys_complete;
if (ioat->head == ioat->tail) { if (ioat->head == ioat->tail) {
...@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) ...@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock); spin_unlock_bh(&chan->cleanup_lock);
} }
void ioat2_cleanup_tasklet(unsigned long data) void ioat2_cleanup_event(unsigned long data)
{ {
struct ioat2_dma_chan *ioat = (void *) data; struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
ioat2_cleanup(ioat); ioat2_cleanup(ioat);
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
...@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) ...@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
void ioat2_timer_event(unsigned long data) void ioat2_timer_event(unsigned long data)
{ {
struct ioat2_dma_chan *ioat = (void *) data; struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base; struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&chan->cleanup_lock);
...@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) ...@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
if (!ioat) if (!ioat)
break; break;
ioat_init_channel(device, &ioat->base, i, ioat_init_channel(device, &ioat->base, i);
device->timer_fn,
device->cleanup_tasklet,
(unsigned long) ioat);
ioat->xfercap_log = xfercap_log; ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock); spin_lock_init(&ioat->ring_lock);
if (device->reset_hw(&ioat->base)) { if (device->reset_hw(&ioat->base)) {
...@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) ...@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->head = 0; ioat->head = 0;
ioat->issued = 0; ioat->issued = 0;
ioat->tail = 0; ioat->tail = 0;
ioat->pending = 0;
ioat->alloc_order = order; ioat->alloc_order = order;
spin_unlock_bh(&ioat->ring_lock); spin_unlock_bh(&ioat->ring_lock);
...@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) ...@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&chan->cleanup_lock); spin_unlock_bh(&chan->cleanup_lock);
device->timer_fn((unsigned long) ioat); device->timer_fn((unsigned long) &chan->common);
} else } else
spin_unlock_bh(&chan->cleanup_lock); spin_unlock_bh(&chan->cleanup_lock);
return -ENOMEM; return -ENOMEM;
...@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c) ...@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
tasklet_disable(&chan->cleanup_task); tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer); del_timer_sync(&chan->timer);
device->cleanup_tasklet((unsigned long) ioat); device->cleanup_fn((unsigned long) c);
device->reset_hw(chan); device->reset_hw(chan);
spin_lock_bh(&ioat->ring_lock); spin_lock_bh(&ioat->ring_lock);
...@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c) ...@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c)
chan->last_completion = 0; chan->last_completion = 0;
chan->completion_dma = 0; chan->completion_dma = 0;
ioat->pending = 0;
ioat->dmacount = 0; ioat->dmacount = 0;
} }
enum dma_status
ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioatdma_device *device = ioat->base.device;
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS;
device->cleanup_tasklet((unsigned long) ioat);
return ioat_is_complete(c, cookie, done, used);
}
static ssize_t ring_size_show(struct dma_chan *c, char *page) static ssize_t ring_size_show(struct dma_chan *c, char *page)
{ {
struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
...@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) ...@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
device->enumerate_channels = ioat2_enumerate_channels; device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat2_reset_hw; device->reset_hw = ioat2_reset_hw;
device->cleanup_tasklet = ioat2_cleanup_tasklet; device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event; device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test; device->self_test = ioat_dma_self_test;
dma = &device->common; dma = &device->common;
...@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) ...@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending; dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources;
dma->device_is_tx_complete = ioat2_is_complete; dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device); err = ioat_probe(device);
if (err) if (err)
......
...@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order; ...@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
* @head: allocated index * @head: allocated index
* @issued: hardware notification point * @issued: hardware notification point
* @tail: cleanup index * @tail: cleanup index
* @pending: lock free indicator for issued != head
* @dmacount: identical to 'head' except for occasionally resetting to zero * @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors * @alloc_order: log2 of the number of allocated descriptors
* @ring: software ring buffer implementation of hardware ring * @ring: software ring buffer implementation of hardware ring
...@@ -61,7 +60,6 @@ struct ioat2_dma_chan { ...@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
u16 tail; u16 tail;
u16 dmacount; u16 dmacount;
u16 alloc_order; u16 alloc_order;
int pending;
struct ioat_ring_ent **ring; struct ioat_ring_ent **ring;
spinlock_t ring_lock; spinlock_t ring_lock;
}; };
...@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, ...@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
void ioat2_issue_pending(struct dma_chan *chan); void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c); int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c); void ioat2_free_chan_resources(struct dma_chan *c);
enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order); bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
void ioat2_cleanup_tasklet(unsigned long data); void ioat2_cleanup_event(unsigned long data);
void ioat2_timer_event(unsigned long data); void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo); int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo); int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
......
@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 		}
 	}
 	ioat->tail += i;
-	BUG_ON(!seen_current); /* no active descs have written a completion? */
+	BUG_ON(active && !seen_current); /* no active descs have written a completion? */
 	chan->last_completion = phys_complete;
-	if (ioat->head == ioat->tail) {
+
+	active = ioat2_ring_active(ioat);
+	if (active == 0) {
 		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
 			__func__);
 		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
 		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
 	}
+
+	/* 5 microsecond delay per pending descriptor */
+	writew(min((5 * active), IOAT_INTRDELAY_MASK),
+	       chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
 }
static void ioat3_cleanup(struct ioat2_dma_chan *ioat) /* try to cleanup, but yield (via spin_trylock) to incoming submissions
* with the expectation that we will immediately poll again shortly
*/
static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
{ {
struct ioat_chan_common *chan = &ioat->base; struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete; unsigned long phys_complete;
...@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat) ...@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock); spin_unlock_bh(&chan->cleanup_lock);
} }
static void ioat3_cleanup_tasklet(unsigned long data) /* run cleanup now because we already delayed the interrupt via INTRDELAY */
static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
prefetch(chan->completion);
spin_lock_bh(&chan->cleanup_lock);
if (!ioat_cleanup_preamble(chan, &phys_complete)) {
spin_unlock_bh(&chan->cleanup_lock);
return;
}
spin_lock_bh(&ioat->ring_lock);
__cleanup(ioat, phys_complete);
spin_unlock_bh(&ioat->ring_lock);
spin_unlock_bh(&chan->cleanup_lock);
}
static void ioat3_cleanup_event(unsigned long data)
{ {
struct ioat2_dma_chan *ioat = (void *) data; struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
ioat3_cleanup(ioat); ioat3_cleanup_sync(ioat);
writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
} }
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{ {
struct ioat_chan_common *chan = &ioat->base; struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete; unsigned long phys_complete;
u32 status;
status = ioat_chansts(chan);
if (is_ioat_active(status) || is_ioat_idle(status))
ioat_suspend(chan);
while (is_ioat_active(status) || is_ioat_idle(status)) {
status = ioat_chansts(chan);
cpu_relax();
}
ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete)) if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete); __cleanup(ioat, phys_complete);
...@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) ...@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
static void ioat3_timer_event(unsigned long data) static void ioat3_timer_event(unsigned long data)
{ {
struct ioat2_dma_chan *ioat = (void *) data; struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base; struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&chan->cleanup_lock);
...@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, ...@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS; return DMA_SUCCESS;
ioat3_cleanup(ioat); ioat3_cleanup_poll(ioat);
return ioat_is_complete(c, cookie, done, used); return ioat_is_complete(c, cookie, done, used);
} }
...@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) ...@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) { if (is_raid_device) {
dma->device_is_tx_complete = ioat3_is_complete; dma->device_is_tx_complete = ioat3_is_complete;
device->cleanup_tasklet = ioat3_cleanup_tasklet; device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event; device->timer_fn = ioat3_timer_event;
} else { } else {
dma->device_is_tx_complete = ioat2_is_complete; dma->device_is_tx_complete = ioat_is_dma_complete;
device->cleanup_tasklet = ioat2_cleanup_tasklet; device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event; device->timer_fn = ioat2_timer_event;
} }
......
...@@ -60,7 +60,7 @@ ...@@ -60,7 +60,7 @@
#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ #define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ #define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ #define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */
#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ #define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
......
...@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, ...@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
break; break;
case IPU_PIX_FMT_BGRA32: case IPU_PIX_FMT_BGRA32:
case IPU_PIX_FMT_BGR32: case IPU_PIX_FMT_BGR32:
case IPU_PIX_FMT_ABGR32:
params->ip.bpp = 0; params->ip.bpp = 0;
params->ip.pfs = 4; params->ip.pfs = 4;
params->ip.npb = 7; params->ip.npb = 7;
...@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params, ...@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->ip.wid2 = 7; /* Blue bit width - 1 */ params->ip.wid2 = 7; /* Blue bit width - 1 */
params->ip.wid3 = 7; /* Alpha bit width - 1 */ params->ip.wid3 = 7; /* Alpha bit width - 1 */
break; break;
case IPU_PIX_FMT_ABGR32:
params->ip.bpp = 0;
params->ip.pfs = 4;
params->ip.npb = 7;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 8; /* Red bit offset */
params->ip.ofs1 = 16; /* Green bit offset */
params->ip.ofs2 = 24; /* Blue bit offset */
params->ip.ofs3 = 0; /* Alpha bit offset */
params->ip.wid0 = 7; /* Red bit width - 1 */
params->ip.wid1 = 7; /* Green bit width - 1 */
params->ip.wid2 = 7; /* Blue bit width - 1 */
params->ip.wid3 = 7; /* Alpha bit width - 1 */
break;
case IPU_PIX_FMT_UYVY: case IPU_PIX_FMT_UYVY:
params->ip.bpp = 2; params->ip.bpp = 2;
params->ip.pfs = 6; params->ip.pfs = 6;
......
/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
* Copyright (C) Semihalf 2009
*
* Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
* (defines, structures and comments) was taken from MPC5121 DMA driver
* written by Hongjun Chen <hong-jun.chen@freescale.com>.
*
* Approved as OSADL project by a majority of OSADL members and funded
* by OSADL membership fees in 2009; for details see www.osadl.org.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
/*
 * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
 * transfers are supported (tested using the dmatest module).
*/
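For orientation, here is a minimal sketch of how a generic dmaengine client might exercise the memcpy capability this driver registers. The function name and the busy-wait completion loop are illustrative assumptions only; they are not part of this driver or of dmatest.

#include <linux/dmaengine.h>

/* Illustrative client: copy 'len' bytes between two already DMA-mapped buffers. */
static int example_mpc_dma_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* Grab any channel advertising DMA_MEMCPY, e.g. one of this driver's */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);

	/* Busy-poll for completion; a real client would sleep or use a callback */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
	       DMA_IN_PROGRESS)
		cpu_relax();

	dma_release_channel(chan);
	return 0;
}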
#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/random.h>
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS 64
/* Macro definitions */
#define MPC_DMA_CHANNELS 64
#define MPC_DMA_TCD_OFFSET 0x1000
/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG (1 << 31)
#define MPC_DMA_DMACR_ERGA (1 << 3)
#define MPC_DMA_DMACR_ERCA (1 << 2)
/* Error codes */
#define MPC_DMA_DMAES_VLD (1 << 31)
#define MPC_DMA_DMAES_GPE (1 << 15)
#define MPC_DMA_DMAES_CPE (1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE (1 << 7)
#define MPC_DMA_DMAES_SOE (1 << 6)
#define MPC_DMA_DMAES_DAE (1 << 5)
#define MPC_DMA_DMAES_DOE (1 << 4)
#define MPC_DMA_DMAES_NCE (1 << 3)
#define MPC_DMA_DMAES_SGE (1 << 2)
#define MPC_DMA_DMAES_SBE (1 << 1)
#define MPC_DMA_DMAES_DBE (1 << 0)
#define MPC_DMA_TSIZE_1 0x00
#define MPC_DMA_TSIZE_2 0x01
#define MPC_DMA_TSIZE_4 0x02
#define MPC_DMA_TSIZE_16 0x04
#define MPC_DMA_TSIZE_32 0x05
/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
/* 0x00 */
u32 dmacr; /* DMA control register */
u32 dmaes; /* DMA error status */
/* 0x08 */
u32 dmaerqh; /* DMA enable request high(channels 63~32) */
u32 dmaerql; /* DMA enable request low(channels 31~0) */
u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */
u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */
/* 0x18 */
u8 dmaserq; /* DMA set enable request */
u8 dmacerq; /* DMA clear enable request */
u8 dmaseei; /* DMA set enable error interrupt */
u8 dmaceei; /* DMA clear enable error interrupt */
/* 0x1c */
u8 dmacint; /* DMA clear interrupt request */
u8 dmacerr; /* DMA clear error */
u8 dmassrt; /* DMA set start bit */
u8 dmacdne; /* DMA clear DONE status bit */
/* 0x20 */
u32 dmainth; /* DMA interrupt request high(ch63~32) */
u32 dmaintl; /* DMA interrupt request low(ch31~0) */
u32 dmaerrh; /* DMA error high(ch63~32) */
u32 dmaerrl; /* DMA error low(ch31~0) */
/* 0x30 */
u32 dmahrsh; /* DMA hw request status high(ch63~32) */
u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
/* 0x40 ~ 0xff */
u32 reserve0[48]; /* Reserved */
/* 0x100 */
u8 dchpri[MPC_DMA_CHANNELS];
/* DMA channels(0~63) priority */
};
struct __attribute__ ((__packed__)) mpc_dma_tcd {
/* 0x00 */
u32 saddr; /* Source address */
u32 smod:5; /* Source address modulo */
u32 ssize:3; /* Source data transfer size */
u32 dmod:5; /* Destination address modulo */
u32 dsize:3; /* Destination data transfer size */
u32 soff:16; /* Signed source address offset */
/* 0x08 */
u32 nbytes; /* Inner "minor" byte count */
u32 slast; /* Last source address adjustment */
u32 daddr; /* Destination address */
/* 0x14 */
u32 citer_elink:1; /* Enable channel-to-channel linking on
* minor loop complete
*/
u32 citer_linkch:6; /* Link channel for minor loop complete */
u32 citer:9; /* Current "major" iteration count */
u32 doff:16; /* Signed destination address offset */
/* 0x18 */
u32 dlast_sga; /* Last Destination address adjustment/scatter
* gather address
*/
/* 0x1c */
u32 biter_elink:1; /* Enable channel-to-channel linking on major
* loop complete
*/
u32 biter_linkch:6;
u32 biter:9; /* Beginning "major" iteration count */
u32 bwc:2; /* Bandwidth control */
u32 major_linkch:6; /* Link channel number */
u32 done:1; /* Channel done */
u32 active:1; /* Channel active */
u32 major_elink:1; /* Enable channel-to-channel linking on major
* loop complete
*/
u32 e_sg:1; /* Enable scatter/gather processing */
u32 d_req:1; /* Disable request */
u32 int_half:1; /* Enable an interrupt when major counter is
* half complete
*/
u32 int_maj:1; /* Enable an interrupt when major iteration
* count completes
*/
u32 start:1; /* Channel start */
};
struct mpc_dma_desc {
struct dma_async_tx_descriptor desc;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
int error;
struct list_head node;
};
struct mpc_dma_chan {
struct dma_chan chan;
struct list_head free;
struct list_head prepared;
struct list_head queued;
struct list_head active;
struct list_head completed;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
dma_cookie_t completed_cookie;
/* Lock for this structure */
spinlock_t lock;
};
struct mpc_dma {
struct dma_device dma;
struct tasklet_struct tasklet;
struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
struct mpc_dma_regs __iomem *regs;
struct mpc_dma_tcd __iomem *tcd;
int irq;
uint error_status;
/* Lock for error_status field in this structure */
spinlock_t error_status_lock;
};
#define DRV_NAME "mpc512x_dma"
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
return container_of(c, struct mpc_dma_chan, chan);
}
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
/*
* Execute all queued DMA descriptors.
*
* Following requirements must be met while calling mpc_dma_execute():
* a) mchan->lock is acquired,
* b) mchan->active list is empty,
* c) mchan->queued list contains at least one entry.
*/
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
struct mpc_dma_desc *first = NULL;
struct mpc_dma_desc *prev = NULL;
struct mpc_dma_desc *mdesc;
int cid = mchan->chan.chan_id;
/* Move all queued descriptors to active list */
list_splice_tail_init(&mchan->queued, &mchan->active);
/* Chain descriptors into one transaction */
list_for_each_entry(mdesc, &mchan->active, node) {
if (!first)
first = mdesc;
if (!prev) {
prev = mdesc;
continue;
}
prev->tcd->dlast_sga = mdesc->tcd_paddr;
prev->tcd->e_sg = 1;
mdesc->tcd->start = 1;
prev = mdesc;
}
prev->tcd->start = 0;
prev->tcd->int_maj = 1;
/* Send first descriptor in chain into hardware */
memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
out_8(&mdma->regs->dmassrt, cid);
}
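For illustration only, the contract spelled out in the comment above boils down to the following caller pattern; this sketch simply mirrors what mpc_dma_tx_submit() and the interrupt path below already do (mchan and flags are assumed to be in scope).

	spin_lock_irqsave(&mchan->lock, flags);		/* (a) lock held       */
	if (list_empty(&mchan->active) &&		/* (b) nothing active  */
	    !list_empty(&mchan->queued))		/* (c) work is queued  */
		mpc_dma_execute(mchan);
	spin_unlock_irqrestore(&mchan->lock, flags);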
/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
struct mpc_dma_chan *mchan;
struct mpc_dma_desc *mdesc;
u32 status = is | es;
int ch;
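	/*
	 * fls() returns the 1-based index of the highest set bit (0 if none),
	 * so this loop services pending channels from the highest-numbered
	 * down, clearing each bit once the channel has been handled.
	 */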
while ((ch = fls(status) - 1) >= 0) {
status &= ~(1 << ch);
mchan = &mdma->channels[ch + off];
spin_lock(&mchan->lock);
/* Check error status */
if (es & (1 << ch))
list_for_each_entry(mdesc, &mchan->active, node)
mdesc->error = -EIO;
/* Execute queued descriptors */
list_splice_tail_init(&mchan->active, &mchan->completed);
if (!list_empty(&mchan->queued))
mpc_dma_execute(mchan);
spin_unlock(&mchan->lock);
}
}
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
struct mpc_dma *mdma = data;
uint es;
/* Save error status register */
es = in_be32(&mdma->regs->dmaes);
spin_lock(&mdma->error_status_lock);
if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
mdma->error_status = es;
spin_unlock(&mdma->error_status_lock);
/* Handle interrupt on each channel */
mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
in_be32(&mdma->regs->dmaerrh), 32);
mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
in_be32(&mdma->regs->dmaerrl), 0);
/* Ack interrupt on all channels */
out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
/* Schedule tasklet */
tasklet_schedule(&mdma->tasklet);
return IRQ_HANDLED;
}
/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
struct mpc_dma *mdma = (void *)data;
dma_cookie_t last_cookie = 0;
struct mpc_dma_chan *mchan;
struct mpc_dma_desc *mdesc;
struct dma_async_tx_descriptor *desc;
unsigned long flags;
LIST_HEAD(list);
uint es;
int i;
spin_lock_irqsave(&mdma->error_status_lock, flags);
es = mdma->error_status;
mdma->error_status = 0;
spin_unlock_irqrestore(&mdma->error_status_lock, flags);
/* Print nice error report */
if (es) {
dev_err(mdma->dma.dev,
"Hardware reported following error(s) on channel %u:\n",
MPC_DMA_DMAES_ERRCHN(es));
if (es & MPC_DMA_DMAES_GPE)
dev_err(mdma->dma.dev, "- Group Priority Error\n");
if (es & MPC_DMA_DMAES_CPE)
dev_err(mdma->dma.dev, "- Channel Priority Error\n");
if (es & MPC_DMA_DMAES_SAE)
dev_err(mdma->dma.dev, "- Source Address Error\n");
if (es & MPC_DMA_DMAES_SOE)
dev_err(mdma->dma.dev, "- Source Offset"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_DAE)
dev_err(mdma->dma.dev, "- Destination Address"
" Error\n");
if (es & MPC_DMA_DMAES_DOE)
dev_err(mdma->dma.dev, "- Destination Offset"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_NCE)
dev_err(mdma->dma.dev, "- NBytes/Citter"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_SGE)
dev_err(mdma->dma.dev, "- Scatter/Gather"
" Configuration Error\n");
if (es & MPC_DMA_DMAES_SBE)
dev_err(mdma->dma.dev, "- Source Bus Error\n");
if (es & MPC_DMA_DMAES_DBE)
dev_err(mdma->dma.dev, "- Destination Bus Error\n");
}
for (i = 0; i < mdma->dma.chancnt; i++) {
mchan = &mdma->channels[i];
/* Get all completed descriptors */
spin_lock_irqsave(&mchan->lock, flags);
if (!list_empty(&mchan->completed))
list_splice_tail_init(&mchan->completed, &list);
spin_unlock_irqrestore(&mchan->lock, flags);
if (list_empty(&list))
continue;
/* Execute callbacks and run dependencies */
list_for_each_entry(mdesc, &list, node) {
desc = &mdesc->desc;
if (desc->callback)
desc->callback(desc->callback_param);
last_cookie = desc->cookie;
dma_run_dependencies(desc);
}
/* Free descriptors */
spin_lock_irqsave(&mchan->lock, flags);
list_splice_tail_init(&list, &mchan->free);
mchan->completed_cookie = last_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
}
}
/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
struct mpc_dma_desc *mdesc;
unsigned long flags;
dma_cookie_t cookie;
mdesc = container_of(txd, struct mpc_dma_desc, desc);
spin_lock_irqsave(&mchan->lock, flags);
/* Move descriptor to queue */
list_move_tail(&mdesc->node, &mchan->queued);
/* If channel is idle, execute all queued descriptors */
if (list_empty(&mchan->active))
mpc_dma_execute(mchan);
/* Update cookie */
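	/* Cookies stay positive: on signed wrap-around fall back to 1
	 * (DMA_MIN_COOKIE); negative values are reserved for error codes. */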
cookie = mchan->chan.cookie + 1;
if (cookie <= 0)
cookie = 1;
mchan->chan.cookie = cookie;
mdesc->desc.cookie = cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
return cookie;
}
/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
unsigned long flags;
LIST_HEAD(descs);
int i;
/* Alloc DMA memory for Transfer Control Descriptors */
tcd = dma_alloc_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
&tcd_paddr, GFP_KERNEL);
if (!tcd)
return -ENOMEM;
/* Alloc descriptors for this channel */
for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
if (!mdesc) {
dev_notice(mdma->dma.dev, "Memory allocation error. "
"Allocated only %u descriptors\n", i);
break;
}
dma_async_tx_descriptor_init(&mdesc->desc, chan);
mdesc->desc.flags = DMA_CTRL_ACK;
mdesc->desc.tx_submit = mpc_dma_tx_submit;
mdesc->tcd = &tcd[i];
mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));
list_add_tail(&mdesc->node, &descs);
}
/* Return error only if no descriptors were allocated */
if (i == 0) {
dma_free_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
tcd, tcd_paddr);
return -ENOMEM;
}
spin_lock_irqsave(&mchan->lock, flags);
mchan->tcd = tcd;
mchan->tcd_paddr = tcd_paddr;
list_splice_tail_init(&descs, &mchan->free);
spin_unlock_irqrestore(&mchan->lock, flags);
/* Enable Error Interrupt */
out_8(&mdma->regs->dmaseei, chan->chan_id);
return 0;
}
/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc, *tmp;
struct mpc_dma_tcd *tcd;
dma_addr_t tcd_paddr;
unsigned long flags;
LIST_HEAD(descs);
spin_lock_irqsave(&mchan->lock, flags);
/* Channel must be idle */
BUG_ON(!list_empty(&mchan->prepared));
BUG_ON(!list_empty(&mchan->queued));
BUG_ON(!list_empty(&mchan->active));
BUG_ON(!list_empty(&mchan->completed));
/* Move data */
list_splice_tail_init(&mchan->free, &descs);
tcd = mchan->tcd;
tcd_paddr = mchan->tcd_paddr;
spin_unlock_irqrestore(&mchan->lock, flags);
/* Free DMA memory used by descriptors */
dma_free_coherent(mdma->dma.dev,
MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
tcd, tcd_paddr);
/* Free descriptors */
list_for_each_entry_safe(mdesc, tmp, &descs, node)
kfree(mdesc);
/* Disable Error Interrupt */
out_8(&mdma->regs->dmaceei, chan->chan_id);
}
/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
/*
* We are posting descriptors to the hardware as soon as
* they are ready, so this function does nothing.
*/
}
/* Check request completion status */
static enum dma_status
mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
unsigned long flags;
dma_cookie_t last_used;
dma_cookie_t last_complete;
spin_lock_irqsave(&mchan->lock, flags);
last_used = mchan->chan.cookie;
last_complete = mchan->completed_cookie;
spin_unlock_irqrestore(&mchan->lock, flags);
if (done)
*done = last_complete;
if (used)
*used = last_used;
return dma_async_is_complete(cookie, last_complete, last_used);
}
/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags)
{
struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
struct mpc_dma_desc *mdesc = NULL;
struct mpc_dma_tcd *tcd;
unsigned long iflags;
/* Get free descriptor */
spin_lock_irqsave(&mchan->lock, iflags);
if (!list_empty(&mchan->free)) {
mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
node);
list_del(&mdesc->node);
}
spin_unlock_irqrestore(&mchan->lock, iflags);
if (!mdesc)
return NULL;
mdesc->error = 0;
tcd = mdesc->tcd;
/* Prepare Transfer Control Descriptor for this transaction */
memset(tcd, 0, sizeof(struct mpc_dma_tcd));
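	/*
	 * OR-ing src, dst and len lets a single IS_ALIGNED() test confirm
	 * that all three share the same alignment, so the widest possible
	 * source/destination transfer size can be programmed below.
	 */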
if (IS_ALIGNED(src | dst | len, 32)) {
tcd->ssize = MPC_DMA_TSIZE_32;
tcd->dsize = MPC_DMA_TSIZE_32;
tcd->soff = 32;
tcd->doff = 32;
} else if (IS_ALIGNED(src | dst | len, 16)) {
tcd->ssize = MPC_DMA_TSIZE_16;
tcd->dsize = MPC_DMA_TSIZE_16;
tcd->soff = 16;
tcd->doff = 16;
} else if (IS_ALIGNED(src | dst | len, 4)) {
tcd->ssize = MPC_DMA_TSIZE_4;
tcd->dsize = MPC_DMA_TSIZE_4;
tcd->soff = 4;
tcd->doff = 4;
} else if (IS_ALIGNED(src | dst | len, 2)) {
tcd->ssize = MPC_DMA_TSIZE_2;
tcd->dsize = MPC_DMA_TSIZE_2;
tcd->soff = 2;
tcd->doff = 2;
} else {
tcd->ssize = MPC_DMA_TSIZE_1;
tcd->dsize = MPC_DMA_TSIZE_1;
tcd->soff = 1;
tcd->doff = 1;
}
tcd->saddr = src;
tcd->daddr = dst;
tcd->nbytes = len;
tcd->biter = 1;
tcd->citer = 1;
/* Place descriptor in prepared list */
spin_lock_irqsave(&mchan->lock, iflags);
list_add_tail(&mdesc->node, &mchan->prepared);
spin_unlock_irqrestore(&mchan->lock, iflags);
return &mdesc->desc;
}
static int __devinit mpc_dma_probe(struct of_device *op,
const struct of_device_id *match)
{
struct device_node *dn = op->node;
struct device *dev = &op->dev;
struct dma_device *dma;
struct mpc_dma *mdma;
struct mpc_dma_chan *mchan;
struct resource res;
ulong regs_start, regs_size;
int retval, i;
mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
if (!mdma) {
dev_err(dev, "Memory exhausted!\n");
return -ENOMEM;
}
mdma->irq = irq_of_parse_and_map(dn, 0);
if (mdma->irq == NO_IRQ) {
dev_err(dev, "Error mapping IRQ!\n");
return -EINVAL;
}
retval = of_address_to_resource(dn, 0, &res);
if (retval) {
dev_err(dev, "Error parsing memory region!\n");
return retval;
}
regs_start = res.start;
regs_size = res.end - res.start + 1;
if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
dev_err(dev, "Error requesting memory region!\n");
return -EBUSY;
}
mdma->regs = devm_ioremap(dev, regs_start, regs_size);
if (!mdma->regs) {
dev_err(dev, "Error mapping memory region!\n");
return -ENOMEM;
}
mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
+ MPC_DMA_TCD_OFFSET);
retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
mdma);
if (retval) {
dev_err(dev, "Error requesting IRQ!\n");
return -EINVAL;
}
spin_lock_init(&mdma->error_status_lock);
dma = &mdma->dma;
dma->dev = dev;
dma->chancnt = MPC_DMA_CHANNELS;
dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
dma->device_free_chan_resources = mpc_dma_free_chan_resources;
dma->device_issue_pending = mpc_dma_issue_pending;
dma->device_is_tx_complete = mpc_dma_is_tx_complete;
dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
INIT_LIST_HEAD(&dma->channels);
dma_cap_set(DMA_MEMCPY, dma->cap_mask);
for (i = 0; i < dma->chancnt; i++) {
mchan = &mdma->channels[i];
mchan->chan.device = dma;
mchan->chan.chan_id = i;
mchan->chan.cookie = 1;
mchan->completed_cookie = mchan->chan.cookie;
INIT_LIST_HEAD(&mchan->free);
INIT_LIST_HEAD(&mchan->prepared);
INIT_LIST_HEAD(&mchan->queued);
INIT_LIST_HEAD(&mchan->active);
INIT_LIST_HEAD(&mchan->completed);
spin_lock_init(&mchan->lock);
list_add_tail(&mchan->chan.device_node, &dma->channels);
}
tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
/*
* Configure DMA Engine:
* - Dynamic clock,
* - Round-robin group arbitration,
* - Round-robin channel arbitration.
*/
out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
/* Disable hardware DMA requests */
out_be32(&mdma->regs->dmaerqh, 0);
out_be32(&mdma->regs->dmaerql, 0);
/* Disable error interrupts */
out_be32(&mdma->regs->dmaeeih, 0);
out_be32(&mdma->regs->dmaeeil, 0);
/* Clear interrupts status */
out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
/* Route interrupts to IPIC */
out_be32(&mdma->regs->dmaihsa, 0);
out_be32(&mdma->regs->dmailsa, 0);
/* Register DMA engine */
dev_set_drvdata(dev, mdma);
retval = dma_async_device_register(dma);
if (retval) {
devm_free_irq(dev, mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
}
return retval;
}
static int __devexit mpc_dma_remove(struct of_device *op)
{
struct device *dev = &op->dev;
struct mpc_dma *mdma = dev_get_drvdata(dev);
dma_async_device_unregister(&mdma->dma);
devm_free_irq(dev, mdma->irq, mdma);
irq_dispose_mapping(mdma->irq);
return 0;
}
static struct of_device_id mpc_dma_match[] = {
{ .compatible = "fsl,mpc5121-dma", },
{},
};
static struct of_platform_driver mpc_dma_driver = {
.match_table = mpc_dma_match,
.probe = mpc_dma_probe,
.remove = __devexit_p(mpc_dma_remove),
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
},
};
static int __init mpc_dma_init(void)
{
return of_register_platform_driver(&mpc_dma_driver);
}
module_init(mpc_dma_init);
static void __exit mpc_dma_exit(void)
{
of_unregister_platform_driver(&mpc_dma_driver);
}
module_exit(mpc_dma_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
...@@ -4940,7 +4940,7 @@ static int ppc440spe_configure_raid_devices(void) ...@@ -4940,7 +4940,7 @@ static int ppc440spe_configure_raid_devices(void)
return ret; return ret;
} }
static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = { static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
{ .compatible = "ibm,dma-440spe", }, { .compatible = "ibm,dma-440spe", },
{ .compatible = "amcc,xor-accelerator", }, { .compatible = "amcc,xor-accelerator", },
{}, {},
......
...@@ -31,6 +31,8 @@ ...@@ -31,6 +31,8 @@
* if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
*/ */
typedef s32 dma_cookie_t; typedef s32 dma_cookie_t;
#define DMA_MIN_COOKIE 1
#define DMA_MAX_COOKIE INT_MAX
#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)
......