Commit ef08e782 authored by Linus Torvalds

Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine update from Vinod Koul:
 "This includes the cookie cleanup by Russell, the addition of context
  parameter for dmaengine APIs, more arm dmaengine driver cleanup by
  moving code to dmaengine, this time for imx by Javier and pl330 by
  Boojin along with the usual driver fixes."

Fix up some fairly trivial conflicts with various other cleanups.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (67 commits)
  dmaengine: imx: fix the build failure on x86_64
  dmaengine: i.MX: Fix merge of cookie branch.
  dmaengine: i.MX: Add support for interleaved transfers.
  dmaengine: imx-dma: use 'dev_dbg' and 'dev_warn' for messages.
  dmaengine: imx-dma: remove 'imx_dmav1_baseaddr' and 'dma_clk'.
  dmaengine: imx-dma: remove unused arg of imxdma_sg_next.
  dmaengine: imx-dma: remove internal structure.
  dmaengine: imx-dma: remove 'resbytes' field of 'internal' structure.
  dmaengine: imx-dma: remove 'in_use' field of 'internal' structure.
  dmaengine: imx-dma: remove sg member from internal structure.
  dmaengine: imx-dma: remove 'imxdma_setup_sg_hw' function.
  dmaengine: imx-dma: remove 'imxdma_config_channel_hw' function.
  dmaengine: imx-dma: remove 'imxdma_setup_mem2mem_hw' function.
  dmaengine: imx-dma: remove dma_mode member of internal structure.
  dmaengine: imx-dma: remove data member from internal structure.
  dmaengine: imx-dma: merge old dma-v1.c with imx-dma.c
  dmaengine: at_hdmac: add slave config operation
  dmaengine: add context parameter to prep_slave_sg and prep_dma_cyclic
  dmaengine/dma_slave: introduce inline wrappers
  dma: imx-sdma: Treat firmware messages as warnings instead of erros
  ...
parents 71db34fc 5b2e02e4
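
The headline API change in this pull is visible throughout the hunks below: device_prep_slave_sg() and device_prep_dma_cyclic() gain a trailing void *context argument, and client code is moved onto inline wrappers so it no longer reaches through chan->device itself. A sketch of the wrapper shape, assuming the include/linux/dmaengine.h of this release (the wrapper itself is not part of the hunks shown here); ordinary slave transfers pass a NULL context:

	static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
		enum dma_transfer_direction dir, unsigned long flags)
	{
		/* forward to the driver hook; NULL context for plain slave xfers */
		return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
							  dir, flags, NULL);
	}

dmaengine_prep_dma_cyclic() follows the same pattern, which is why conversions such as the plat-samsung and ste_dma40 hunks below are one-line changes.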
@@ -24,9 +24,6 @@ config ARM_VIC_NR
 config ICST
 	bool
 
-config PL330
-	bool
-
 config SA1111
 	bool
 	select DMABOUNCE if !ARCH_PXA
......
@@ -5,7 +5,6 @@
 obj-$(CONFIG_ARM_GIC)		+= gic.o
 obj-$(CONFIG_ARM_VIC)		+= vic.o
 obj-$(CONFIG_ICST)		+= icst.o
-obj-$(CONFIG_PL330)		+= pl330.o
 obj-$(CONFIG_SA1111)		+= sa1111.o
 obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o
 obj-$(CONFIG_DMABOUNCE)		+= dmabounce.o
......
This diff is collapsed.
@@ -49,7 +49,6 @@ struct iop_adma_device {
 /**
  * struct iop_adma_chan - internal representation of an ADMA device
  * @pending: allows batching of hardware operations
- * @completed_cookie: identifier for the most recently completed operation
  * @lock: serializes enqueue/dequeue operations to the slot pool
  * @mmr_base: memory mapped register base
  * @chain: device chain view of the descriptors
@@ -62,7 +61,6 @@ struct iop_adma_device {
  */
 struct iop_adma_chan {
 	int pending;
-	dma_cookie_t completed_cookie;
 	spinlock_t lock; /* protects the descriptor slot pool */
 	void __iomem *mmr_base;
 	struct list_head chain;
......
/* linux/include/asm/hardware/pl330.h
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __PL330_CORE_H
#define __PL330_CORE_H

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32

enum pl330_srccachectrl {
	SCCTRL0 = 0,	/* Noncacheable and nonbufferable */
	SCCTRL1,	/* Bufferable only */
	SCCTRL2,	/* Cacheable, but do not allocate */
	SCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	SINVALID1,
	SINVALID2,
	SCCTRL6,	/* Cacheable write-through, allocate on reads only */
	SCCTRL7,	/* Cacheable write-back, allocate on reads only */
};

enum pl330_dstcachectrl {
	DCCTRL0 = 0,	/* Noncacheable and nonbufferable */
	DCCTRL1,	/* Bufferable only */
	DCCTRL2,	/* Cacheable, but do not allocate */
	DCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	DINVALID1,	/* AWCACHE = 0x1000 */
	DINVALID2,
	DCCTRL6,	/* Cacheable write-through, allocate on writes only */
	DCCTRL7,	/* Cacheable write-back, allocate on writes only */
};

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32		periph_id;
	u32		pcell_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};

/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Client can freely use it. */
	void	*client_data;
	/* PL330 core data, Client must not touch it. */
	void	*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
	/*
	 * If the DMAC has some reset mechanism, then the
	 * client may want to provide pointer to the method.
	 */
	void (*dmac_reset)(struct pl330_info *pi);
};

enum pl330_byteswap {
	SWAP_NO = 0,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/**
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;
	enum pl330_byteswap swap;
};

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
	/*
	 * Pointer to next xfer in the list.
	 * The last xfer in the req must point to NULL.
	 */
	struct pl330_xfer *next;
};

/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* The all xfers in the request were success. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};

enum pl330_reqtype {
	MEMTOMEM,
	MEMTODEV,
	DEVTOMEM,
	DEVTODEV,
};

/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum pl330_reqtype rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Unique token for this xfer, set by the client. */
	void *token;
	/* Callback to be called after xfer. */
	void (*xfer_cb)(void *token, enum pl330_op_err err);
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
};

/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
	/*
	 * If the DMAC engine halted due to some error,
	 * the client should remove-add DMAC.
	 */
	bool dmac_halted;
	/*
	 * If channel is halted due to some error,
	 * the client should ABORT/FLUSH and START the channel.
	 */
	bool faulting;
	/* Location of last load */
	u32 src_addr;
	/* Location of last store */
	u32 dst_addr;
	/*
	 * Pointer to the currently active req, NULL if channel is
	 * inactive, even though the requests may be present.
	 */
	struct pl330_req *top_req;
	/* Pointer to req waiting second in the queue if any. */
	struct pl330_req *wait_req;
};

enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};

extern int pl330_add(struct pl330_info *);
extern void pl330_del(struct pl330_info *pi);
extern int pl330_update(const struct pl330_info *pi);
extern void pl330_release_channel(void *ch_id);
extern void *pl330_request_channel(const struct pl330_info *pi);
extern int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus);
extern int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op);
extern int pl330_submit_req(void *ch_id, struct pl330_req *r);

#endif /* __PL330_CORE_H */
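
The header above is the raw PL330 core interface removed by this merge (its functionality is folded into drivers/dma/pl330.c). For orientation only, a hedged sketch of how a client drove a transfer through this old interface, using nothing beyond the declarations above; the addresses and peripheral index are made-up placeholders:

	/* Sketch under assumptions: 'pi' was already registered via pl330_add(),
	 * and the addresses/peripheral index below are hypothetical. */
	static void pl330_example_mem2dev(struct pl330_info *pi)
	{
		struct pl330_xfer x = {
			.src_addr = 0x20000000,	/* hypothetical source buffer */
			.dst_addr = 0x10009000,	/* hypothetical peripheral FIFO */
			.bytes = 512,
			.next = NULL,		/* single xfer ends the list */
		};
		struct pl330_req r = {
			.rqtype = MEMTODEV,
			.peri = 3,		/* hypothetical peripheral index */
			.cfg = NULL,		/* reuse last working configuration */
			.x = &x,
		};
		void *ch = pl330_request_channel(pi);

		if (!ch)
			return;
		if (pl330_submit_req(ch, &r) == 0)	/* queue the request */
			pl330_chan_ctrl(ch, PL330_OP_START);	/* kick the channel */
	}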
@@ -437,7 +437,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
 
 	/* DMA slave channel configuration */
 	atslave->dma_dev = &at_hdmac_device.dev;
-	atslave->reg_width = AT_DMA_SLAVE_WIDTH_32BIT;
 	atslave->cfg = ATC_FIFOCFG_HALFFIFO
 			| ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
 	atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
......
@@ -23,18 +23,6 @@ struct at_dma_platform_data {
 	dma_cap_mask_t	cap_mask;
 };
 
-/**
- * enum at_dma_slave_width - DMA slave register access width.
- * @AT_DMA_SLAVE_WIDTH_8BIT: Do 8-bit slave register accesses
- * @AT_DMA_SLAVE_WIDTH_16BIT: Do 16-bit slave register accesses
- * @AT_DMA_SLAVE_WIDTH_32BIT: Do 32-bit slave register accesses
- */
-enum at_dma_slave_width {
-	AT_DMA_SLAVE_WIDTH_8BIT = 0,
-	AT_DMA_SLAVE_WIDTH_16BIT,
-	AT_DMA_SLAVE_WIDTH_32BIT,
-};
-
 /**
  * struct at_dma_slave - Controller-specific information about a slave
  * @dma_dev: required DMA master device
@@ -48,9 +36,6 @@ enum at_dma_slave_width {
  */
 struct at_dma_slave {
 	struct device		*dma_dev;
-	dma_addr_t		tx_reg;
-	dma_addr_t		rx_reg;
-	enum at_dma_slave_width	reg_width;
 	u32			cfg;
 	u32			ctrla;
 };
......
-config IMX_HAVE_DMA_V1
-	bool
-
 config HAVE_IMX_GPC
 	bool
 
@@ -38,7 +35,6 @@ config SOC_IMX1
 	bool
 	select ARCH_MX1
 	select CPU_ARM920T
-	select IMX_HAVE_DMA_V1
 	select IMX_HAVE_IOMUX_V1
 	select MXC_AVIC
 
@@ -46,7 +42,6 @@ config SOC_IMX21
 	bool
 	select MACH_MX21
 	select CPU_ARM926T
-	select IMX_HAVE_DMA_V1
 	select IMX_HAVE_IOMUX_V1
 	select MXC_AVIC
 
@@ -61,7 +56,6 @@ config SOC_IMX27
 	bool
 	select MACH_MX27
 	select CPU_ARM926T
-	select IMX_HAVE_DMA_V1
 	select IMX_HAVE_IOMUX_V1
 	select MXC_AVIC
......
-obj-$(CONFIG_IMX_HAVE_DMA_V1) += dma-v1.o
 obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o
 obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o
......
This diff is collapsed.
/*
 * linux/arch/arm/mach-imx/include/mach/dma-v1.h
 *
 * i.MX DMA registration and IRQ dispatching
 *
 * Copyright 2006 Pavel Pisa <pisa@cmp.felk.cvut.cz>
 * Copyright 2008 Juergen Beisert, <kernel@pengutronix.de>
 * Copyright 2008 Sascha Hauer, <s.hauer@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301, USA.
 */

#ifndef __MACH_DMA_V1_H__
#define __MACH_DMA_V1_H__

#define imx_has_dma_v1()	(cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())

#include <mach/dma.h>

#define IMX_DMA_CHANNELS	16

#define DMA_MODE_READ		0
#define DMA_MODE_WRITE		1
#define DMA_MODE_MASK		1

#define MX1_DMA_REG(offset)	MX1_IO_ADDRESS(MX1_DMA_BASE_ADDR + (offset))

/* DMA Interrupt Mask Register */
#define MX1_DMA_DIMR		MX1_DMA_REG(0x08)

/* Channel Control Register */
#define MX1_DMA_CCR(x)		MX1_DMA_REG(0x8c + ((x) << 6))

#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

#define IMX_DMA_ERR_BURST	(1 << 0)
#define IMX_DMA_ERR_REQUEST	(1 << 1)
#define IMX_DMA_ERR_TRANSFER	(1 << 2)
#define IMX_DMA_ERR_BUFFER	(1 << 3)
#define IMX_DMA_ERR_TIMEOUT	(1 << 4)

int
imx_dma_config_channel(int channel, unsigned int config_port,
	unsigned int config_mem, unsigned int dmareq, int hw_chaining);

void
imx_dma_config_burstlen(int channel, unsigned int burstlen);

int
imx_dma_setup_single(int channel, dma_addr_t dma_address,
		unsigned int dma_length, unsigned int dev_addr,
		unsigned int dmamode);

/*
 * Use this flag as the dma_length argument to imx_dma_setup_sg()
 * to create an endless running dma loop. The end of the scatterlist
 * must be linked to the beginning for this to work.
 */
#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)

int
imx_dma_setup_sg(int channel, struct scatterlist *sg,
		unsigned int sgcount, unsigned int dma_length,
		unsigned int dev_addr, unsigned int dmamode);

int
imx_dma_setup_handlers(int channel,
		void (*irq_handler) (int, void *),
		void (*err_handler) (int, void *, int), void *data);

int
imx_dma_setup_progression_handler(int channel,
		void (*prog_handler) (int, void*, struct scatterlist*));

void imx_dma_enable(int channel);

void imx_dma_disable(int channel);

int imx_dma_request(int channel, const char *name);

void imx_dma_free(int channel);

int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);

#endif /* __MACH_DMA_V1_H__ */
@@ -200,8 +200,7 @@ dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
 	sg.dma_address = addr;
 	sg.length = size;
 
-	return chan->device->device_prep_slave_sg(chan, &sg, 1,
-						  direction, flags);
+	return dmaengine_prep_slave_sg(chan, &sg, 1, direction, flags);
 }
 
 #else
......
@@ -79,11 +79,11 @@ static int samsung_dmadev_prepare(unsigned ch,
 			info->len, offset_in_page(info->buf));
 		sg_dma_address(&sg) = info->buf;
 
-		desc = chan->device->device_prep_slave_sg(chan,
+		desc = dmaengine_prep_slave_sg(chan,
 			&sg, 1, info->direction, DMA_PREP_INTERRUPT);
 		break;
 	case DMA_CYCLIC:
-		desc = chan->device->device_prep_dma_cyclic(chan,
+		desc = dmaengine_prep_dma_cyclic(chan,
 			info->buf, info->len, info->period, info->direction);
 		break;
 	default:
......
@@ -1351,7 +1351,6 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
 		goto fail;
 
 	slave->sdata.dma_dev = &dw_dmac0_device.dev;
-	slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
 	slave->sdata.cfg_hi = (DWC_CFGH_SRC_PER(0)
				| DWC_CFGH_DST_PER(1));
 	slave->sdata.cfg_lo &= ~(DWC_CFGL_HS_DST_POL
@@ -2046,27 +2045,19 @@ at32_add_device_ac97c(unsigned int id, struct ac97c_platform_data *data,
 	/* Check if DMA slave interface for capture should be configured. */
 	if (flags & AC97C_CAPTURE) {
 		rx_dws->dma_dev = &dw_dmac0_device.dev;
-		rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
 		rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
 		rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
 		rx_dws->src_master = 0;
 		rx_dws->dst_master = 1;
-		rx_dws->src_msize = DW_DMA_MSIZE_1;
-		rx_dws->dst_msize = DW_DMA_MSIZE_1;
-		rx_dws->fc = DW_DMA_FC_D_P2M;
 	}
 
 	/* Check if DMA slave interface for playback should be configured. */
 	if (flags & AC97C_PLAYBACK) {
 		tx_dws->dma_dev = &dw_dmac0_device.dev;
-		tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
 		tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
 		tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
 		tx_dws->src_master = 0;
 		tx_dws->dst_master = 1;
-		tx_dws->src_msize = DW_DMA_MSIZE_1;
-		tx_dws->dst_msize = DW_DMA_MSIZE_1;
-		tx_dws->fc = DW_DMA_FC_D_M2P;
 	}
 
 	if (platform_device_add_data(pdev, data,
@@ -2136,14 +2127,10 @@ at32_add_device_abdac(unsigned int id, struct atmel_abdac_pdata *data)
 	dws = &data->dws;
 
 	dws->dma_dev = &dw_dmac0_device.dev;
-	dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
 	dws->cfg_hi = DWC_CFGH_DST_PER(2);
 	dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
 	dws->src_master = 0;
 	dws->dst_master = 1;
-	dws->src_msize = DW_DMA_MSIZE_1;
-	dws->dst_msize = DW_DMA_MSIZE_1;
-	dws->fc = DW_DMA_FC_D_M2P;
 
 	if (platform_device_add_data(pdev, data,
				     sizeof(struct atmel_abdac_pdata)))
......
@@ -14,11 +14,4 @@ struct mci_dma_data {
 #define slave_data_ptr(s)	(&(s)->sdata)
 #define find_slave_dev(s)	((s)->sdata.dma_dev)
 
-#define setup_dma_addr(s, t, r) do {			\
-	if (s) {					\
-		(s)->sdata.tx_reg = (t);		\
-		(s)->sdata.rx_reg = (r);		\
-	}						\
-} while (0)
-
 #endif /* __MACH_ATMEL_MCI_H */
@@ -201,7 +201,6 @@ config PL330_DMA
 	tristate "DMA API Driver for PL330"
 	select DMA_ENGINE
 	depends on ARM_AMBA
-	select PL330
 	help
 	  Select if your platform has one or more PL330 DMACs.
 	  You need to provide platform specific settings via
@@ -231,7 +230,7 @@ config IMX_SDMA
 
 config IMX_DMA
 	tristate "i.MX DMA support"
-	depends on IMX_HAVE_DMA_V1
+	depends on ARCH_MXC
 	select DMA_ENGINE
 	help
 	  Support the i.MX DMA engine. This engine is integrated into
......
@@ -85,6 +85,8 @@
 #include <linux/slab.h>
 #include <asm/hardware/pl080.h>
 
+#include "dmaengine.h"
+
 #define DRIVER_NAME	"pl08xdmac"
 
 static struct amba_driver pl08x_amba_driver;
@@ -649,7 +651,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	}
 
 	if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
-	    (bd.srcbus.addr % bd.srcbus.buswidth)) {
+	    (bd.dstbus.addr % bd.dstbus.buswidth)) {
 		dev_err(&pl08x->adev->dev,
 			"%s src & dst address must be aligned to src"
 			" & dst width if peripheral is flow controller",
@@ -919,13 +921,10 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
 	struct pl08x_txd *txd = to_pl08x_txd(tx);
 	unsigned long flags;
+	dma_cookie_t cookie;
 
 	spin_lock_irqsave(&plchan->lock, flags);
-
-	plchan->chan.cookie += 1;
-	if (plchan->chan.cookie < 0)
-		plchan->chan.cookie = 1;
-	tx->cookie = plchan->chan.cookie;
+	cookie = dma_cookie_assign(tx);
 
 	/* Put this onto the pending list */
 	list_add_tail(&txd->node, &plchan->pend_list);
@@ -945,7 +944,7 @@ static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
 
 	spin_unlock_irqrestore(&plchan->lock, flags);
 
-	return tx->cookie;
+	return cookie;
 }
 
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -965,31 +964,17 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 	dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
 	enum dma_status ret;
-	u32 bytesleft = 0;
 
-	last_used = plchan->chan.cookie;
-	last_complete = plchan->lc;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-	if (ret == DMA_SUCCESS) {
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_SUCCESS)
 		return ret;
-	}
 
 	/*
 	 * This cookie not complete yet
+	 * Get number of bytes left in the active transactions and queue
 	 */
-	last_used = plchan->chan.cookie;
-	last_complete = plchan->lc;
-
-	/* Get number of bytes left in the active transactions and queue */
-	bytesleft = pl08x_getbytes_chan(plchan);
-
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 bytesleft);
+	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
 
 	if (plchan->state == PL08X_CHAN_PAUSED)
 		return DMA_PAUSED;
@@ -1139,6 +1124,8 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
 	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
+	plchan->device_fc = config->device_fc;
+
 	if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
 		plchan->src_addr = config->src_addr;
 		plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
@@ -1326,7 +1313,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags)
+		unsigned long flags, void *context)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
@@ -1370,7 +1357,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 		return NULL;
 	}
 
-	if (plchan->cd->device_fc)
+	if (plchan->device_fc)
 		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
 			PL080_FLOW_PER2MEM_PER;
 	else
@@ -1541,7 +1528,7 @@ static void pl08x_tasklet(unsigned long data)
 	if (txd) {
 		/* Update last completed */
-		plchan->lc = txd->tx.cookie;
+		dma_cookie_complete(&txd->tx);
 	}
 
 	/* If a new descriptor is queued, set it up plchan->at is NULL here */
@@ -1722,8 +1709,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
			 chan->name);
 
 		chan->chan.device = dmadev;
-		chan->chan.cookie = 0;
-		chan->lc = 0;
+		dma_cookie_init(&chan->chan);
 
 		spin_lock_init(&chan->lock);
 		INIT_LIST_HEAD(&chan->pend_list);
......
@@ -27,6 +27,7 @@
 #include <linux/of_device.h>
 
 #include "at_hdmac_regs.h"
+#include "dmaengine.h"
 
 /*
  * Glossary
@@ -191,27 +192,6 @@ static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
 	*prev = desc;
 }
 
-/**
- * atc_assign_cookie - compute and assign new cookie
- * @atchan: channel we work on
- * @desc: descriptor to assign cookie for
- *
- * Called with atchan->lock held and bh disabled
- */
-static dma_cookie_t
-atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
-{
-	dma_cookie_t cookie = atchan->chan_common.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	atchan->chan_common.cookie = cookie;
-	desc->txd.cookie = cookie;
-
-	return cookie;
-}
-
 /**
  * atc_dostart - starts the DMA engine for real
  * @atchan: the channel we want to start
@@ -269,7 +249,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);
 
-	atchan->completed_cookie = txd->cookie;
+	dma_cookie_complete(txd);
 
 	/* move children to free_list */
 	list_splice_init(&desc->tx_list, &atchan->free_list);
@@ -547,7 +527,7 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 	unsigned long flags;
 
 	spin_lock_irqsave(&atchan->lock, flags);
-	cookie = atc_assign_cookie(atchan, desc);
+	cookie = dma_cookie_assign(tx);
 
 	if (list_empty(&atchan->active_list)) {
 		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
@@ -659,14 +639,16 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
  * @sg_len: number of entries in @scatterlist
  * @direction: DMA direction
  * @flags: tx descriptor status flags
+ * @context: transaction context (ignored)
  */
 static struct dma_async_tx_descriptor *
 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
-		unsigned long flags)
+		unsigned long flags, void *context)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma_slave	*atslave = chan->private;
+	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
 	struct at_desc		*first = NULL;
 	struct at_desc		*prev = NULL;
 	u32			ctrla;
@@ -688,19 +670,18 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		return NULL;
 	}
 
-	reg_width = atslave->reg_width;
-
 	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
 	ctrlb = ATC_IEN;
 
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
+		reg_width = convert_buswidth(sconfig->dst_addr_width);
 		ctrla |= ATC_DST_WIDTH(reg_width);
 		ctrlb |= ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
-		reg = atslave->tx_reg;
+		reg = sconfig->dst_addr;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct at_desc	*desc;
 			u32		len;
@@ -728,13 +709,14 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		}
 		break;
 	case DMA_DEV_TO_MEM:
+		reg_width = convert_buswidth(sconfig->src_addr_width);
 		ctrla |= ATC_SRC_WIDTH(reg_width);
 		ctrlb |= ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
-		reg = atslave->rx_reg;
+		reg = sconfig->src_addr;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct at_desc	*desc;
 			u32		len;
@@ -810,12 +792,15 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
 * atc_dma_cyclic_fill_desc - Fill one period decriptor
 */
 static int
-atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
+atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
-		size_t period_len, enum dma_transfer_direction direction)
+		unsigned int reg_width, size_t period_len,
+		enum dma_transfer_direction direction)
 {
-	u32 ctrla;
-	unsigned int reg_width = atslave->reg_width;
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+	struct at_dma_slave	*atslave = chan->private;
+	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
+	u32			ctrla;
 
 	/* prepare common CRTLA value */
 	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
@@ -826,7 +811,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 	switch (direction) {
 	case DMA_MEM_TO_DEV:
 		desc->lli.saddr = buf_addr + (period_len * period_index);
-		desc->lli.daddr = atslave->tx_reg;
+		desc->lli.daddr = sconfig->dst_addr;
 		desc->lli.ctrla = ctrla;
 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
@@ -836,7 +821,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 		break;
 	case DMA_DEV_TO_MEM:
-		desc->lli.saddr = atslave->rx_reg;
+		desc->lli.saddr = sconfig->src_addr;
 		desc->lli.daddr = buf_addr + (period_len * period_index);
 		desc->lli.ctrla = ctrla;
 		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
@@ -860,16 +845,20 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
+ * @context: transfer context (ignored)
 */
 static struct dma_async_tx_descriptor *
 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-		size_t period_len, enum dma_transfer_direction direction)
+		size_t period_len, enum dma_transfer_direction direction,
+		void *context)
 {
 	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
 	struct at_dma_slave	*atslave = chan->private;
+	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
 	struct at_desc		*first = NULL;
 	struct at_desc		*prev = NULL;
 	unsigned long		was_cyclic;
+	unsigned int		reg_width;
 	unsigned int		periods = buf_len / period_len;
 	unsigned int		i;
@@ -889,8 +878,13 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		return NULL;
 	}
 
+	if (sconfig->direction == DMA_MEM_TO_DEV)
+		reg_width = convert_buswidth(sconfig->dst_addr_width);
+	else
+		reg_width = convert_buswidth(sconfig->src_addr_width);
+
 	/* Check for too big/unaligned periods and unaligned DMA buffer */
-	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
+	if (atc_dma_cyclic_check_values(reg_width, buf_addr,
			period_len, direction))
 		goto err_out;
@@ -902,8 +896,8 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		if (!desc)
 			goto err_desc_get;
 
-		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
-				period_len, direction))
+		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
+				reg_width, period_len, direction))
 			goto err_desc_get;
 
 		atc_desc_chain(&first, &prev, desc);
@@ -926,6 +920,23 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	return NULL;
 }
 
+static int set_runtime_config(struct dma_chan *chan,
+			      struct dma_slave_config *sconfig)
+{
+	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
+
+	/* Check if it is chan is configured for slave transfers */
+	if (!chan->private)
+		return -EINVAL;
+
+	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
+
+	convert_burst(&atchan->dma_sconfig.src_maxburst);
+	convert_burst(&atchan->dma_sconfig.dst_maxburst);
+
+	return 0;
+}
+
 static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
@@ -986,6 +997,8 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
 		spin_unlock_irqrestore(&atchan->lock, flags);
+	} else if (cmd == DMA_SLAVE_CONFIG) {
+		return set_runtime_config(chan, (struct dma_slave_config *)arg);
 	} else {
 		return -ENXIO;
 	}
@@ -1016,26 +1029,20 @@ atc_tx_status(struct dma_chan *chan,
 	spin_lock_irqsave(&atchan->lock, flags);
 
-	last_complete = atchan->completed_cookie;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
+	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret != DMA_SUCCESS) {
 		atc_cleanup_descriptors(atchan);
 
-		last_complete = atchan->completed_cookie;
-		last_used = chan->cookie;
-
-		ret = dma_async_is_complete(cookie, last_complete, last_used);
+		ret = dma_cookie_status(chan, cookie, txstate);
 	}
 
+	last_complete = chan->completed_cookie;
+	last_used = chan->cookie;
+
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	if (ret != DMA_SUCCESS)
-		dma_set_tx_state(txstate, last_complete, last_used,
-			atc_first_active(atchan)->len);
-	else
-		dma_set_tx_state(txstate, last_complete, last_used, 0);
+		dma_set_residue(txstate, atc_first_active(atchan)->len);
 
 	if (atc_chan_is_paused(atchan))
 		ret = DMA_PAUSED;
@@ -1129,7 +1136,7 @@ static int atc_alloc_chan_resources(struct dma_chan *chan)
 	spin_lock_irqsave(&atchan->lock, flags);
 	atchan->descs_allocated = i;
 	list_splice(&tmp_list, &atchan->free_list);
-	atchan->completed_cookie = chan->cookie = 1;
+	dma_cookie_init(chan);
 	spin_unlock_irqrestore(&atchan->lock, flags);
 
 	/* channel parameters */
@@ -1329,7 +1336,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 		struct at_dma_chan	*atchan = &atdma->chan[i];
 
 		atchan->chan_common.device = &atdma->dma_common;
-		atchan->chan_common.cookie = atchan->completed_cookie = 1;
+		dma_cookie_init(&atchan->chan_common);
 		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);
......
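
The at_hdmac conversion above is the consumer-visible half of dropping tx_reg/rx_reg/reg_width from struct at_dma_slave: that information now arrives through the generic struct dma_slave_config, routed to set_runtime_config() by atc_control(DMA_SLAVE_CONFIG). A minimal sketch of a client under the new scheme; the FIFO address below is a hypothetical placeholder:

	/* Sketch only: 0xfffa4000 stands in for a peripheral FIFO address,
	 * which real users take from platform data. */
	static int example_slave_setup(struct dma_chan *chan)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= 0xfffa4000,			/* hypothetical */
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* replaces reg_width */
			.dst_maxburst	= 16,
		};

		/* wrapper that issues DMA_SLAVE_CONFIG to atc_control() */
		return dmaengine_slave_config(chan, &cfg);
	}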
@@ -207,8 +207,8 @@ enum atc_status {
 * @save_cfg: configuration register that is saved on suspend/resume cycle
 * @save_dscr: for cyclic operations, preserve next descriptor address in
 *	the cyclic list on suspend/resume cycle
+ * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG
 * @lock: serializes enqueue/dequeue operations to descriptors lists
- * @completed_cookie: identifier for the most recently completed operation
 * @active_list: list of descriptors dmaengine is being running on
 * @queue: list of descriptors ready to be submitted to engine
 * @free_list: list of descriptors usable by the channel
@@ -223,11 +223,11 @@ struct at_dma_chan {
 	struct tasklet_struct	tasklet;
 	u32			save_cfg;
 	u32			save_dscr;
+	struct dma_slave_config dma_sconfig;
 
 	spinlock_t		lock;
 
 	/* these other elements are all protected by lock */
-	dma_cookie_t		completed_cookie;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
@@ -245,6 +245,36 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
 	return container_of(dchan, struct at_dma_chan, chan_common);
 }
 
+/*
+ * Fix sconfig's burst size according to at_hdmac. We need to convert them as:
+ * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, 32 -> 4, 64 -> 5, 128 -> 6, 256 -> 7.
+ *
+ * This can be done by finding most significant bit set.
+ */
+static inline void convert_burst(u32 *maxburst)
+{
+	if (*maxburst > 1)
+		*maxburst = fls(*maxburst) - 2;
+	else
+		*maxburst = 0;
+}
+
+/*
+ * Fix sconfig's bus width according to at_hdmac.
+ * 1 byte -> 0, 2 bytes -> 1, 4 bytes -> 2.
+ */
+static inline u8 convert_buswidth(enum dma_slave_buswidth addr_width)
+{
+	switch (addr_width) {
+	case DMA_SLAVE_BUSWIDTH_2_BYTES:
+		return 1;
+	case DMA_SLAVE_BUSWIDTH_4_BYTES:
+		return 2;
+	default:
+		/* For 1 byte width or fallback */
+		return 0;
+	}
+}
+
 /*--  Controller  ------------------------------------------------------*/
......
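
To make convert_burst() above concrete: a dst_maxburst of 16 becomes fls(16) - 2 = 5 - 2 = 3, matching the "16 -> 3" entry in the comment's table, while a maxburst of 1 takes the else branch and maps to 0. convert_buswidth() likewise turns the dma_slave_buswidth enum into the 0/1/2 encoding the controller's width fields expect.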
@@ -24,6 +24,7 @@
 #include <mach/coh901318.h>
 
 #include "coh901318_lli.h"
+#include "dmaengine.h"
 
 #define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
@@ -59,7 +60,6 @@ struct coh901318_base {
 struct coh901318_chan {
 	spinlock_t lock;
 	int allocated;
-	int completed;
 	int id;
 	int stopped;
@@ -318,20 +318,6 @@ static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
 	return 0;
 }
 
-static dma_cookie_t
-coh901318_assign_cookie(struct coh901318_chan *cohc,
-			struct coh901318_desc *cohd)
-{
-	dma_cookie_t cookie = cohc->chan.cookie;
-
-	if (++cookie < 0)
-		cookie = 1;
-
-	cohc->chan.cookie = cookie;
-	cohd->desc.cookie = cookie;
-
-	return cookie;
-}
-
 static struct coh901318_desc *
 coh901318_desc_get(struct coh901318_chan *cohc)
@@ -705,7 +691,7 @@ static void dma_tasklet(unsigned long data)
 	callback_param = cohd_fin->desc.callback_param;
 
 	/* sign this job as completed on the channel */
-	cohc->completed = cohd_fin->desc.cookie;
+	dma_cookie_complete(&cohd_fin->desc);
 
 	/* release the lli allocation and remove the descriptor */
 	coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli);
@@ -929,7 +915,7 @@ static int coh901318_alloc_chan_resources(struct dma_chan *chan)
 	coh901318_config(cohc, NULL);
 
 	cohc->allocated = 1;
-	cohc->completed = chan->cookie = 1;
+	dma_cookie_init(chan);
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
@@ -966,16 +952,16 @@ coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
			desc);
 	struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
 	unsigned long flags;
+	dma_cookie_t cookie;
 
 	spin_lock_irqsave(&cohc->lock, flags);
-
-	tx->cookie = coh901318_assign_cookie(cohc, cohd);
+	cookie = dma_cookie_assign(tx);
 
 	coh901318_desc_queue(cohc, cohd);
 
 	spin_unlock_irqrestore(&cohc->lock, flags);
 
-	return tx->cookie;
+	return cookie;
 }
 
 static struct dma_async_tx_descriptor *
@@ -1035,7 +1021,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 static struct dma_async_tx_descriptor *
 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, enum dma_transfer_direction direction,
-			unsigned long flags)
+			unsigned long flags, void *context)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	struct coh901318_lli *lli;
@@ -1165,17 +1151,12 @@ coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		 struct dma_tx_state *txstate)
 {
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
-	dma_cookie_t last_used;
-	dma_cookie_t last_complete;
-	int ret;
+	enum dma_status ret;
 
-	last_complete = cohc->completed;
-	last_used = chan->cookie;
-
-	ret = dma_async_is_complete(cookie, last_complete, last_used);
-
-	dma_set_tx_state(txstate, last_complete, last_used,
-			 coh901318_get_bytes_left(chan));
+	ret = dma_cookie_status(chan, cookie, txstate);
+	/* FIXME: should be conditional on ret != DMA_SUCCESS? */
+	dma_set_residue(txstate, coh901318_get_bytes_left(chan));
 
 	if (ret == DMA_IN_PROGRESS && cohc->stopped)
 		ret = DMA_PAUSED;
......
@@ -510,8 +510,8 @@ struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, v
				dma_chan_name(chan));
			list_del_rcu(&device->global_node);
		} else if (err)
-			pr_debug("dmaengine: failed to get %s: (%d)\n",
-				 dma_chan_name(chan), err);
+			pr_debug("%s: failed to get %s: (%d)\n",
+				 __func__, dma_chan_name(chan), err);
		else
			break;
		if (--device->privatecnt == 0)
@@ -564,8 +564,8 @@ void dmaengine_get(void)
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
-				pr_err("dmaengine: failed to get %s: (%d)\n",
-				       dma_chan_name(chan), err);
+				pr_err("%s: failed to get %s: (%d)\n",
+				       __func__, dma_chan_name(chan), err);
		}
	}
......
/*
 * The contents of this file are private to DMA engine drivers, and is not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}

/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	cookie = chan->cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}

/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channels completed
 * cookie marker.  Zero the descriptors cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL.  No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used, complete;

	used = chan->cookie;
	complete = chan->completed_cookie;
	barrier();
	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}

#endif
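
Every driver conversion in this merge (amba-pl08x, at_hdmac, coh901318 above; ep93xx below) collapses onto the same pattern around these helpers. A condensed sketch, with struct my_chan and the bytes-left computation as placeholders for a driver's own types and bookkeeping:

	/* Sketch of the common tx_submit/tx_status pattern these helpers enable;
	 * 'struct my_chan' is a placeholder for a driver's channel type. */
	struct my_chan {
		struct dma_chan	chan;
		spinlock_t	lock;
	};

	static dma_cookie_t my_tx_submit(struct dma_async_tx_descriptor *tx)
	{
		struct my_chan *mc = container_of(tx->chan, struct my_chan, chan);
		unsigned long flags;
		dma_cookie_t cookie;

		/* lock held, per dma_cookie_assign()'s kerneldoc above */
		spin_lock_irqsave(&mc->lock, flags);
		cookie = dma_cookie_assign(tx);
		/* ...queue the descriptor for the hardware here... */
		spin_unlock_irqrestore(&mc->lock, flags);
		return cookie;
	}

	static enum dma_status my_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *state)
	{
		enum dma_status ret = dma_cookie_status(chan, cookie, state);

		if (ret != DMA_SUCCESS)	/* report residue only while pending */
			dma_set_residue(state, 0 /* driver's bytes-left count */);
		return ret;
	}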
This diff is collapsed.
@@ -13,6 +13,18 @@
 
 #define DW_DMA_MAX_NR_CHANNELS	8
 
+/* flow controller */
+enum dw_dma_fc {
+	DW_DMA_FC_D_M2M,
+	DW_DMA_FC_D_M2P,
+	DW_DMA_FC_D_P2M,
+	DW_DMA_FC_D_P2P,
+	DW_DMA_FC_P_P2M,
+	DW_DMA_FC_SP_P2P,
+	DW_DMA_FC_P_M2P,
+	DW_DMA_FC_DP_P2P,
+};
+
 /*
  * Redefine this macro to handle differences between 32- and 64-bit
  * addressing, big vs. little endian, etc.
@@ -146,13 +158,15 @@ struct dw_dma_chan {
 
 	/* these other elements are all protected by lock */
 	unsigned long		flags;
-	dma_cookie_t		completed;
 	struct list_head	active_list;
 	struct list_head	queue;
 	struct list_head	free_list;
 	struct dw_cyclic_desc	*cdesc;
 
 	unsigned int		descs_allocated;
+
+	/* configuration passed via DMA_SLAVE_CONFIG */
+	struct dma_slave_config dma_sconfig;
 };
 
 static inline struct dw_dma_chan_regs __iomem *
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
#include <mach/dma.h> #include <mach/dma.h>
#include "dmaengine.h"
/* M2P registers */ /* M2P registers */
#define M2P_CONTROL 0x0000 #define M2P_CONTROL 0x0000
#define M2P_CONTROL_STALLINT BIT(0) #define M2P_CONTROL_STALLINT BIT(0)
...@@ -122,7 +124,6 @@ struct ep93xx_dma_desc { ...@@ -122,7 +124,6 @@ struct ep93xx_dma_desc {
* @lock: lock protecting the fields following * @lock: lock protecting the fields following
* @flags: flags for the channel * @flags: flags for the channel
* @buffer: which buffer to use next (0/1) * @buffer: which buffer to use next (0/1)
* @last_completed: last completed cookie value
* @active: flattened chain of descriptors currently being processed * @active: flattened chain of descriptors currently being processed
* @queue: pending descriptors which are handled next * @queue: pending descriptors which are handled next
* @free_list: list of free descriptors which can be used * @free_list: list of free descriptors which can be used
...@@ -157,7 +158,6 @@ struct ep93xx_dma_chan { ...@@ -157,7 +158,6 @@ struct ep93xx_dma_chan {
#define EP93XX_DMA_IS_CYCLIC 0 #define EP93XX_DMA_IS_CYCLIC 0
int buffer; int buffer;
dma_cookie_t last_completed;
struct list_head active; struct list_head active;
struct list_head queue; struct list_head queue;
struct list_head free_list; struct list_head free_list;
...@@ -703,7 +703,7 @@ static void ep93xx_dma_tasklet(unsigned long data) ...@@ -703,7 +703,7 @@ static void ep93xx_dma_tasklet(unsigned long data)
desc = ep93xx_dma_get_active(edmac); desc = ep93xx_dma_get_active(edmac);
if (desc) { if (desc) {
if (desc->complete) { if (desc->complete) {
edmac->last_completed = desc->txd.cookie; dma_cookie_complete(&desc->txd);
list_splice_init(&edmac->active, &list); list_splice_init(&edmac->active, &list);
} }
callback = desc->txd.callback; callback = desc->txd.callback;
...@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx) ...@@ -783,17 +783,10 @@ static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&edmac->lock, flags); spin_lock_irqsave(&edmac->lock, flags);
cookie = dma_cookie_assign(tx);
cookie = edmac->chan.cookie;
if (++cookie < 0)
cookie = 1;
desc = container_of(tx, struct ep93xx_dma_desc, txd); desc = container_of(tx, struct ep93xx_dma_desc, txd);
edmac->chan.cookie = cookie;
desc->txd.cookie = cookie;
/* /*
* If nothing is currently prosessed, we push this descriptor * If nothing is currently prosessed, we push this descriptor
* directly to the hardware. Otherwise we put the descriptor * directly to the hardware. Otherwise we put the descriptor
...@@ -861,8 +854,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan) ...@@ -861,8 +854,7 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
goto fail_clk_disable; goto fail_clk_disable;
spin_lock_irq(&edmac->lock); spin_lock_irq(&edmac->lock);
edmac->last_completed = 1; dma_cookie_init(&edmac->chan);
edmac->chan.cookie = 1;
ret = edmac->edma->hw_setup(edmac); ret = edmac->edma->hw_setup(edmac);
spin_unlock_irq(&edmac->lock); spin_unlock_irq(&edmac->lock);
...@@ -983,13 +975,14 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, ...@@ -983,13 +975,14 @@ ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
* @sg_len: number of entries in @sgl * @sg_len: number of entries in @sgl
* @dir: direction of the DMA transfer * @dir: direction of the DMA transfer
* @flags: flags for the descriptor * @flags: flags for the descriptor
* @context: operation context (ignored)
* *
* Returns a valid DMA descriptor or %NULL in case of failure. * Returns a valid DMA descriptor or %NULL in case of failure.
*/ */
static struct dma_async_tx_descriptor * static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction dir, unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags) unsigned long flags, void *context)
{ {
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first; struct ep93xx_dma_desc *desc, *first;
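Existing callers need not pass the new context argument themselves; the inline wrappers introduced by this series forward NULL on their behalf. The slave_sg wrapper is essentially:

	static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
		enum dma_transfer_direction dir, unsigned long flags)
	{
		return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
							  dir, flags, NULL);
	}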
...@@ -1056,6 +1049,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -1056,6 +1049,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
* @buf_len: length of the buffer (in bytes) * @buf_len: length of the buffer (in bytes)
* @period_len: length of a single period * @period_len: length of a single period
* @dir: direction of the operation * @dir: direction of the operation
* @context: operation context (ignored)
* *
* Prepares a descriptor for cyclic DMA operation. This means that once the * Prepares a descriptor for cyclic DMA operation. This means that once the
* descriptor is submitted, we will be submitting in a @period_len sized * descriptor is submitted, we will be submitting in a @period_len sized
...@@ -1068,7 +1062,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ...@@ -1068,7 +1062,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
static struct dma_async_tx_descriptor * static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len, size_t buf_len, size_t period_len,
enum dma_transfer_direction dir) enum dma_transfer_direction dir, void *context)
{ {
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first; struct ep93xx_dma_desc *desc, *first;
...@@ -1248,18 +1242,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan, ...@@ -1248,18 +1242,13 @@ static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
struct dma_tx_state *state) struct dma_tx_state *state)
{ {
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
dma_cookie_t last_used, last_completed;
enum dma_status ret; enum dma_status ret;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&edmac->lock, flags); spin_lock_irqsave(&edmac->lock, flags);
last_used = chan->cookie; ret = dma_cookie_status(chan, cookie, state);
last_completed = edmac->last_completed;
spin_unlock_irqrestore(&edmac->lock, flags); spin_unlock_irqrestore(&edmac->lock, flags);
ret = dma_async_is_complete(cookie, last_completed, last_used);
dma_set_tx_state(state, last_completed, last_used, 0);
return ret; return ret;
} }
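The last_used/last_completed juggling collapses into a single call. Paraphrasing the dma_cookie_status() helper added in drivers/dma/dmaengine.h:

	static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *state)
	{
		dma_cookie_t used, complete;

		used = chan->cookie;
		complete = chan->completed_cookie;
		barrier();
		if (state) {
			state->last = complete;
			state->used = used;
			state->residue = 0;
		}
		return dma_async_is_complete(cookie, complete, used);
	}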
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/dmapool.h> #include <linux/dmapool.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include "dmaengine.h"
#include "fsldma.h" #include "fsldma.h"
#define chan_dbg(chan, fmt, arg...) \ #define chan_dbg(chan, fmt, arg...) \
...@@ -413,17 +414,10 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) ...@@ -413,17 +414,10 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
* assign cookies to all of the software descriptors * assign cookies to all of the software descriptors
* that make up this transaction * that make up this transaction
*/ */
cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) { list_for_each_entry(child, &desc->tx_list, node) {
cookie++; cookie = dma_cookie_assign(&child->async_tx);
if (cookie < DMA_MIN_COOKIE)
cookie = DMA_MIN_COOKIE;
child->async_tx.cookie = cookie;
} }
chan->common.cookie = cookie;
/* put this transaction onto the tail of the pending queue */ /* put this transaction onto the tail of the pending queue */
append_ld_queue(chan, desc); append_ld_queue(chan, desc);
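Each child descriptor now gets its cookie from dma_cookie_assign(), which also advances chan->cookie, so the manual DMA_MIN_COOKIE wrap-around check disappears from every driver. Paraphrased:

	static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
	{
		struct dma_chan *chan = tx->chan;
		dma_cookie_t cookie;

		cookie = chan->cookie + 1;
		if (cookie < DMA_MIN_COOKIE)
			cookie = DMA_MIN_COOKIE;	/* skip the reserved range on wrap */
		tx->cookie = chan->cookie = cookie;

		return cookie;
	}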
...@@ -765,6 +759,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, ...@@ -765,6 +759,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
* @sg_len: number of entries in @scatterlist * @sg_len: number of entries in @scatterlist
* @direction: DMA direction * @direction: DMA direction
* @flags: DMAEngine flags * @flags: DMAEngine flags
* @context: transaction context (ignored)
* *
* Prepare a set of descriptors for a DMA_SLAVE transaction. Following the * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
* DMA_SLAVE API, this gets the device-specific information from the * DMA_SLAVE API, this gets the device-specific information from the
...@@ -772,7 +767,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, ...@@ -772,7 +767,8 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
*/ */
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction direction, unsigned long flags) enum dma_transfer_direction direction, unsigned long flags,
void *context)
{ {
/* /*
* This operation is not supported on the Freescale DMA controller * This operation is not supported on the Freescale DMA controller
...@@ -984,19 +980,14 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, ...@@ -984,19 +980,14 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan,
struct dma_tx_state *txstate) struct dma_tx_state *txstate)
{ {
struct fsldma_chan *chan = to_fsl_chan(dchan); struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_complete; enum dma_status ret;
dma_cookie_t last_used;
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
ret = dma_cookie_status(dchan, cookie, txstate);
last_complete = chan->completed_cookie;
last_used = dchan->cookie;
spin_unlock_irqrestore(&chan->desc_lock, flags); spin_unlock_irqrestore(&chan->desc_lock, flags);
dma_set_tx_state(txstate, last_complete, last_used, 0); return ret;
return dma_async_is_complete(cookie, last_complete, last_used);
} }
/*----------------------------------------------------------------------------*/ /*----------------------------------------------------------------------------*/
...@@ -1087,8 +1078,8 @@ static void dma_do_tasklet(unsigned long data) ...@@ -1087,8 +1078,8 @@ static void dma_do_tasklet(unsigned long data)
desc = to_fsl_desc(chan->ld_running.prev); desc = to_fsl_desc(chan->ld_running.prev);
cookie = desc->async_tx.cookie; cookie = desc->async_tx.cookie;
dma_cookie_complete(&desc->async_tx);
chan->completed_cookie = cookie;
chan_dbg(chan, "completed_cookie=%d\n", cookie); chan_dbg(chan, "completed_cookie=%d\n", cookie);
} }
...@@ -1303,6 +1294,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, ...@@ -1303,6 +1294,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
chan->idle = true; chan->idle = true;
chan->common.device = &fdev->common; chan->common.device = &fdev->common;
dma_cookie_init(&chan->common);
/* find the IRQ line, if it exists in the device tree */ /* find the IRQ line, if it exists in the device tree */
chan->irq = irq_of_parse_and_map(node, 0); chan->irq = irq_of_parse_and_map(node, 0);
......
...@@ -137,7 +137,6 @@ struct fsldma_device { ...@@ -137,7 +137,6 @@ struct fsldma_device {
struct fsldma_chan { struct fsldma_chan {
char name[8]; /* Channel name */ char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs; struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */ spinlock_t desc_lock; /* Descriptor operation lock */
struct list_head ld_pending; /* Link descriptors queue */ struct list_head ld_pending; /* Link descriptors queue */
struct list_head ld_running; /* Link descriptors queue */ struct list_head ld_running; /* Link descriptors queue */
......
This diff is collapsed.
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#include <linux/intel_mid_dma.h> #include <linux/intel_mid_dma.h>
#include <linux/module.h> #include <linux/module.h>
#include "dmaengine.h"
#define MAX_CHAN 4 /*max ch across controllers*/ #define MAX_CHAN 4 /*max ch across controllers*/
#include "intel_mid_dma_regs.h" #include "intel_mid_dma_regs.h"
...@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc, ...@@ -288,7 +290,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
struct intel_mid_dma_lli *llitem; struct intel_mid_dma_lli *llitem;
void *param_txd = NULL; void *param_txd = NULL;
midc->completed = txd->cookie; dma_cookie_complete(txd);
callback_txd = txd->callback; callback_txd = txd->callback;
param_txd = txd->callback_param; param_txd = txd->callback_param;
...@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) ...@@ -434,14 +436,7 @@ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx)
dma_cookie_t cookie; dma_cookie_t cookie;
spin_lock_bh(&midc->lock); spin_lock_bh(&midc->lock);
cookie = midc->chan.cookie; cookie = dma_cookie_assign(tx);
if (++cookie < 0)
cookie = 1;
midc->chan.cookie = cookie;
desc->txd.cookie = cookie;
if (list_empty(&midc->active_list)) if (list_empty(&midc->active_list))
list_add_tail(&desc->desc_node, &midc->active_list); list_add_tail(&desc->desc_node, &midc->active_list);
...@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, ...@@ -482,31 +477,18 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t cookie,
struct dma_tx_state *txstate) struct dma_tx_state *txstate)
{ {
struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan);
dma_cookie_t last_used; enum dma_status ret;
dma_cookie_t last_complete;
int ret;
last_complete = midc->completed; ret = dma_cookie_status(chan, cookie, txstate);
last_used = chan->cookie;
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) { if (ret != DMA_SUCCESS) {
spin_lock_bh(&midc->lock); spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc); midc_scan_descriptors(to_middma_device(chan->device), midc);
spin_unlock_bh(&midc->lock); spin_unlock_bh(&midc->lock);
last_complete = midc->completed; ret = dma_cookie_status(chan, cookie, txstate);
last_used = chan->cookie;
ret = dma_async_is_complete(cookie, last_complete, last_used);
} }
if (txstate) {
txstate->last = last_complete;
txstate->used = last_used;
txstate->residue = 0;
}
return ret; return ret;
} }
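Note the retry: if the first dma_cookie_status() reports the transfer still in flight, the driver scans its descriptor lists and asks again. From a client's point of view nothing changes; a hypothetical poll ("chan" and "cookie" are placeholders from an earlier submit, not from this patch) still looks like:

	static bool example_done(struct dma_chan *chan, dma_cookie_t cookie)
	{
		dma_cookie_t done, used;

		/* Routed to intel_mid_dma_tx_status() above via device_tx_status */
		return dma_async_is_tx_complete(chan, cookie, &done, &used)
			== DMA_SUCCESS;
	}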
...@@ -732,13 +714,14 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy( ...@@ -732,13 +714,14 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
* @sg_len: number of entries in @sgl * @sg_len: number of entries in @sgl
* @direction: DMA transfer direction * @direction: DMA transfer direction
* @flags: DMA flags * @flags: DMA flags
* @context: transfer context (ignored)
* *
* Prepares an LLI-based peripheral transfer * Prepares an LLI-based peripheral transfer
*/ */
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_transfer_direction direction, unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags) unsigned long flags, void *context)
{ {
struct intel_mid_dma_chan *midc = NULL; struct intel_mid_dma_chan *midc = NULL;
struct intel_mid_dma_slave *mids = NULL; struct intel_mid_dma_slave *mids = NULL;
...@@ -832,7 +815,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) ...@@ -832,7 +815,6 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
/*trying to free ch in use!!!!!*/ /*trying to free ch in use!!!!!*/
pr_err("ERR_MDMA: trying to free ch in use\n"); pr_err("ERR_MDMA: trying to free ch in use\n");
} }
pm_runtime_put(&mid->pdev->dev);
spin_lock_bh(&midc->lock); spin_lock_bh(&midc->lock);
midc->descs_allocated = 0; midc->descs_allocated = 0;
list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) {
...@@ -853,6 +835,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan) ...@@ -853,6 +835,7 @@ static void intel_mid_dma_free_chan_resources(struct dma_chan *chan)
/* Disable CH interrupts */ /* Disable CH interrupts */
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK);
iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR);
pm_runtime_put(&mid->pdev->dev);
} }
/** /**
...@@ -886,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan) ...@@ -886,7 +869,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_put(&mid->pdev->dev); pm_runtime_put(&mid->pdev->dev);
return -EIO; return -EIO;
} }
midc->completed = chan->cookie = 1; dma_cookie_init(chan);
spin_lock_bh(&midc->lock); spin_lock_bh(&midc->lock);
while (midc->descs_allocated < DESCS_PER_CHANNEL) { while (midc->descs_allocated < DESCS_PER_CHANNEL) {
...@@ -1056,7 +1039,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) ...@@ -1056,7 +1039,8 @@ static irqreturn_t intel_mid_dma_interrupt(int irq, void *data)
} }
err_status &= mid->intr_mask; err_status &= mid->intr_mask;
if (err_status) { if (err_status) {
iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); iowrite32((err_status << INT_MASK_WE),
mid->dma_base + MASK_ERR);
call_tasklet = 1; call_tasklet = 1;
} }
if (call_tasklet) if (call_tasklet)
...@@ -1118,7 +1102,7 @@ static int mid_setup_dma(struct pci_dev *pdev) ...@@ -1118,7 +1102,7 @@ static int mid_setup_dma(struct pci_dev *pdev)
struct intel_mid_dma_chan *midch = &dma->ch[i]; struct intel_mid_dma_chan *midch = &dma->ch[i];
midch->chan.device = &dma->common; midch->chan.device = &dma->common;
midch->chan.cookie = 1; dma_cookie_init(&midch->chan);
midch->ch_id = dma->chan_base + i; midch->ch_id = dma->chan_base + i;
pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id);
......
...@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi { ...@@ -165,7 +165,6 @@ union intel_mid_dma_cfg_hi {
* @dma_base: MMIO register space DMA engine base pointer * @dma_base: MMIO register space DMA engine base pointer
* @ch_id: DMA channel id * @ch_id: DMA channel id
* @lock: channel spinlock * @lock: channel spinlock
* @completed: DMA cookie
* @active_list: current active descriptors * @active_list: current active descriptors
* @queue: current queued up descriptors * @queue: current queued up descriptors
* @free_list: current free descriptors * @free_list: current free descriptors
...@@ -183,7 +182,6 @@ struct intel_mid_dma_chan { ...@@ -183,7 +182,6 @@ struct intel_mid_dma_chan {
void __iomem *dma_base; void __iomem *dma_base;
int ch_id; int ch_id;
spinlock_t lock; spinlock_t lock;
dma_cookie_t completed;
struct list_head active_list; struct list_head active_list;
struct list_head queue; struct list_head queue;
struct list_head free_list; struct list_head free_list;
......
...@@ -40,6 +40,8 @@ ...@@ -40,6 +40,8 @@
#include "registers.h" #include "registers.h"
#include "hw.h" #include "hw.h"
#include "../dmaengine.h"
int ioat_pending_level = 4; int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644); module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level, MODULE_PARM_DESC(ioat_pending_level,
...@@ -107,6 +109,7 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c ...@@ -107,6 +109,7 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->reg_base = device->reg_base + (0x80 * (idx + 1)); chan->reg_base = device->reg_base + (0x80 * (idx + 1));
spin_lock_init(&chan->cleanup_lock); spin_lock_init(&chan->cleanup_lock);
chan->common.device = dma; chan->common.device = dma;
dma_cookie_init(&chan->common);
list_add_tail(&chan->common.device_node, &dma->channels); list_add_tail(&chan->common.device_node, &dma->channels);
device->idx[idx] = chan; device->idx[idx] = chan;
init_timer(&chan->timer); init_timer(&chan->timer);
...@@ -235,12 +238,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) ...@@ -235,12 +238,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_bh(&ioat->desc_lock); spin_lock_bh(&ioat->desc_lock);
/* cookie incr and addition to used_list must be atomic */ /* cookie incr and addition to used_list must be atomic */
cookie = c->cookie; cookie = dma_cookie_assign(tx);
cookie++;
if (cookie < 0)
cookie = 1;
c->cookie = cookie;
tx->cookie = cookie;
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
/* write address into NextDescriptor field of last desc in chain */ /* write address into NextDescriptor field of last desc in chain */
...@@ -603,8 +601,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) ...@@ -603,8 +601,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
*/ */
dump_desc_dbg(ioat, desc); dump_desc_dbg(ioat, desc);
if (tx->cookie) { if (tx->cookie) {
chan->completed_cookie = tx->cookie; dma_cookie_complete(tx);
tx->cookie = 0;
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
ioat->active -= desc->hw->tx_cnt; ioat->active -= desc->hw->tx_cnt;
if (tx->callback) { if (tx->callback) {
...@@ -733,13 +730,15 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, ...@@ -733,13 +730,15 @@ ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
{ {
struct ioat_chan_common *chan = to_chan_common(c); struct ioat_chan_common *chan = to_chan_common(c);
struct ioatdma_device *device = chan->device; struct ioatdma_device *device = chan->device;
enum dma_status ret;
if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) ret = dma_cookie_status(c, cookie, txstate);
return DMA_SUCCESS; if (ret == DMA_SUCCESS)
return ret;
device->cleanup_fn((unsigned long) c); device->cleanup_fn((unsigned long) c);
return ioat_tx_status(c, cookie, txstate); return dma_cookie_status(c, cookie, txstate);
} }
static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat)
......
...@@ -90,7 +90,6 @@ struct ioat_chan_common { ...@@ -90,7 +90,6 @@ struct ioat_chan_common {
void __iomem *reg_base; void __iomem *reg_base;
unsigned long last_completion; unsigned long last_completion;
spinlock_t cleanup_lock; spinlock_t cleanup_lock;
dma_cookie_t completed_cookie;
unsigned long state; unsigned long state;
#define IOAT_COMPLETION_PENDING 0 #define IOAT_COMPLETION_PENDING 0
#define IOAT_COMPLETION_ACK 1 #define IOAT_COMPLETION_ACK 1
...@@ -143,28 +142,6 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) ...@@ -143,28 +142,6 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c)
return container_of(chan, struct ioat_dma_chan, base); return container_of(chan, struct ioat_dma_chan, base);
} }
/**
* ioat_tx_status - poll the status of an ioat transaction
* @c: channel handle
* @cookie: transaction identifier
* @txstate: if set, updated with the transaction state
*/
static inline enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate)
{
struct ioat_chan_common *chan = to_chan_common(c);
dma_cookie_t last_used;
dma_cookie_t last_complete;
last_used = c->cookie;
last_complete = chan->completed_cookie;
dma_set_tx_state(txstate, last_complete, last_used, 0);
return dma_async_is_complete(cookie, last_complete, last_used);
}
/* wrapper around hardware descriptor format + additional software fields */ /* wrapper around hardware descriptor format + additional software fields */
/** /**
......
...@@ -41,6 +41,8 @@ ...@@ -41,6 +41,8 @@
#include "registers.h" #include "registers.h"
#include "hw.h" #include "hw.h"
#include "../dmaengine.h"
int ioat_ring_alloc_order = 8; int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644); module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order, MODULE_PARM_DESC(ioat_ring_alloc_order,
...@@ -147,8 +149,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) ...@@ -147,8 +149,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
dump_desc_dbg(ioat, desc); dump_desc_dbg(ioat, desc);
if (tx->cookie) { if (tx->cookie) {
ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
chan->completed_cookie = tx->cookie; dma_cookie_complete(tx);
tx->cookie = 0;
if (tx->callback) { if (tx->callback) {
tx->callback(tx->callback_param); tx->callback(tx->callback_param);
tx->callback = NULL; tx->callback = NULL;
...@@ -398,13 +399,9 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) ...@@ -398,13 +399,9 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
struct dma_chan *c = tx->chan; struct dma_chan *c = tx->chan;
struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
struct ioat_chan_common *chan = &ioat->base; struct ioat_chan_common *chan = &ioat->base;
dma_cookie_t cookie = c->cookie; dma_cookie_t cookie;
cookie++; cookie = dma_cookie_assign(tx);
if (cookie < 0)
cookie = 1;
tx->cookie = cookie;
c->cookie = cookie;
dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);
if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
......
...@@ -61,6 +61,7 @@ ...@@ -61,6 +61,7 @@
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/prefetch.h> #include <linux/prefetch.h>
#include "../dmaengine.h"
#include "registers.h" #include "registers.h"
#include "hw.h" #include "hw.h"
#include "dma.h" #include "dma.h"
...@@ -277,9 +278,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) ...@@ -277,9 +278,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
dump_desc_dbg(ioat, desc); dump_desc_dbg(ioat, desc);
tx = &desc->txd; tx = &desc->txd;
if (tx->cookie) { if (tx->cookie) {
chan->completed_cookie = tx->cookie; dma_cookie_complete(tx);
ioat3_dma_unmap(ioat, desc, idx + i); ioat3_dma_unmap(ioat, desc, idx + i);
tx->cookie = 0;
if (tx->callback) { if (tx->callback) {
tx->callback(tx->callback_param); tx->callback(tx->callback_param);
tx->callback = NULL; tx->callback = NULL;
...@@ -411,13 +411,15 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, ...@@ -411,13 +411,15 @@ ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie,
struct dma_tx_state *txstate) struct dma_tx_state *txstate)
{ {
struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
enum dma_status ret;
if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) ret = dma_cookie_status(c, cookie, txstate);
return DMA_SUCCESS; if (ret == DMA_SUCCESS)
return ret;
ioat3_cleanup(ioat); ioat3_cleanup(ioat);
return ioat_tx_status(c, cookie, txstate); return dma_cookie_status(c, cookie, txstate);
} }
static struct dma_async_tx_descriptor * static struct dma_async_tx_descriptor *
......
This diff is collapsed.
...@@ -81,7 +81,6 @@ struct ppc440spe_adma_device { ...@@ -81,7 +81,6 @@ struct ppc440spe_adma_device {
* @common: common dmaengine channel object members * @common: common dmaengine channel object members
* @all_slots: complete domain of slots usable by the channel * @all_slots: complete domain of slots usable by the channel
* @pending: allows batching of hardware operations * @pending: allows batching of hardware operations
* @completed_cookie: identifier for the most recently completed operation
* @slots_allocated: records the actual size of the descriptor slot pool * @slots_allocated: records the actual size of the descriptor slot pool
* @hw_chain_inited: h/w descriptor chain initialization flag * @hw_chain_inited: h/w descriptor chain initialization flag
* @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs * @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
...@@ -99,7 +98,6 @@ struct ppc440spe_adma_chan { ...@@ -99,7 +98,6 @@ struct ppc440spe_adma_chan {
struct list_head all_slots; struct list_head all_slots;
struct ppc440spe_adma_desc_slot *last_used; struct ppc440spe_adma_desc_slot *last_used;
int pending; int pending;
dma_cookie_t completed_cookie;
int slots_allocated; int slots_allocated;
int hw_chain_inited; int hw_chain_inited;
struct tasklet_struct irq_tasklet; struct tasklet_struct irq_tasklet;
......
This diff is collapsed.