Commit 7949456b authored by Linus Torvalds

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
  ppc440spe-adma: adds updated ppc440spe adma driver
  iop-adma.c: use resource_size()
  dmaengine: clarify the meaning of the DMA_CTRL_ACK flag
  sh: stylistic improvements for the DMA driver
  dmaengine: fix dmatest to verify minimum transfer length and test buffer size
  sh: DMA driver has to specify its alignment requirements
  Add COH 901 318 DMA block driver v5
parents 60d9aa75 12458ea0
PPC440SPe DMA/XOR (DMA Controller and XOR Accelerator)
Device nodes needed for operation of the ppc440spe-adma driver
are specified here. These are the I2O/DMA, DMA and XOR nodes
for the DMA engines, and the Memory Queue Module node. The latter
is used by the ADMA driver to configure the RAID-6 H/W capabilities
of the PPC440SPe. In addition to the nodes and properties described
below, the ranges property of the PLB node must specify ranges for
the DMA devices.
i) The I2O node
Required properties:
- compatible : "ibm,i2o-440spe";
- reg : <registers mapping>
- dcr-reg : <DCR registers range>
Example:
I2O: i2o@400100000 {
compatible = "ibm,i2o-440spe";
reg = <0x00000004 0x00100000 0x100>;
dcr-reg = <0x060 0x020>;
};
ii) The DMA node
Required properties:
- compatible : "ibm,dma-440spe";
- cell-index : 1 cell, hardware index of the DMA engine
(typically 0x0 and 0x1 for DMA0 and DMA1)
- reg : <registers mapping>
- dcr-reg : <DCR registers range>
- interrupts : <interrupt mapping for DMA0/1 interrupts sources:
2 sources: DMAx CS FIFO Needs Service IRQ (on UIC0)
and DMA Error IRQ (on UIC1). The latter is common
for both DMA engines>.
- interrupt-parent : needed for interrupt mapping
Example:
DMA0: dma0@400100100 {
compatible = "ibm,dma-440spe";
cell-index = <0>;
reg = <0x00000004 0x00100100 0x100>;
dcr-reg = <0x060 0x020>;
interrupt-parent = <&DMA0>;
interrupts = <0 1>;
#interrupt-cells = <1>;
#address-cells = <0>;
#size-cells = <0>;
interrupt-map = <
0 &UIC0 0x14 4
1 &UIC1 0x16 4>;
};
iii) XOR Accelerator node
Required properties:
- compatible : "amcc,xor-accelerator";
- reg : <registers mapping>
- interrupts : <interrupt mapping for XOR interrupt source>
- interrupt-parent : for interrupt mapping
Example:
xor-accel@400200000 {
compatible = "amcc,xor-accelerator";
reg = <0x00000004 0x00200000 0x400>;
interrupt-parent = <&UIC1>;
interrupts = <0x1f 4>;
};
iv) Memory Queue Module node
Required properties:
- compatible : "ibm,mq-440spe";
- dcr-reg : <DCR registers range>
Example:
MQ0: mq {
compatible = "ibm,mq-440spe";
dcr-reg = <0x040 0x020>;
};
/*
*
* arch/arm/mach-u300/include/mach/coh901318.h
*
*
* Copyright (C) 2007-2009 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
* DMA driver for COH 901 318
* Author: Per Friden <per.friden@stericsson.com>
*/
#ifndef COH901318_H
#define COH901318_H
#include <linux/device.h>
#include <linux/dmaengine.h>
#define MAX_DMA_PACKET_SIZE_SHIFT 11
#define MAX_DMA_PACKET_SIZE (1 << MAX_DMA_PACKET_SIZE_SHIFT)
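/* i.e. a single lli transfers at most 2048 bytes; larger jobs are
 * split across a chain of llis */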
/**
* struct coh901318_lli - linked list item for DMAC
* @control: control settings for DMAC
* @src_addr: transfer source address
* @dst_addr: transfer destination address
* @link_addr: physical address to next lli
* @virt_link_addr: virtual address of next lli (only used by pool_free)
* @phy_this: physical address of current lli (only used by pool_free)
*/
struct coh901318_lli {
u32 control;
dma_addr_t src_addr;
dma_addr_t dst_addr;
dma_addr_t link_addr;
void *virt_link_addr;
dma_addr_t phy_this;
};
/**
* struct coh901318_params - parameters for DMAC configuration
* @config: DMA config register
* @ctrl_lli_last: DMA control register for the last lli in the list
* @ctrl_lli: DMA control register for an lli
* @ctrl_lli_chained: DMA control register for a chained lli
*/
struct coh901318_params {
u32 config;
u32 ctrl_lli_last;
u32 ctrl_lli;
u32 ctrl_lli_chained;
};
/**
* struct coh_dma_channel - dma channel base
* @name: ascii name of dma channel
* @number: channel id number
* @desc_nbr_max: number of preallocated descriptors
* @priority_high: channel priority; 0 means low, otherwise high
* @param: configuration parameters
* @dev_addr: physical address of the peripheral connected to the channel
*/
struct coh_dma_channel {
const char name[32];
const int number;
const int desc_nbr_max;
const int priority_high;
const struct coh901318_params param;
const dma_addr_t dev_addr;
};
/**
* dma_access_memory_state_t - report DMA memory access state
*
* @dev: the dma device
* @active: true means the dma intends to access memory,
* false means the dma won't access memory
*/
typedef void (*dma_access_memory_state_t)(struct device *dev,
bool active);
/**
* struct powersave - DMA power save structure
* @lock: lock protecting data in this struct
* @started_channels: bit mask indicating active dma channels
*/
struct powersave {
spinlock_t lock;
u64 started_channels;
};
/**
* struct coh901318_platform - platform arch structure
* @chans_slave: specifying dma slave channels
* @chans_memcpy: specifying dma memcpy channels
* @access_memory_state: requesting DMA memory access (on / off)
* @chan_conf: dma channel configurations
* @max_channels: max number of dma channels
*/
struct coh901318_platform {
const int *chans_slave;
const int *chans_memcpy;
const dma_access_memory_state_t access_memory_state;
const struct coh_dma_channel *chan_conf;
const int max_channels;
};
/**
* coh901318_get_bytes_left() - Get number of bytes left in the current transfer
* @chan: dma channel handle
* returns number of bytes left to transfer
*/
u32 coh901318_get_bytes_left(struct dma_chan *chan);
/**
* coh901318_stop() - Stops a dma transfer without losing data
* @chan: dma channel handle
*/
void coh901318_stop(struct dma_chan *chan);
/**
* coh901318_continue() - Resumes a stopped dma transfer
* @chan: dma channel handle
*/
void coh901318_continue(struct dma_chan *chan);
/**
* coh901318_filter_id() - DMA channel filter function
* @chan: dma channel handle
* @chan_id: id of the dma channel to filter for
*
* Pass to dma_request_channel() to request the channel with the given id
*/
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id);
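/*
 * A minimal usage sketch (hypothetical client code, not part of this
 * header); the channel number 5 is illustrative only:
 *
 * dma_cap_mask_t mask;
 * struct dma_chan *chan;
 * dma_cap_zero(mask);
 * dma_cap_set(DMA_SLAVE, mask);
 * chan = dma_request_channel(mask, coh901318_filter_id, (void *) 5);
 * if (chan) {
 * coh901318_stop(chan);
 * pr_info("%u bytes left\n", coh901318_get_bytes_left(chan));
 * coh901318_continue(chan);
 * }
 */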
/*
* DMA Controller - static register mappings of the coh901318 dma.
*
*/
#define COH901318_MOD32_MASK (0x1F)
#define COH901318_WORD_MASK (0xFFFFFFFF)
/* INT_STATUS - Interrupt Status Registers 32bit (R/-) */
#define COH901318_INT_STATUS1 (0x0000)
#define COH901318_INT_STATUS2 (0x0004)
/* TC_INT_STATUS - Terminal Count Interrupt Status Registers 32bit (R/-) */
#define COH901318_TC_INT_STATUS1 (0x0008)
#define COH901318_TC_INT_STATUS2 (0x000C)
/* TC_INT_CLEAR - Terminal Count Interrupt Clear Registers 32bit (-/W) */
#define COH901318_TC_INT_CLEAR1 (0x0010)
#define COH901318_TC_INT_CLEAR2 (0x0014)
/* RAW_TC_INT_STATUS - Raw Term Count Interrupt Status Registers 32bit (R/-) */
#define COH901318_RAW_TC_INT_STATUS1 (0x0018)
#define COH901318_RAW_TC_INT_STATUS2 (0x001C)
/* BE_INT_STATUS - Bus Error Interrupt Status Registers 32bit (R/-) */
#define COH901318_BE_INT_STATUS1 (0x0020)
#define COH901318_BE_INT_STATUS2 (0x0024)
/* BE_INT_CLEAR - Bus Error Interrupt Clear Registers 32bit (-/W) */
#define COH901318_BE_INT_CLEAR1 (0x0028)
#define COH901318_BE_INT_CLEAR2 (0x002C)
/* RAW_BE_INT_STATUS - Raw Bus Error Interrupt Status Registers 32bit (R/-) */
#define COH901318_RAW_BE_INT_STATUS1 (0x0030)
#define COH901318_RAW_BE_INT_STATUS2 (0x0034)
/*
* CX_CFG - Channel Configuration Registers 32bit (R/W)
*/
#define COH901318_CX_CFG (0x0100)
#define COH901318_CX_CFG_SPACING (0x04)
/* Channel enable activates the dma job */
#define COH901318_CX_CFG_CH_ENABLE (0x00000001)
#define COH901318_CX_CFG_CH_DISABLE (0x00000000)
/* Request Mode */
#define COH901318_CX_CFG_RM_MASK (0x00000006)
#define COH901318_CX_CFG_RM_MEMORY_TO_MEMORY (0x0 << 1)
#define COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY (0x1 << 1)
#define COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY (0x1 << 1)
#define COH901318_CX_CFG_RM_PRIMARY_TO_SECONDARY (0x3 << 1)
#define COH901318_CX_CFG_RM_SECONDARY_TO_PRIMARY (0x3 << 1)
/* Linked channel request field. RM must == 11 */
#define COH901318_CX_CFG_LCRF_SHIFT 3
#define COH901318_CX_CFG_LCRF_MASK (0x000001F8)
#define COH901318_CX_CFG_LCR_DISABLE (0x00000000)
/* Terminal Counter Interrupt Request Mask */
#define COH901318_CX_CFG_TC_IRQ_ENABLE (0x00000200)
#define COH901318_CX_CFG_TC_IRQ_DISABLE (0x00000000)
/* Bus Error interrupt Mask */
#define COH901318_CX_CFG_BE_IRQ_ENABLE (0x00000400)
#define COH901318_CX_CFG_BE_IRQ_DISABLE (0x00000000)
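/*
 * Illustrative example: a peripheral-to-memory channel configuration
 * word is built by OR-ing the fields above, e.g.
 *
 * cfg = COH901318_CX_CFG_CH_ENABLE |
 * COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY |
 * COH901318_CX_CFG_TC_IRQ_ENABLE |
 * COH901318_CX_CFG_BE_IRQ_ENABLE;
 */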
/*
* CX_STAT - Channel Status Registers 32bit (R/-)
*/
#define COH901318_CX_STAT (0x0200)
#define COH901318_CX_STAT_SPACING (0x04)
#define COH901318_CX_STAT_RBE_IRQ_IND (0x00000008)
#define COH901318_CX_STAT_RTC_IRQ_IND (0x00000004)
#define COH901318_CX_STAT_ACTIVE (0x00000002)
#define COH901318_CX_STAT_ENABLED (0x00000001)
/*
* CX_CTRL - Channel Control Registers 32bit (R/W)
*/
#define COH901318_CX_CTRL (0x0400)
#define COH901318_CX_CTRL_SPACING (0x10)
/* Transfer Count Enable */
#define COH901318_CX_CTRL_TC_ENABLE (0x00001000)
#define COH901318_CX_CTRL_TC_DISABLE (0x00000000)
/* Transfer Count Value 0 - 4095 */
#define COH901318_CX_CTRL_TC_VALUE_MASK (0x00000FFF)
/* Burst count */
#define COH901318_CX_CTRL_BURST_COUNT_MASK (0x0000E000)
#define COH901318_CX_CTRL_BURST_COUNT_64_BYTES (0x7 << 13)
#define COH901318_CX_CTRL_BURST_COUNT_48_BYTES (0x6 << 13)
#define COH901318_CX_CTRL_BURST_COUNT_32_BYTES (0x5 << 13)
#define COH901318_CX_CTRL_BURST_COUNT_16_BYTES (0x4 << 13)
#define COH901318_CX_CTRL_BURST_COUNT_8_BYTES (0x3 << 13)
#define COH901318_CX_CTRL_BURST_COUNT_4_BYTES (0x2 << 13)
#define COH901318_CX_CTRL_BURST_COUNT_2_BYTES (0x1 << 13)
#define COH901318_CX_CTRL_BURST_COUNT_1_BYTE (0x0 << 13)
/* Source bus size */
#define COH901318_CX_CTRL_SRC_BUS_SIZE_MASK (0x00030000)
#define COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS (0x2 << 16)
#define COH901318_CX_CTRL_SRC_BUS_SIZE_16_BITS (0x1 << 16)
#define COH901318_CX_CTRL_SRC_BUS_SIZE_8_BITS (0x0 << 16)
/* Source address increment */
#define COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE (0x00040000)
#define COH901318_CX_CTRL_SRC_ADDR_INC_DISABLE (0x00000000)
/* Destination Bus Size */
#define COH901318_CX_CTRL_DST_BUS_SIZE_MASK (0x00180000)
#define COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS (0x2 << 19)
#define COH901318_CX_CTRL_DST_BUS_SIZE_16_BITS (0x1 << 19)
#define COH901318_CX_CTRL_DST_BUS_SIZE_8_BITS (0x0 << 19)
/* Destination address increment */
#define COH901318_CX_CTRL_DST_ADDR_INC_ENABLE (0x00200000)
#define COH901318_CX_CTRL_DST_ADDR_INC_DISABLE (0x00000000)
/* Master Mode (Master2 is only connected to MSL) */
#define COH901318_CX_CTRL_MASTER_MODE_MASK (0x00C00000)
#define COH901318_CX_CTRL_MASTER_MODE_M2R_M1W (0x3 << 22)
#define COH901318_CX_CTRL_MASTER_MODE_M1R_M2W (0x2 << 22)
#define COH901318_CX_CTRL_MASTER_MODE_M2RW (0x1 << 22)
#define COH901318_CX_CTRL_MASTER_MODE_M1RW (0x0 << 22)
/* Terminal Count flag to PER enable */
#define COH901318_CX_CTRL_TCP_ENABLE (0x01000000)
#define COH901318_CX_CTRL_TCP_DISABLE (0x00000000)
/* Terminal Count flags to CPU enable */
#define COH901318_CX_CTRL_TC_IRQ_ENABLE (0x02000000)
#define COH901318_CX_CTRL_TC_IRQ_DISABLE (0x00000000)
/* Hand shake to peripheral */
#define COH901318_CX_CTRL_HSP_ENABLE (0x04000000)
#define COH901318_CX_CTRL_HSP_DISABLE (0x00000000)
#define COH901318_CX_CTRL_HSS_ENABLE (0x08000000)
#define COH901318_CX_CTRL_HSS_DISABLE (0x00000000)
/* DMA mode */
#define COH901318_CX_CTRL_DDMA_MASK (0x30000000)
#define COH901318_CX_CTRL_DDMA_LEGACY (0x0 << 28)
#define COH901318_CX_CTRL_DDMA_DEMAND_DMA1 (0x1 << 28)
#define COH901318_CX_CTRL_DDMA_DEMAND_DMA2 (0x2 << 28)
/* Primary Request Data Destination */
#define COH901318_CX_CTRL_PRDD_MASK (0x40000000)
#define COH901318_CX_CTRL_PRDD_DEST (0x1 << 30)
#define COH901318_CX_CTRL_PRDD_SOURCE (0x0 << 30)
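/*
 * Illustrative example: a memory-to-memory control word for one full
 * 2048 byte packet with 32-bit accesses and incrementing addresses
 * could be composed as
 *
 * ctrl = COH901318_CX_CTRL_TC_ENABLE |
 * (2048 & COH901318_CX_CTRL_TC_VALUE_MASK) |
 * COH901318_CX_CTRL_BURST_COUNT_64_BYTES |
 * COH901318_CX_CTRL_SRC_BUS_SIZE_32_BITS |
 * COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE |
 * COH901318_CX_CTRL_DST_BUS_SIZE_32_BITS |
 * COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
 */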
/*
* CX_SRC_ADDR - Channel Source Address Registers 32bit (R/W)
*/
#define COH901318_CX_SRC_ADDR (0x0404)
#define COH901318_CX_SRC_ADDR_SPACING (0x10)
/*
* CX_DST_ADDR - Channel Destination Address Registers 32bit R/W
*/
#define COH901318_CX_DST_ADDR (0x0408)
#define COH901318_CX_DST_ADDR_SPACING (0x10)
/*
* CX_LNK_ADDR - Channel Link Address Registers 32bit (R/W)
*/
#define COH901318_CX_LNK_ADDR (0x040C)
#define COH901318_CX_LNK_ADDR_SPACING (0x10)
#define COH901318_CX_LNK_LINK_IMMEDIATE (0x00000001)
#endif /* COH901318_H */
/*
* Copyright (C) 2008-2009 DENX Software Engineering.
*
* Author: Yuri Tikhonov <yur@emcraft.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution in the
* file called COPYING.
*/
#ifndef _ASM_POWERPC_ASYNC_TX_H_
#define _ASM_POWERPC_ASYNC_TX_H_
#if defined(CONFIG_440SPe) || defined(CONFIG_440SP)
extern struct dma_chan *
ppc440spe_async_tx_find_best_channel(enum dma_transaction_type cap,
struct page **dst_lst, int dst_cnt, struct page **src_lst,
int src_cnt, size_t src_sz);
#define async_tx_find_channel(dep, cap, dst_lst, dst_cnt, src_lst, \
src_cnt, src_sz) \
ppc440spe_async_tx_find_best_channel(cap, dst_lst, dst_cnt, src_lst, \
src_cnt, src_sz)
#else
#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
__async_tx_find_channel(dep, type)
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
enum dma_transaction_type tx_type);
#endif
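/*
 * Illustrative only: the async_tx core resolves a channel for an
 * operation through this hook, e.g. async_xor() effectively does
 *
 * chan = async_tx_find_channel(submit, DMA_XOR, &dest, 1,
 * src_list, src_cnt, len);
 */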
#endif
@@ -157,4 +157,27 @@
#define L2C_SNP_SSR_32G 0x0000f000
#define L2C_SNP_ESR 0x00000800
/*
* DCR register offsets for 440SP/440SPe I2O/DMA controller.
* The base address is configured in the device tree.
*/
#define DCRN_I2O0_IBAL 0x006
#define DCRN_I2O0_IBAH 0x007
#define I2O_REG_ENABLE 0x00000001 /* Enable I2O/DMA access */
/* 440SP/440SPe Software Reset DCR */
#define DCRN_SDR0_SRST 0x0200
#define DCRN_SDR0_SRST_I2ODMA (0x80000000 >> 15) /* Reset I2O/DMA */
/* 440SP/440SPe Memory Queue DCR offsets */
#define DCRN_MQ0_XORBA 0x04
#define DCRN_MQ0_CF2H 0x06
#define DCRN_MQ0_CFBHL 0x0f
#define DCRN_MQ0_BAUH 0x10
/* HB/LL Paths Configuration Register */
#define MQ0_CFBHL_TPLM 28
#define MQ0_CFBHL_HBCL 23
#define MQ0_CFBHL_POLY 15
#endif /* __DCR_REGS_H__ */
@@ -111,6 +111,24 @@ config SH_DMAE
help
Enable support for the Renesas SuperH DMA controllers.
config COH901318
bool "ST-Ericsson COH901318 DMA support"
select DMA_ENGINE
depends on ARCH_U300
help
Enable support for ST-Ericsson COH 901 318 DMA.
config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
select DMA_ENGINE
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
help
Enable support for the AMCC PPC440SPe RAID engines.
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
config DMA_ENGINE
bool
@@ -10,3 +10,5 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_MX3_IPU) += ipu/
obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
/*
* drivers/dma/coh901318.c
*
* Copyright (C) 2007-2009 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
* DMA driver for COH 901 318
* Author: Per Friden <per.friden@stericsson.com>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/slab.h> /* kmalloc() */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <mach/coh901318.h>
#include "coh901318_lli.h"
#define COHC_2_DEV(cohc) (&cohc->chan.dev->device)
#ifdef VERBOSE_DEBUG
#define COH_DBG(x) ({ if (1) x; 0; })
#else
#define COH_DBG(x) ({ if (0) x; 0; })
#endif
struct coh901318_desc {
struct dma_async_tx_descriptor desc;
struct list_head node;
struct scatterlist *sg;
unsigned int sg_len;
struct coh901318_lli *data;
enum dma_data_direction dir;
int pending_irqs;
unsigned long flags;
};
struct coh901318_base {
struct device *dev;
void __iomem *virtbase;
struct coh901318_pool pool;
struct powersave pm;
struct dma_device dma_slave;
struct dma_device dma_memcpy;
struct coh901318_chan *chans;
struct coh901318_platform *platform;
};
struct coh901318_chan {
spinlock_t lock;
int allocated;
int completed;
int id;
int stopped;
struct work_struct free_work;
struct dma_chan chan;
struct tasklet_struct tasklet;
struct list_head active;
struct list_head queue;
struct list_head free;
unsigned long nbr_active_done;
unsigned long busy;
int pending_irqs;
struct coh901318_base *base;
};
static void coh901318_list_print(struct coh901318_chan *cohc,
struct coh901318_lli *lli)
{
struct coh901318_lli *l;
dma_addr_t addr = virt_to_phys(lli);
int i = 0;
while (addr) {
l = phys_to_virt(addr);
dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
", dst 0x%x, link 0x%x link_virt 0x%p\n",
i, l, l->control, l->src_addr, l->dst_addr,
l->link_addr, phys_to_virt(l->link_addr));
i++;
addr = l->link_addr;
}
}
#ifdef CONFIG_DEBUG_FS
#define COH901318_DEBUGFS_ASSIGN(x, y) (x = y)
static struct coh901318_base *debugfs_dma_base;
static struct dentry *dma_dentry;
static int coh901318_debugfs_open(struct inode *inode, struct file *file)
{
file->private_data = inode->i_private;
return 0;
}
static ssize_t coh901318_debugfs_read(struct file *file, char __user *buf,
size_t count, loff_t *f_pos)
{
u64 started_channels = debugfs_dma_base->pm.started_channels;
int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
int i;
int ret = 0;
char *dev_buf;
char *tmp;
int dev_size;
dev_buf = kmalloc(4*1024, GFP_KERNEL);
if (dev_buf == NULL)
goto err_kmalloc;
tmp = dev_buf;
tmp += sprintf(tmp, "DMA -- enable dma channels\n");
for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
if (started_channels & (1 << i))
tmp += sprintf(tmp, "channel %d\n", i);
tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
dev_size = tmp - dev_buf;
/* No more to read if offset != 0 */
if (*f_pos > dev_size)
goto out;
if (count > dev_size - *f_pos)
count = dev_size - *f_pos;
if (copy_to_user(buf, dev_buf + *f_pos, count)) {
ret = -EINVAL;
goto out;
}
ret = count;
*f_pos += count;
out:
kfree(dev_buf);
return ret;
err_kmalloc:
return 0;
}
static const struct file_operations coh901318_debugfs_status_operations = {
.owner = THIS_MODULE,
.open = coh901318_debugfs_open,
.read = coh901318_debugfs_read,
};
static int __init init_coh901318_debugfs(void)
{
dma_dentry = debugfs_create_dir("dma", NULL);
(void) debugfs_create_file("status",
S_IFREG | S_IRUGO,
dma_dentry, NULL,
&coh901318_debugfs_status_operations);
return 0;
}
static void __exit exit_coh901318_debugfs(void)
{
debugfs_remove_recursive(dma_dentry);
}
module_init(init_coh901318_debugfs);
module_exit(exit_coh901318_debugfs);
#else
#define COH901318_DEBUGFS_ASSIGN(x, y)
#endif /* CONFIG_DEBUG_FS */
static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
{
return container_of(chan, struct coh901318_chan, chan);
}
static inline dma_addr_t
cohc_dev_addr(struct coh901318_chan *cohc)
{
return cohc->base->platform->chan_conf[cohc->id].dev_addr;
}
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
return &cohc->base->platform->chan_conf[cohc->id].param;
}
static inline const struct coh_dma_channel *
cohc_chan_conf(struct coh901318_chan *cohc)
{
return &cohc->base->platform->chan_conf[cohc->id];
}
static void enable_powersave(struct coh901318_chan *cohc)
{
unsigned long flags;
struct powersave *pm = &cohc->base->pm;
spin_lock_irqsave(&pm->lock, flags);
pm->started_channels &= ~(1ULL << cohc->id);
if (!pm->started_channels) {
/* DMA no longer intends to access memory */
cohc->base->platform->access_memory_state(cohc->base->dev,
false);
}
spin_unlock_irqrestore(&pm->lock, flags);
}
static void disable_powersave(struct coh901318_chan *cohc)
{
unsigned long flags;
struct powersave *pm = &cohc->base->pm;
spin_lock_irqsave(&pm->lock, flags);
if (!pm->started_channels) {
/* DMA intends to access memory */
cohc->base->platform->access_memory_state(cohc->base->dev,
true);
}
pm->started_channels |= (1ULL << cohc->id);
spin_unlock_irqrestore(&pm->lock, flags);
}
static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control)
{
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
writel(control,
virtbase + COH901318_CX_CTRL +
COH901318_CX_CTRL_SPACING * channel);
return 0;
}
static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf)
{
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
writel(conf,
virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING*channel);
return 0;
}
static int coh901318_start(struct coh901318_chan *cohc)
{
u32 val;
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
disable_powersave(cohc);
val = readl(virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING * channel);
/* Enable channel */
val |= COH901318_CX_CFG_CH_ENABLE;
writel(val, virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING * channel);
return 0;
}
static int coh901318_prep_linked_list(struct coh901318_chan *cohc,
struct coh901318_lli *data)
{
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
BUG_ON(readl(virtbase + COH901318_CX_STAT +
COH901318_CX_STAT_SPACING*channel) &
COH901318_CX_STAT_ACTIVE);
writel(data->src_addr,
virtbase + COH901318_CX_SRC_ADDR +
COH901318_CX_SRC_ADDR_SPACING * channel);
writel(data->dst_addr, virtbase +
COH901318_CX_DST_ADDR +
COH901318_CX_DST_ADDR_SPACING * channel);
writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR +
COH901318_CX_LNK_ADDR_SPACING * channel);
writel(data->control, virtbase + COH901318_CX_CTRL +
COH901318_CX_CTRL_SPACING * channel);
return 0;
}
static dma_cookie_t
coh901318_assign_cookie(struct coh901318_chan *cohc,
struct coh901318_desc *cohd)
{
dma_cookie_t cookie = cohc->chan.cookie;
if (++cookie < 0)
cookie = 1;
cohc->chan.cookie = cookie;
cohd->desc.cookie = cookie;
return cookie;
}
static struct coh901318_desc *
coh901318_desc_get(struct coh901318_chan *cohc)
{
struct coh901318_desc *desc;
if (list_empty(&cohc->free)) {
/* alloc a new desc because the free list is empty
* TODO: alloc a pile of descs instead of just one,
* avoid many small allocations.
*/
desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
if (desc == NULL)
goto out;
INIT_LIST_HEAD(&desc->node);
} else {
/* Reuse an old desc. */
desc = list_first_entry(&cohc->free,
struct coh901318_desc,
node);
list_del(&desc->node);
}
out:
return desc;
}
static void
coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd)
{
list_add_tail(&cohd->node, &cohc->free);
}
/* call with irq lock held */
static void
coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
list_add_tail(&desc->node, &cohc->active);
BUG_ON(cohc->pending_irqs != 0);
cohc->pending_irqs = desc->pending_irqs;
}
static struct coh901318_desc *
coh901318_first_active_get(struct coh901318_chan *cohc)
{
struct coh901318_desc *d;
if (list_empty(&cohc->active))
return NULL;
d = list_first_entry(&cohc->active,
struct coh901318_desc,
node);
return d;
}
static void
coh901318_desc_remove(struct coh901318_desc *cohd)
{
list_del(&cohd->node);
}
static void
coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
list_add_tail(&desc->node, &cohc->queue);
}
static struct coh901318_desc *
coh901318_first_queued(struct coh901318_chan *cohc)
{
struct coh901318_desc *d;
if (list_empty(&cohc->queue))
return NULL;
d = list_first_entry(&cohc->queue,
struct coh901318_desc,
node);
return d;
}
/*
* DMA start/stop controls
*/
u32 coh901318_get_bytes_left(struct dma_chan *chan)
{
unsigned long flags;
u32 ret;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
spin_lock_irqsave(&cohc->lock, flags);
/* Read transfer count value */
ret = readl(cohc->base->virtbase +
COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK;
spin_unlock_irqrestore(&cohc->lock, flags);
return ret;
}
EXPORT_SYMBOL(coh901318_get_bytes_left);
/* Stops a transfer without losing data. Enables power save.
Use this function in conjunction with coh901318_continue(..)
*/
void coh901318_stop(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
spin_lock_irqsave(&cohc->lock, flags);
/* Disable channel in HW */
val = readl(virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING * channel);
/* Stopping an infinite transfer */
if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 &&
(val & COH901318_CX_CFG_CH_ENABLE))
cohc->stopped = 1;
val &= ~COH901318_CX_CFG_CH_ENABLE;
/* Write twice, HW bug workaround */
writel(val, virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING * channel);
writel(val, virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING * channel);
/* Spin-wait for it to actually go inactive */
while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING *
channel) & COH901318_CX_STAT_ACTIVE)
cpu_relax();
/* Check if we stopped an active job */
if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING *
channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0)
cohc->stopped = 1;
enable_powersave(cohc);
spin_unlock_irqrestore(&cohc->lock, flags);
}
EXPORT_SYMBOL(coh901318_stop);
/* Continues a transfer that has been stopped via coh901318_stop(..).
Power save is handled.
*/
void coh901318_continue(struct dma_chan *chan)
{
u32 val;
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
int channel = cohc->id;
spin_lock_irqsave(&cohc->lock, flags);
disable_powersave(cohc);
if (cohc->stopped) {
/* Enable channel in HW */
val = readl(cohc->base->virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING * channel);
val |= COH901318_CX_CFG_CH_ENABLE;
writel(val, cohc->base->virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING*channel);
cohc->stopped = 0;
}
spin_unlock_irqrestore(&cohc->lock, flags);
}
EXPORT_SYMBOL(coh901318_continue);
bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
{
unsigned int ch_nr = (unsigned int) chan_id;
if (ch_nr == to_coh901318_chan(chan)->id)
return true;
return false;
}
EXPORT_SYMBOL(coh901318_filter_id);
/*
* DMA channel allocation
*/
static int coh901318_config(struct coh901318_chan *cohc,
struct coh901318_params *param)
{
unsigned long flags;
const struct coh901318_params *p;
int channel = cohc->id;
void __iomem *virtbase = cohc->base->virtbase;
spin_lock_irqsave(&cohc->lock, flags);
if (param)
p = param;
else
p = &cohc->base->platform->chan_conf[channel].param;
/* Clear any pending BE or TC interrupt */
if (channel < 32) {
writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1);
writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1);
} else {
writel(1 << (channel - 32), virtbase +
COH901318_BE_INT_CLEAR2);
writel(1 << (channel - 32), virtbase +
COH901318_TC_INT_CLEAR2);
}
coh901318_set_conf(cohc, p->config);
coh901318_set_ctrl(cohc, p->ctrl_lli_last);
spin_unlock_irqrestore(&cohc->lock, flags);
return 0;
}
/* must lock when calling this function
* start queued jobs, if any
* TODO: start all queued jobs in one go
*
* Returns the descriptor if a queued job is started; if the queue is
* empty NULL is returned.
*/
static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
{
struct coh901318_desc *cohd_que;
/* start queued jobs, if any
* TODO: transmit all queued jobs in one go
*/
cohd_que = coh901318_first_queued(cohc);
if (cohd_que != NULL) {
/* Remove from queue */
coh901318_desc_remove(cohd_que);
/* initiate DMA job */
cohc->busy = 1;
coh901318_desc_submit(cohc, cohd_que);
coh901318_prep_linked_list(cohc, cohd_que->data);
/* start dma job */
coh901318_start(cohc);
}
return cohd_que;
}
static void dma_tasklet(unsigned long data)
{
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
struct coh901318_desc *cohd_fin;
unsigned long flags;
dma_async_tx_callback callback;
void *callback_param;
spin_lock_irqsave(&cohc->lock, flags);
/* get first active entry from list */
cohd_fin = coh901318_first_active_get(cohc);
if (cohd_fin == NULL)
goto err;
/* only dereference after the NULL check */
BUG_ON(cohd_fin->pending_irqs == 0);
cohd_fin->pending_irqs--;
cohc->completed = cohd_fin->desc.cookie;
if (cohc->nbr_active_done == 0) {
/* do not return with the channel lock held */
spin_unlock_irqrestore(&cohc->lock, flags);
return;
}
if (!cohd_fin->pending_irqs) {
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
}
dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
" nbr_active_done %ld\n", __func__,
cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
/* callback to client */
callback = cohd_fin->desc.callback;
callback_param = cohd_fin->desc.callback_param;
if (!cohd_fin->pending_irqs) {
coh901318_desc_remove(cohd_fin);
/* return desc to free-list */
coh901318_desc_free(cohc, cohd_fin);
}
if (cohc->nbr_active_done)
cohc->nbr_active_done--;
if (cohc->nbr_active_done) {
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
tasklet_schedule(&cohc->tasklet);
}
spin_unlock_irqrestore(&cohc->lock, flags);
if (callback)
callback(callback_param);
return;
err:
spin_unlock_irqrestore(&cohc->lock, flags);
dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__);
}
/* called from interrupt context */
static void dma_tc_handle(struct coh901318_chan *cohc)
{
BUG_ON(!cohc->allocated && (list_empty(&cohc->active) ||
list_empty(&cohc->queue)));
if (!cohc->allocated)
return;
BUG_ON(cohc->pending_irqs == 0);
cohc->pending_irqs--;
cohc->nbr_active_done++;
if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
BUG_ON(list_empty(&cohc->active));
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
tasklet_schedule(&cohc->tasklet);
}
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
u32 status1;
u32 status2;
int i;
int ch;
struct coh901318_base *base = dev_id;
struct coh901318_chan *cohc;
void __iomem *virtbase = base->virtbase;
status1 = readl(virtbase + COH901318_INT_STATUS1);
status2 = readl(virtbase + COH901318_INT_STATUS2);
if (unlikely(status1 == 0 && status2 == 0)) {
dev_warn(base->dev, "spurious DMA IRQ from no channel!\n");
return IRQ_HANDLED;
}
/* TODO: consider handling the IRQ in a tasklet here to
* minimize interrupt latency */
/* Check the first 32 DMA channels for IRQ */
while (status1) {
/* Find first bit set, return as a number. */
i = ffs(status1) - 1;
ch = i;
cohc = &base->chans[ch];
spin_lock(&cohc->lock);
/* Mask off this bit */
status1 &= ~(1 << i);
/* Check the individual channel bits */
if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) {
dev_crit(COHC_2_DEV(cohc),
"DMA bus error on channel %d!\n", ch);
BUG_ON(1);
/* Clear BE interrupt */
__set_bit(i, virtbase + COH901318_BE_INT_CLEAR1);
} else {
/* Caused by TC, really? */
if (unlikely(!test_bit(i, virtbase +
COH901318_TC_INT_STATUS1))) {
dev_warn(COHC_2_DEV(cohc),
"ignoring interrupt not caused by terminal count on channel %d\n", ch);
/* Clear TC interrupt */
BUG_ON(1);
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
} else {
/* Enable powersave if transfer has finished */
if (!(readl(virtbase + COH901318_CX_STAT +
COH901318_CX_STAT_SPACING*ch) &
COH901318_CX_STAT_ENABLED)) {
enable_powersave(cohc);
}
/* Must clear the TC interrupt before calling
* dma_tc_handle()
* in case dma_tc_handle() initiates a new dma job
*/
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR1);
dma_tc_handle(cohc);
}
}
spin_unlock(&cohc->lock);
}
/* Check the remaining 32 DMA channels for IRQ */
while (status2) {
/* Find first bit set, return as a number. */
i = ffs(status2) - 1;
ch = i + 32;
cohc = &base->chans[ch];
spin_lock(&cohc->lock);
/* Mask off this bit */
status2 &= ~(1 << i);
/* Check the individual channel bits */
if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) {
dev_crit(COHC_2_DEV(cohc),
"DMA bus error on channel %d!\n", ch);
/* Clear BE interrupt */
BUG_ON(1);
__set_bit(i, virtbase + COH901318_BE_INT_CLEAR2);
} else {
/* Caused by TC, really? */
if (unlikely(!test_bit(i, virtbase +
COH901318_TC_INT_STATUS2))) {
dev_warn(COHC_2_DEV(cohc),
"ignoring interrupt not caused by terminal count on channel %d\n", ch);
/* Clear TC interrupt */
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
BUG_ON(1);
} else {
/* Enable powersave if transfer has finished */
if (!(readl(virtbase + COH901318_CX_STAT +
COH901318_CX_STAT_SPACING*ch) &
COH901318_CX_STAT_ENABLED)) {
enable_powersave(cohc);
}
/* Must clear the TC interrupt before calling
* dma_tc_handle()
* in case dma_tc_handle() initiates a new dma job
*/
__set_bit(i, virtbase + COH901318_TC_INT_CLEAR2);
dma_tc_handle(cohc);
}
}
spin_unlock(&cohc->lock);
}
return IRQ_HANDLED;
}
static int coh901318_alloc_chan_resources(struct dma_chan *chan)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n",
__func__, cohc->id);
if (chan->client_count > 1)
return -EBUSY;
coh901318_config(cohc, NULL);
cohc->allocated = 1;
cohc->completed = chan->cookie = 1;
return 1;
}
static void
coh901318_free_chan_resources(struct dma_chan *chan)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
int channel = cohc->id;
unsigned long flags;
spin_lock_irqsave(&cohc->lock, flags);
/* Disable HW */
writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG +
COH901318_CX_CFG_SPACING*channel);
writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL +
COH901318_CX_CTRL_SPACING*channel);
cohc->allocated = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
chan->device->device_terminate_all(chan);
}
static dma_cookie_t
coh901318_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc,
desc);
struct coh901318_chan *cohc = to_coh901318_chan(tx->chan);
unsigned long flags;
spin_lock_irqsave(&cohc->lock, flags);
tx->cookie = coh901318_assign_cookie(cohc, cohd);
coh901318_desc_queue(cohc, cohd);
spin_unlock_irqrestore(&cohc->lock, flags);
return tx->cookie;
}
static struct dma_async_tx_descriptor *
coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t size, unsigned long flags)
{
struct coh901318_lli *data;
struct coh901318_desc *cohd;
unsigned long flg;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
int lli_len;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc),
"[%s] channel %d src 0x%x dest 0x%x size %d\n",
__func__, cohc->id, src, dest, size);
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last lli */
ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT;
if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size)
lli_len++;
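/* e.g. with MAX_DMA_PACKET_SIZE == 2048, a 5000 byte copy needs
 * 5000 >> 11 == 2 full packets plus a remainder, so lli_len == 3 */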
data = coh901318_lli_alloc(&cohc->base->pool, lli_len);
if (data == NULL)
goto err;
cohd = coh901318_desc_get(cohc);
cohd->sg = NULL;
cohd->sg_len = 0;
cohd->data = data;
cohd->pending_irqs =
coh901318_lli_fill_memcpy(
&cohc->base->pool, data, src, size, dest,
cohc_chan_param(cohc)->ctrl_lli_chained,
ctrl_last);
cohd->flags = flags;
COH_DBG(coh901318_list_print(cohc, data));
dma_async_tx_descriptor_init(&cohd->desc, chan);
cohd->desc.tx_submit = coh901318_tx_submit;
spin_unlock_irqrestore(&cohc->lock, flg);
return &cohd->desc;
err:
spin_unlock_irqrestore(&cohc->lock, flg);
return NULL;
}
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
unsigned int sg_len, enum dma_data_direction direction,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_lli *data;
struct coh901318_desc *cohd;
struct scatterlist *sg;
int len = 0;
int size;
int i;
u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
unsigned long flg;
if (!sgl)
goto out;
if (sgl->length == 0)
goto out;
spin_lock_irqsave(&cohc->lock, flg);
dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n",
__func__, sg_len, direction);
if (flags & DMA_PREP_INTERRUPT)
/* Trigger interrupt after last lli */
ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
cohd = coh901318_desc_get(cohc);
cohd->sg = NULL;
cohd->sg_len = 0;
cohd->dir = direction;
if (direction == DMA_TO_DEVICE) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
} else if (direction == DMA_FROM_DEVICE) {
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
ctrl_chained |= rx_flags;
ctrl_last |= rx_flags;
ctrl |= rx_flags;
} else
goto err_direction;
dma_async_tx_descriptor_init(&cohd->desc, chan);
cohd->desc.tx_submit = coh901318_tx_submit;
/* The dma only supports transferring packets up to
* MAX_DMA_PACKET_SIZE. Calculate the total number of
* dma elements required to send the entire sg list
*/
for_each_sg(sgl, sg, sg_len, i) {
unsigned int factor;
size = sg_dma_len(sg);
if (size <= MAX_DMA_PACKET_SIZE) {
len++;
continue;
}
factor = size >> MAX_DMA_PACKET_SIZE_SHIFT;
if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size)
factor++;
len += factor;
}
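/* e.g. (illustrative) sg entries of 1000, 3000 and 5000 bytes need
 * 1 + 2 + 3 == 6 llis with MAX_DMA_PACKET_SIZE == 2048 */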
data = coh901318_lli_alloc(&cohc->base->pool, len);
if (data == NULL)
goto err_dma_alloc;
/* initialize the allocated lli list */
cohd->pending_irqs =
coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
cohc_dev_addr(cohc),
ctrl_chained,
ctrl,
ctrl_last,
direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
cohd->data = data;
cohd->flags = flags;
COH_DBG(coh901318_list_print(cohc, data));
spin_unlock_irqrestore(&cohc->lock, flg);
return &cohd->desc;
err_dma_alloc:
err_direction:
coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
spin_unlock_irqrestore(&cohc->lock, flg);
out:
return NULL;
}
static enum dma_status
coh901318_is_tx_complete(struct dma_chan *chan,
dma_cookie_t cookie, dma_cookie_t *done,
dma_cookie_t *used)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
int ret;
last_complete = cohc->completed;
last_used = chan->cookie;
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (done)
*done = last_complete;
if (used)
*used = last_used;
return ret;
}
static void
coh901318_issue_pending(struct dma_chan *chan)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
unsigned long flags;
spin_lock_irqsave(&cohc->lock, flags);
/* Busy means that pending jobs are already being processed */
if (!cohc->busy)
coh901318_queue_start(cohc);
spin_unlock_irqrestore(&cohc->lock, flags);
}
static void
coh901318_terminate_all(struct dma_chan *chan)
{
unsigned long flags;
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_desc *cohd;
void __iomem *virtbase = cohc->base->virtbase;
coh901318_stop(chan);
spin_lock_irqsave(&cohc->lock, flags);
/* Clear any pending BE or TC interrupt */
if (cohc->id < 32) {
writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1);
writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1);
} else {
writel(1 << (cohc->id - 32), virtbase +
COH901318_BE_INT_CLEAR2);
writel(1 << (cohc->id - 32), virtbase +
COH901318_TC_INT_CLEAR2);
}
enable_powersave(cohc);
while ((cohd = coh901318_first_active_get(cohc))) {
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
coh901318_desc_remove(cohd);
/* return desc to free-list */
coh901318_desc_free(cohc, cohd);
}
while ((cohd = coh901318_first_queued(cohc))) {
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
coh901318_desc_remove(cohd);
/* return desc to free-list */
coh901318_desc_free(cohc, cohd);
}
cohc->nbr_active_done = 0;
cohc->busy = 0;
cohc->pending_irqs = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
}
void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
{
int chans_i;
int i = 0;
struct coh901318_chan *cohc;
INIT_LIST_HEAD(&dma->channels);
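/* pick_chans holds pairs of inclusive channel ranges
 * (start0, end0, start1, end1, ...), terminated by -1 */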
for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) {
for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) {
cohc = &base->chans[i];
cohc->base = base;
cohc->chan.device = dma;
cohc->id = i;
/* TODO: do we really need this lock if only one
* client is connected to each channel?
*/
spin_lock_init(&cohc->lock);
cohc->pending_irqs = 0;
cohc->nbr_active_done = 0;
cohc->busy = 0;
INIT_LIST_HEAD(&cohc->free);
INIT_LIST_HEAD(&cohc->active);
INIT_LIST_HEAD(&cohc->queue);
tasklet_init(&cohc->tasklet, dma_tasklet,
(unsigned long) cohc);
list_add_tail(&cohc->chan.device_node,
&dma->channels);
}
}
}
static int __init coh901318_probe(struct platform_device *pdev)
{
int err = 0;
struct coh901318_platform *pdata;
struct coh901318_base *base;
int irq;
struct resource *io;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io) {
err = -ENODEV;
goto err_get_resource;
}
/* Map DMA controller registers to virtual memory */
if (request_mem_region(io->start,
resource_size(io),
pdev->dev.driver->name) == NULL) {
err = -EBUSY;
goto err_request_mem;
}
pdata = pdev->dev.platform_data;
if (!pdata) {
err = -ENODEV;
goto err_no_platformdata;
}
base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) +
pdata->max_channels *
sizeof(struct coh901318_chan),
GFP_KERNEL);
if (!base) {
err = -ENOMEM;
goto err_alloc_coh_dma_channels;
}
base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4);
base->virtbase = ioremap(io->start, resource_size(io));
if (!base->virtbase) {
err = -ENOMEM;
goto err_no_ioremap;
}
base->dev = &pdev->dev;
base->platform = pdata;
spin_lock_init(&base->pm.lock);
base->pm.started_channels = 0;
COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base);
platform_set_drvdata(pdev, base);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
goto err_no_irq;
}
err = request_irq(irq, dma_irq_handler, IRQF_DISABLED,
"coh901318", base);
if (err) {
dev_crit(&pdev->dev,
"Cannot allocate IRQ for DMA controller!\n");
goto err_request_irq;
}
err = coh901318_pool_create(&base->pool, &pdev->dev,
sizeof(struct coh901318_lli),
32);
if (err)
goto err_pool_create;
/* init channels for device transfers */
coh901318_base_init(&base->dma_slave, base->platform->chans_slave,
base);
dma_cap_zero(base->dma_slave.cap_mask);
dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
base->dma_slave.device_terminate_all = coh901318_terminate_all;
base->dma_slave.dev = &pdev->dev;
err = dma_async_device_register(&base->dma_slave);
if (err)
goto err_register_slave;
/* init channels for memcpy */
coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy,
base);
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources;
base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources;
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
base->dma_memcpy.dev = &pdev->dev;
err = dma_async_device_register(&base->dma_memcpy);
if (err)
goto err_register_memcpy;
dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
(u32) base->virtbase);
return err;
err_register_memcpy:
dma_async_device_unregister(&base->dma_slave);
err_register_slave:
coh901318_pool_destroy(&base->pool);
err_pool_create:
free_irq(platform_get_irq(pdev, 0), base);
err_request_irq:
err_no_irq:
iounmap(base->virtbase);
err_no_ioremap:
kfree(base);
err_alloc_coh_dma_channels:
err_no_platformdata:
release_mem_region(pdev->resource->start,
resource_size(pdev->resource));
err_request_mem:
err_get_resource:
return err;
}
static int __exit coh901318_remove(struct platform_device *pdev)
{
struct coh901318_base *base = platform_get_drvdata(pdev);
dma_async_device_unregister(&base->dma_memcpy);
dma_async_device_unregister(&base->dma_slave);
coh901318_pool_destroy(&base->pool);
free_irq(platform_get_irq(pdev, 0), base);
kfree(base);
iounmap(base->virtbase);
release_mem_region(pdev->resource->start,
resource_size(pdev->resource));
return 0;
}
static struct platform_driver coh901318_driver = {
.remove = __exit_p(coh901318_remove),
.driver = {
.name = "coh901318",
},
};
int __init coh901318_init(void)
{
return platform_driver_probe(&coh901318_driver, coh901318_probe);
}
subsys_initcall(coh901318_init);
void __exit coh901318_exit(void)
{
platform_driver_unregister(&coh901318_driver);
}
module_exit(coh901318_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Per Friden");
/*
* drivers/dma/coh901318_lli.c
*
* Copyright (C) 2007-2009 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
* Support functions for handling lli for dma
* Author: Per Friden <per.friden@stericsson.com>
*/
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/dmapool.h>
#include <linux/memory.h>
#include <mach/coh901318.h>
#include "coh901318_lli.h"
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
#define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
#else
#define DEBUGFS_POOL_COUNTER_RESET(pool)
#define DEBUGFS_POOL_COUNTER_ADD(pool, add)
#endif
static struct coh901318_lli *
coh901318_lli_next(struct coh901318_lli *data)
{
if (data == NULL || data->link_addr == 0)
return NULL;
return (struct coh901318_lli *) data->virt_link_addr;
}
int coh901318_pool_create(struct coh901318_pool *pool,
struct device *dev,
size_t size, size_t align)
{
spin_lock_init(&pool->lock);
pool->dev = dev;
pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
DEBUGFS_POOL_COUNTER_RESET(pool);
return 0;
}
int coh901318_pool_destroy(struct coh901318_pool *pool)
{
dma_pool_destroy(pool->dmapool);
return 0;
}
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
{
int i;
struct coh901318_lli *head;
struct coh901318_lli *lli;
struct coh901318_lli *lli_prev;
dma_addr_t phy;
/* return directly, the pool lock is not held yet */
if (len == 0)
return NULL;
spin_lock(&pool->lock);
head = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
if (head == NULL)
goto err;
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli = head;
lli->phy_this = phy;
for (i = 1; i < len; i++) {
lli_prev = lli;
lli = dma_pool_alloc(pool->dmapool, GFP_NOWAIT, &phy);
if (lli == NULL)
goto err_clean_up;
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy;
lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli;
}
lli->link_addr = 0x00000000U;
spin_unlock(&pool->lock);
return head;
err:
spin_unlock(&pool->lock);
return NULL;
err_clean_up:
lli_prev->link_addr = 0x00000000U;
spin_unlock(&pool->lock);
coh901318_lli_free(pool, &head);
return NULL;
}
void coh901318_lli_free(struct coh901318_pool *pool,
struct coh901318_lli **lli)
{
struct coh901318_lli *l;
struct coh901318_lli *next;
if (lli == NULL)
return;
l = *lli;
if (l == NULL)
return;
spin_lock(&pool->lock);
while (l->link_addr) {
next = l->virt_link_addr;
dma_pool_free(pool->dmapool, l, l->phy_this);
DEBUGFS_POOL_COUNTER_ADD(pool, -1);
l = next;
}
dma_pool_free(pool->dmapool, l, l->phy_this);
DEBUGFS_POOL_COUNTER_ADD(pool, -1);
spin_unlock(&pool->lock);
*lli = NULL;
}
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t source, unsigned int size,
dma_addr_t destination, u32 ctrl_chained,
u32 ctrl_eom)
{
int s = size;
dma_addr_t src = source;
dma_addr_t dst = destination;
lli->src_addr = src;
lli->dst_addr = dst;
while (lli->link_addr) {
lli->control = ctrl_chained | MAX_DMA_PACKET_SIZE;
lli->src_addr = src;
lli->dst_addr = dst;
s -= MAX_DMA_PACKET_SIZE;
lli = coh901318_lli_next(lli);
src += MAX_DMA_PACKET_SIZE;
dst += MAX_DMA_PACKET_SIZE;
}
lli->control = ctrl_eom | s;
lli->src_addr = src;
lli->dst_addr = dst;
/* One irq per single transfer */
return 1;
}
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
enum dma_data_direction dir)
{
int s = size;
dma_addr_t src;
dma_addr_t dst;
if (dir == DMA_TO_DEVICE) {
src = buf;
dst = dev_addr;
} else if (dir == DMA_FROM_DEVICE) {
src = dev_addr;
dst = buf;
} else {
return -EINVAL;
}
while (lli->link_addr) {
size_t block_size = MAX_DMA_PACKET_SIZE;
/* If we are on the next-to-final block and there would
* be less than half a DMA packet left for the last
* block, then we want to make this block a little
* smaller to balance the sizes. This is meant to
* avoid too small transfers if the buffer size is
* (MAX_DMA_PACKET_SIZE*N + 1) */
if (s < (MAX_DMA_PACKET_SIZE + MAX_DMA_PACKET_SIZE/2))
block_size = MAX_DMA_PACKET_SIZE/2;
/* the control word must carry the actual block size */
lli->control = ctrl_chained | block_size;
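/* e.g. a 2049 byte buffer is split as 1024 + 1025 rather than 2048 + 1 */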
s -= block_size;
lli->src_addr = src;
lli->dst_addr = dst;
lli = coh901318_lli_next(lli);
if (dir == DMA_TO_DEVICE)
src += block_size;
else if (dir == DMA_FROM_DEVICE)
dst += block_size;
}
lli->control = ctrl_eom | s;
lli->src_addr = src;
lli->dst_addr = dst;
/* One irq per single transfer */
return 1;
}
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct coh901318_lli *lli,
struct scatterlist *sgl, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
u32 ctrl_last,
enum dma_data_direction dir, u32 ctrl_irq_mask)
{
int i;
struct scatterlist *sg;
u32 ctrl_sg;
dma_addr_t src = 0;
dma_addr_t dst = 0;
int nbr_of_irq = 0;
u32 bytes_to_transfer;
u32 elem_size;
if (lli == NULL)
goto err;
spin_lock(&pool->lock);
if (dir == DMA_TO_DEVICE)
dst = dev_addr;
else if (dir == DMA_FROM_DEVICE)
src = dev_addr;
else
goto err;
for_each_sg(sgl, sg, nents, i) {
if (sg_is_chain(sg)) {
/* the sg continues in the next sg-element; don't
* send ctrl_last until the last
* sg-element in the chain
*/
ctrl_sg = ctrl_chained;
} else if (i == nents - 1)
ctrl_sg = ctrl_last;
else
ctrl_sg = ctrl ? ctrl : ctrl_last;
if ((ctrl_sg & ctrl_irq_mask))
nbr_of_irq++;
if (dir == DMA_TO_DEVICE)
/* pick the source address from the sg entry */
src = sg_dma_address(sg);
else
/* pick the destination address from the sg entry */
dst = sg_dma_address(sg);
bytes_to_transfer = sg_dma_len(sg);
while (bytes_to_transfer) {
u32 val;
if (bytes_to_transfer > MAX_DMA_PACKET_SIZE) {
elem_size = MAX_DMA_PACKET_SIZE;
val = ctrl_chained;
} else {
elem_size = bytes_to_transfer;
val = ctrl_sg;
}
lli->control = val | elem_size;
lli->src_addr = src;
lli->dst_addr = dst;
if (dir == DMA_FROM_DEVICE)
dst += elem_size;
else
src += elem_size;
BUG_ON(lli->link_addr & 3);
bytes_to_transfer -= elem_size;
lli = coh901318_lli_next(lli);
}
}
spin_unlock(&pool->lock);
/* There can be many IRQs per sg transfer */
return nbr_of_irq;
err:
spin_unlock(&pool->lock);
return -EINVAL;
}
/*
* drivers/dma/coh901318_lli.h
*
* Copyright (C) 2007-2009 ST-Ericsson
* License terms: GNU General Public License (GPL) version 2
* Support functions for handling lli for coh901318
* Author: Per Friden <per.friden@stericsson.com>
*/
#ifndef COH901318_LLI_H
#define COH901318_LLI_H
#include <mach/coh901318.h>
struct device;
struct coh901318_pool {
spinlock_t lock;
struct dma_pool *dmapool;
struct device *dev;
#ifdef CONFIG_DEBUG_FS
int debugfs_pool_counter;
#endif
};
struct device;
/**
* coh901318_pool_create() - Creates a dma pool for lli:s
* @pool: pool handle
* @dev: dma device
* @size: size of one lli
* @align: address alignment of lli:s
* returns 0 on success, otherwise non-zero
*/
int coh901318_pool_create(struct coh901318_pool *pool,
struct device *dev,
size_t size, size_t align);
/**
* coh901318_pool_destroy() - Destroys the dma pool
* @pool: pool handle
* returns 0 on success, otherwise non-zero
*/
int coh901318_pool_destroy(struct coh901318_pool *pool);
/**
* coh901318_lli_alloc() - Allocates a linked list
*
* @pool: pool handle
* @len: number of lli:s in the list
* returns non-NULL on success, otherwise NULL
*/
struct coh901318_lli *
coh901318_lli_alloc(struct coh901318_pool *pool,
unsigned int len);
/**
* coh901318_lli_free() - Returns the linked list items to the pool
* @pool: pool handle
* @lli: reference to lli pointer to be freed
*/
void coh901318_lli_free(struct coh901318_pool *pool,
struct coh901318_lli **lli);
/**
* coh901318_lli_fill_memcpy() - Prepares the lli:s for dma memcpy
* @pool: pool handle
* @lli: allocated lli
* @src: src address
* @size: transfer size
* @dst: destination address
* @ctrl_chained: ctrl for chained lli
* @ctrl_last: ctrl for the last lli
* returns number of CPU interrupts for the lli, negative on error.
*/
int
coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t src, unsigned int size,
dma_addr_t dst, u32 ctrl_chained, u32 ctrl_last);
/**
* coh901318_lli_fill_single() - Prepares the lli:s for dma single transfer
* @pool: pool handle
* @lli: allocated lli
* @buf: transfer buffer
* @size: transfer size
* @dev_addr: address of the peripheral
* @ctrl_chained: ctrl for chained lli
* @ctrl_last: ctrl for the last lli
* @dir: direction of transfer (to or from device)
* returns number of CPU interrupts for the lli, negative on error.
*/
int
coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
enum dma_data_direction dir);
/**
* coh901318_lli_fill_sg() - Prepares the lli:s for dma scatter list transfer
* @pool: pool handle
* @lli: allocated lli
* @sg: scatter gather list
* @nents: number of entries in sg
* @dev_addr: address of the peripheral
* @ctrl_chained: ctrl for chained lli
* @ctrl: ctrl of middle lli
* @ctrl_last: ctrl for the last lli
* @dir: direction of transfer (to or from device)
* @ctrl_irq_mask: ctrl mask for CPU interrupt
* returns number of CPU interrupts for the lli, negative on error.
*/
int
coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct coh901318_lli *lli,
struct scatterlist *sg, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained,
u32 ctrl, u32 ctrl_last,
enum dma_data_direction dir, u32 ctrl_irq_mask);
#endif /* COH901318_LLI_H */
@@ -298,10 +298,6 @@ static int dmatest_func(void *data)
total_tests++;
-		len = dmatest_random() % test_buf_size + 1;
-		src_off = dmatest_random() % (test_buf_size - len + 1);
-		dst_off = dmatest_random() % (test_buf_size - len + 1);
/* honor alignment restrictions */
if (thread->type == DMA_MEMCPY)
align = dev->copy_align;
@@ -310,7 +306,19 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_PQ)
align = dev->pq_align;
+		if (1 << align > test_buf_size) {
+			pr_err("%u-byte buffer too small for %d-byte alignment\n",
+			       test_buf_size, 1 << align);
+			break;
+		}
+		len = dmatest_random() % test_buf_size + 1;
+		len = (len >> align) << align;
+		if (!len)
+			len = 1 << align;
+		src_off = dmatest_random() % (test_buf_size - len + 1);
+		dst_off = dmatest_random() % (test_buf_size - len + 1);
+		src_off = (src_off >> align) << align;
+		dst_off = (dst_off >> align) << align;
@@ -1470,7 +1470,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
return -ENODEV;
if (!devm_request_mem_region(&pdev->dev, res->start,
-				     res->end - res->start, pdev->name))
+				     resource_size(res), pdev->name))
return -EBUSY;
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
@@ -1542,7 +1542,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
iop_chan->device = adev;
iop_chan->mmr_base = devm_ioremap(&pdev->dev, res->start,
-					  res->end - res->start);
+					  resource_size(res));
if (!iop_chan->mmr_base) {
ret = -ENOMEM;
goto err_free_iop_chan;
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += adma.o
/*
* 2006-2009 (C) DENX Software Engineering.
*
* Author: Yuri Tikhonov <yur@emcraft.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of
* any kind, whether express or implied.
*/
#ifndef _PPC440SPE_ADMA_H
#define _PPC440SPE_ADMA_H
#include <linux/types.h>
#include "dma.h"
#include "xor.h"
#define to_ppc440spe_adma_chan(chan) \
container_of(chan, struct ppc440spe_adma_chan, common)
#define to_ppc440spe_adma_device(dev) \
container_of(dev, struct ppc440spe_adma_device, common)
#define tx_to_ppc440spe_adma_slot(tx) \
container_of(tx, struct ppc440spe_adma_desc_slot, async_tx)
/* Default polynomial (the only one available on the 440SP) */
#define PPC440SPE_DEFAULT_POLY 0x4d
#define PPC440SPE_ADMA_ENGINES_NUM (XOR_ENGINES_NUM + DMA_ENGINES_NUM)
#define PPC440SPE_ADMA_WATCHDOG_MSEC 3
#define PPC440SPE_ADMA_THRESHOLD 1
#define PPC440SPE_DMA0_ID 0
#define PPC440SPE_DMA1_ID 1
#define PPC440SPE_XOR_ID 2
#define PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT 0xFFFFFFUL
/* this is the XOR_CBBCR width */
#define PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT (1 << 31)
#define PPC440SPE_ADMA_ZERO_SUM_MAX_BYTE_COUNT PPC440SPE_ADMA_XOR_MAX_BYTE_COUNT
#define PPC440SPE_RXOR_RUN 0
#define MQ0_CF2H_RXOR_BS_MASK 0x1FF
#undef ADMA_LL_DEBUG
/**
* struct ppc440spe_adma_device - internal representation of an ADMA device
* @dev: device
* @dma_reg: base for DMAx register access
* @xor_reg: base for XOR register access
* @i2o_reg: base for I2O register access
* @id: HW ADMA Device selector
* @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
* @dma_desc_pool: base of DMA descriptor region (DMA address)
* @pool_size: size of the pool
* @irq: DMAx or XOR irq number
* @err_irq: DMAx error irq number
* @common: embedded struct dma_device
*/
struct ppc440spe_adma_device {
struct device *dev;
struct dma_regs __iomem *dma_reg;
struct xor_regs __iomem *xor_reg;
struct i2o_regs __iomem *i2o_reg;
int id;
void *dma_desc_pool_virt;
dma_addr_t dma_desc_pool;
size_t pool_size;
int irq;
int err_irq;
struct dma_device common;
};
/**
* struct ppc440spe_adma_chan - internal representation of an ADMA channel
* @lock: serializes enqueue/dequeue operations to the slot pool
* @device: parent device
* @chain: device chain view of the descriptors
* @common: common dmaengine channel object members
* @all_slots: complete domain of slots usable by the channel
* @pending: allows batching of hardware operations
* @completed_cookie: identifier for the most recently completed operation
* @slots_allocated: records the actual size of the descriptor slot pool
* @hw_chain_inited: h/w descriptor chain initialization flag
* @irq_tasklet: bottom half where ppc440spe_adma_slot_cleanup runs
* @needs_unmap: set if buffers should be unmapped upon final processing
* @pdest_page: P destination page for async validate operation
* @qdest_page: Q destination page for async validate operation
* @pdest: P dma addr for async validate operation
* @qdest: Q dma addr for async validate operation
*/
struct ppc440spe_adma_chan {
spinlock_t lock;
struct ppc440spe_adma_device *device;
struct list_head chain;
struct dma_chan common;
struct list_head all_slots;
struct ppc440spe_adma_desc_slot *last_used;
int pending;
dma_cookie_t completed_cookie;
int slots_allocated;
int hw_chain_inited;
struct tasklet_struct irq_tasklet;
u8 needs_unmap;
struct page *pdest_page;
struct page *qdest_page;
dma_addr_t pdest;
dma_addr_t qdest;
};
struct ppc440spe_rxor {
u32 addrl;
u32 addrh;
int len;
int xor_count;
int addr_count;
int desc_count;
int state;
};
/**
* struct ppc440spe_adma_desc_slot - PPC440SPE-ADMA software descriptor
* @phys: hardware address of the hardware descriptor chain
* @group_head: first operation in a transaction
* @hw_next: pointer to the next descriptor in chain
* @async_tx: support for the async_tx api
* @slot_node: node on the ppc440spe_adma_chan.all_slots list
* @chain_node: node on the ppc440spe_adma_chan.chain list
* @group_list: list of slots that make up a multi-descriptor transaction
* for example transfer lengths larger than the supported hw max
* @unmap_len: transaction bytecount
* @hw_desc: virtual address of the hardware descriptor chain
* @stride: currently chained or not
* @idx: pool index
* @slot_cnt: total slots used in a transaction (group of operations)
* @src_cnt: number of sources set in this descriptor
* @dst_cnt: number of destinations set in the descriptor
* @slots_per_op: number of slots per operation
* @descs_per_op: number of slots per P/Q operation; see the comment
* for the ppc440spe_prep_dma_pqxor function
* @flags: desc state/type
* @reverse_flags: 1 if a corresponding rxor address uses reversed address order
* @xor_check_result: result of zero sum
* @crc32_result: result of CRC calculation
*/
struct ppc440spe_adma_desc_slot {
dma_addr_t phys;
struct ppc440spe_adma_desc_slot *group_head;
struct ppc440spe_adma_desc_slot *hw_next;
struct dma_async_tx_descriptor async_tx;
struct list_head slot_node;
struct list_head chain_node; /* node in channel ops list */
struct list_head group_list; /* list */
unsigned int unmap_len;
void *hw_desc;
u16 stride;
u16 idx;
u16 slot_cnt;
u8 src_cnt;
u8 dst_cnt;
u8 slots_per_op;
u8 descs_per_op;
unsigned long flags;
unsigned long reverse_flags[8];
#define PPC440SPE_DESC_INT 0 /* generate interrupt on complete */
#define PPC440SPE_ZERO_P 1 /* clear P destination */
#define PPC440SPE_ZERO_Q 2 /* clear Q destination */
#define PPC440SPE_COHERENT 3 /* src/dst are coherent */
#define PPC440SPE_DESC_WXOR 4 /* WXORs are in chain */
#define PPC440SPE_DESC_RXOR 5 /* RXOR is in chain */
#define PPC440SPE_DESC_RXOR123 8 /* CDB for RXOR123 operation */
#define PPC440SPE_DESC_RXOR124 9 /* CDB for RXOR124 operation */
#define PPC440SPE_DESC_RXOR125 10 /* CDB for RXOR125 operation */
#define PPC440SPE_DESC_RXOR12 11 /* CDB for RXOR12 operation */
#define PPC440SPE_DESC_RXOR_REV 12 /* CDB has srcs in reversed order */
#define PPC440SPE_DESC_PCHECK 13
#define PPC440SPE_DESC_QCHECK 14
#define PPC440SPE_DESC_RXOR_MSK 0x3
struct ppc440spe_rxor rxor_cursor;
union {
u32 *xor_check_result;
u32 *crc32_result;
};
};
#endif /* _PPC440SPE_ADMA_H */
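Note that the PPC440SPE_DESC_* and PPC440SPE_ZERO_* values above are bit numbers into desc->flags rather than masks (only PPC440SPE_DESC_RXOR_MSK is a mask), so they are used with the generic bitops. A hypothetical helper, purely to show the pattern and not part of the driver:

#include <linux/bitops.h>

/* Illustrative only: query whether this descriptor raises an interrupt. */
static inline bool example_desc_wants_irq(struct ppc440spe_adma_desc_slot *desc)
{
	return test_bit(PPC440SPE_DESC_INT, &desc->flags);
}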
/*
* 440SPe's DMA engines support header file
*
* 2006-2009 (C) DENX Software Engineering.
*
* Author: Yuri Tikhonov <yur@emcraft.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef _PPC440SPE_DMA_H
#define _PPC440SPE_DMA_H
#include <linux/types.h>
/* Number of elements in the array with static CDBs */
#define MAX_STAT_DMA_CDBS 16
/* Number of DMA engines available on the controller */
#define DMA_ENGINES_NUM 2
/* Maximum h/w supported number of destinations */
#define DMA_DEST_MAX_NUM 2
/* FIFO's params */
#define DMA0_FIFO_SIZE 0x1000
#define DMA1_FIFO_SIZE 0x1000
#define DMA_FIFO_ENABLE (1<<12)
/* DMA Configuration Register. Data Transfer Engine PLB Priority: */
#define DMA_CFG_DXEPR_LP (0<<26)
#define DMA_CFG_DXEPR_HP (3<<26)
#define DMA_CFG_DXEPR_HHP (2<<26)
#define DMA_CFG_DXEPR_HHHP (1<<26)
/* DMA Configuration Register. DMA FIFO Manager PLB Priority: */
#define DMA_CFG_DFMPP_LP (0<<23)
#define DMA_CFG_DFMPP_HP (3<<23)
#define DMA_CFG_DFMPP_HHP (2<<23)
#define DMA_CFG_DFMPP_HHHP (1<<23)
/* DMA Configuration Register. Force 64-byte Alignment */
#define DMA_CFG_FALGN (1 << 19)
/*UIC0:*/
#define D0CPF_INT (1<<12)
#define D0CSF_INT (1<<11)
#define D1CPF_INT (1<<10)
#define D1CSF_INT (1<<9)
/*UIC1:*/
#define DMAE_INT (1<<9)
/* I2O IOP Interrupt Mask Register */
#define I2O_IOPIM_P0SNE (1<<3)
#define I2O_IOPIM_P0EM (1<<5)
#define I2O_IOPIM_P1SNE (1<<6)
#define I2O_IOPIM_P1EM (1<<8)
/* DMA CDB fields */
#define DMA_CDB_MSK (0xF)
#define DMA_CDB_64B_ADDR (1<<2)
#define DMA_CDB_NO_INT (1<<3)
#define DMA_CDB_STATUS_MSK (0x3)
#define DMA_CDB_ADDR_MSK (0xFFFFFFF0)
/* DMA CDB OpCodes */
#define DMA_CDB_OPC_NO_OP (0x00)
#define DMA_CDB_OPC_MV_SG1_SG2 (0x01)
#define DMA_CDB_OPC_MULTICAST (0x05)
#define DMA_CDB_OPC_DFILL128 (0x24)
#define DMA_CDB_OPC_DCHECK128 (0x23)
#define DMA_CUED_XOR_BASE (0x10000000)
#define DMA_CUED_XOR_HB (0x00000008)
#ifdef CONFIG_440SP
#define DMA_CUED_MULT1_OFF 0
#define DMA_CUED_MULT2_OFF 8
#define DMA_CUED_MULT3_OFF 16
#define DMA_CUED_REGION_OFF 24
#define DMA_CUED_XOR_WIN_MSK (0xFC000000)
#else
#define DMA_CUED_MULT1_OFF 2
#define DMA_CUED_MULT2_OFF 10
#define DMA_CUED_MULT3_OFF 18
#define DMA_CUED_REGION_OFF 26
#define DMA_CUED_XOR_WIN_MSK (0xF0000000)
#endif
#define DMA_CUED_REGION_MSK 0x3
#define DMA_RXOR123 0x0
#define DMA_RXOR124 0x1
#define DMA_RXOR125 0x2
#define DMA_RXOR12 0x3
/* S/G addresses */
#define DMA_CDB_SG_SRC 1
#define DMA_CDB_SG_DST1 2
#define DMA_CDB_SG_DST2 3
/*
* DMAx engines Command Descriptor Block Type
*/
struct dma_cdb {
/*
* Basic CDB structure (Table 20-17, p.499, 440spe_um_1_22.pdf)
*/
u8 pad0[2]; /* reserved */
u8 attr; /* attributes */
u8 opc; /* opcode */
u32 sg1u; /* upper SG1 address */
u32 sg1l; /* lower SG1 address */
u32 cnt; /* SG count, 3B used */
u32 sg2u; /* upper SG2 address */
u32 sg2l; /* lower SG2 address */
u32 sg3u; /* upper SG3 address */
u32 sg3l; /* lower SG3 address */
};
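As an illustration of how these fields fit together, here is a hedged sketch of building a plain move (memcpy) CDB. The little-endian conversion and the use of only the low address words are assumptions about how the engine reads the CDB; the driver's own helpers are authoritative.

#include <linux/string.h>
#include <asm/byteorder.h>

/* Illustrative sketch only, not taken from the driver. */
static void example_fill_memcpy_cdb(struct dma_cdb *cdb,
				    dma_addr_t src, dma_addr_t dst, u32 len)
{
	memset(cdb, 0, sizeof(*cdb));
	cdb->opc  = DMA_CDB_OPC_MV_SG1_SG2;	/* move SG1 -> SG2 */
	cdb->sg1l = cpu_to_le32((u32)src);	/* source, low word */
	cdb->sg2l = cpu_to_le32((u32)dst);	/* destination, low word */
	cdb->cnt  = cpu_to_le32(len);		/* <= PPC440SPE_ADMA_DMA_MAX_BYTE_COUNT */
}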
/*
* DMAx hardware registers (p.515 in 440SPe UM 1.22)
*/
struct dma_regs {
u32 cpfpl;
u32 cpfph;
u32 csfpl;
u32 csfph;
u32 dsts;
u32 cfg;
u8 pad0[0x8];
u16 cpfhp;
u16 cpftp;
u16 csfhp;
u16 csftp;
u8 pad1[0x8];
u32 acpl;
u32 acph;
u32 s1bpl;
u32 s1bph;
u32 s2bpl;
u32 s2bph;
u32 s3bpl;
u32 s3bph;
u8 pad2[0x10];
u32 earl;
u32 earh;
u8 pad3[0x8];
u32 seat;
u32 sead;
u32 op;
u32 fsiz;
};
/*
* I2O hardware registers (p.528 in 440SPe UM 1.22)
*/
struct i2o_regs {
u32 ists;
u32 iseat;
u32 isead;
u8 pad0[0x14];
u32 idbel;
u8 pad1[0xc];
u32 ihis;
u32 ihim;
u8 pad2[0x8];
u32 ihiq;
u32 ihoq;
u8 pad3[0x8];
u32 iopis;
u32 iopim;
u32 iopiq;
u8 iopoq;
u8 pad4[3];
u16 iiflh;
u16 iiflt;
u16 iiplh;
u16 iiplt;
u16 ioflh;
u16 ioflt;
u16 ioplh;
u16 ioplt;
u32 iidc;
u32 ictl;
u32 ifcpp;
u8 pad5[0x4];
u16 mfac0;
u16 mfac1;
u16 mfac2;
u16 mfac3;
u16 mfac4;
u16 mfac5;
u16 mfac6;
u16 mfac7;
u16 ifcfh;
u16 ifcht;
u8 pad6[0x4];
u32 iifmc;
u32 iodb;
u32 iodbc;
u32 ifbal;
u32 ifbah;
u32 ifsiz;
u32 ispd0;
u32 ispd1;
u32 ispd2;
u32 ispd3;
u32 ihipl;
u32 ihiph;
u32 ihopl;
u32 ihoph;
u32 iiipl;
u32 iiiph;
u32 iiopl;
u32 iioph;
u32 ifcpl;
u32 ifcph;
u8 pad7[0x8];
u32 iopt;
};
#endif /* _PPC440SPE_DMA_H */
/*
* 440SPe's XOR engines support header file
*
* 2006-2009 (C) DENX Software Engineering.
*
* Author: Yuri Tikhonov <yur@emcraft.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#ifndef _PPC440SPE_XOR_H
#define _PPC440SPE_XOR_H
#include <linux/types.h>
/* Number of XOR engines available on the controller */
#define XOR_ENGINES_NUM 1
/* Number of operands supported in the h/w */
#define XOR_MAX_OPS 16
/*
* XOR Command Block Control Register bits
*/
#define XOR_CBCR_LNK_BIT (1<<31) /* link present */
#define XOR_CBCR_TGT_BIT (1<<30) /* target present */
#define XOR_CBCR_CBCE_BIT (1<<29) /* command block complete enable */
#define XOR_CBCR_RNZE_BIT (1<<28) /* result not zero enable */
#define XOR_CBCR_XNOR_BIT (1<<15) /* XOR/XNOR */
#define XOR_CDCR_OAC_MSK (0x7F) /* operand address count */
/*
* XORCore Status Register bits
*/
#define XOR_SR_XCP_BIT (1<<31) /* core processing */
#define XOR_SR_ICB_BIT (1<<17) /* invalid CB */
#define XOR_SR_IC_BIT (1<<16) /* invalid command */
#define XOR_SR_IPE_BIT (1<<15) /* internal parity error */
#define XOR_SR_RNZ_BIT (1<<2) /* result not Zero */
#define XOR_SR_CBC_BIT (1<<1) /* CB complete */
#define XOR_SR_CBLC_BIT (1<<0) /* CB list complete */
/*
* XORCore Control Set and Reset Register bits
*/
#define XOR_CRSR_XASR_BIT (1<<31) /* soft reset */
#define XOR_CRSR_XAE_BIT (1<<30) /* enable */
#define XOR_CRSR_RCBE_BIT (1<<29) /* refetch CB enable */
#define XOR_CRSR_PAUS_BIT (1<<28) /* pause */
#define XOR_CRSR_64BA_BIT (1<<27) /* 64/32 CB format */
#define XOR_CRSR_CLP_BIT (1<<25) /* continue list processing */
/*
* XORCore Interrupt Enable Register
*/
#define XOR_IE_ICBIE_BIT (1<<17) /* Invalid Command Block IRQ Enable */
#define XOR_IE_ICIE_BIT (1<<16) /* Invalid Command IRQ Enable */
#define XOR_IE_RPTIE_BIT (1<<14) /* Read PLB Timeout Error IRQ Enable */
#define XOR_IE_CBCIE_BIT (1<<1) /* CB complete interrupt enable */
#define XOR_IE_CBLCI_BIT (1<<0) /* CB list complete interrupt enable */
/*
* XOR Accelerator engine Command Block Type
*/
struct xor_cb {
/*
* Basic 64-bit format XOR CB (Table 19-1, p.463, 440spe_um_1_22.pdf)
*/
u32 cbc; /* control */
u32 cbbc; /* byte count */
u32 cbs; /* status */
u8 pad0[4]; /* reserved */
u32 cbtah; /* target address high */
u32 cbtal; /* target address low */
u32 cblah; /* link address high */
u32 cblal; /* link address low */
struct {
u32 h;
u32 l;
} __attribute__ ((packed)) ops[16];
} __attribute__ ((packed));
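A similarly hedged sketch for the XOR side: a single 64-bit-format command block XORing src_cnt operands into one target. Placing the operand count in the low bits of cbc follows XOR_CDCR_OAC_MSK above, and only the low address words are filled; both are assumptions, not the driver's code.

#include <linux/string.h>

/* Illustrative sketch only. */
static void example_fill_xor_cb(struct xor_cb *cb, dma_addr_t *src,
				int src_cnt, dma_addr_t target, u32 len)
{
	int i;

	memset(cb, 0, sizeof(*cb));
	cb->cbc   = XOR_CBCR_TGT_BIT | (src_cnt & XOR_CDCR_OAC_MSK);
	cb->cbbc  = len;		/* byte count (XOR_CBBCR) */
	cb->cbtal = (u32)target;	/* target address, low word */
	for (i = 0; i < src_cnt && i < XOR_MAX_OPS; i++)
		cb->ops[i].l = (u32)src[i];
}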
/*
* XOR hardware registers Table 19-3, UM 1.22
*/
struct xor_regs {
u32 op_ar[16][2]; /* operand address[0]-high,[1]-low registers */
u8 pad0[352]; /* reserved */
u32 cbcr; /* CB control register */
u32 cbbcr; /* CB byte count register */
u32 cbsr; /* CB status register */
u8 pad1[4]; /* reserved */
u32 cbtahr; /* operand target address high register */
u32 cbtalr; /* operand target address low register */
u32 cblahr; /* CB link address high register */
u32 cblalr; /* CB link address low register */
u32 crsr; /* control set register */
u32 crrr; /* control reset register */
u32 ccbahr; /* current CB address high register */
u32 ccbalr; /* current CB address low register */
u32 plbr; /* PLB configuration register */
u32 ier; /* interrupt enable register */
u32 pecr; /* parity error count register */
u32 sr; /* status register */
u32 revidr; /* revision ID register */
};
#endif /* _PPC440SPE_XOR_H */
@@ -80,17 +80,17 @@ static int sh_dmae_rst(int id)
 	unsigned short dmaor;
 
 	sh_dmae_ctl_stop(id);
-	dmaor = (dmaor_read_reg(id)|DMAOR_INIT);
+	dmaor = dmaor_read_reg(id) | DMAOR_INIT;
 	dmaor_write_reg(id, dmaor);
-	if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) {
+	if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) {
 		pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n");
 		return -EINVAL;
 	}
 	return 0;
 }
 
-static int dmae_is_idle(struct sh_dmae_chan *sh_chan)
+static int dmae_is_busy(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 	if (chcr & CHCR_DE) {
@@ -110,15 +110,14 @@ static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw)
 {
 	sh_dmae_writel(sh_chan, hw.sar, SAR);
 	sh_dmae_writel(sh_chan, hw.dar, DAR);
-	sh_dmae_writel(sh_chan,
-		(hw.tcr >> calc_xmit_shift(sh_chan)), TCR);
+	sh_dmae_writel(sh_chan, hw.tcr >> calc_xmit_shift(sh_chan), TCR);
 }
 
 static void dmae_start(struct sh_dmae_chan *sh_chan)
 {
 	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
 
-	chcr |= (CHCR_DE|CHCR_IE);
+	chcr |= CHCR_DE | CHCR_IE;
 	sh_dmae_writel(sh_chan, chcr, CHCR);
 }
@@ -132,7 +131,7 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	int ret = dmae_is_idle(sh_chan);
+	int ret = dmae_is_busy(sh_chan);
 	/* When DMA was working, can not set data to CHCR */
 	if (ret)
 		return ret;
@@ -149,7 +148,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 {
 	u32 addr;
 	int shift = 0;
-	int ret = dmae_is_idle(sh_chan);
+	int ret = dmae_is_busy(sh_chan);
 	if (ret)
 		return ret;
@@ -307,7 +306,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 		new = sh_dmae_get_desc(sh_chan);
 		if (!new) {
 			dev_err(sh_chan->dev,
-					"No free memory for link descriptor\n");
+				"No free memory for link descriptor\n");
 			goto err_get_desc;
 		}
@@ -388,7 +387,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 	struct sh_dmae_regs hw;
 
 	/* DMA work check */
-	if (dmae_is_idle(sh_chan))
+	if (dmae_is_busy(sh_chan))
 		return;
 
 	/* Find the first un-transfer desciptor */
@@ -497,8 +496,9 @@ static void dmae_do_tasklet(unsigned long data)
 	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
 	struct sh_desc *desc, *_desc, *cur_desc = NULL;
 	u32 sar_buf = sh_dmae_readl(sh_chan, SAR);
+
 	list_for_each_entry_safe(desc, _desc,
-			&sh_chan->ld_queue, node) {
+					&sh_chan->ld_queue, node) {
 		if ((desc->hw.sar + desc->hw.tcr) == sar_buf) {
 			cur_desc = desc;
 			break;
@@ -543,8 +543,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 
 	/* alloc channel */
 	new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL);
 	if (!new_sh_chan) {
-		dev_err(shdev->common.dev, "No free memory for allocating "
-			"dma channels!\n");
+		dev_err(shdev->common.dev,
+			"No free memory for allocating dma channels!\n");
 		return -ENOMEM;
 	}
@@ -586,8 +586,8 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
 		 "sh-dmae%d", new_sh_chan->id);
 
 	/* set up channel irq */
-	err = request_irq(irq, &sh_dmae_interrupt,
-			irqflags, new_sh_chan->dev_id, new_sh_chan);
+	err = request_irq(irq, &sh_dmae_interrupt, irqflags,
+			  new_sh_chan->dev_id, new_sh_chan);
 	if (err) {
 		dev_err(shdev->common.dev, "DMA channel %d request_irq error "
 			"with return %d\n", id, err);
@@ -676,6 +676,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	shdev->common.device_is_tx_complete = sh_dmae_is_complete;
 	shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending;
 	shdev->common.dev = &pdev->dev;
+	/* Default transfer size of 32 bytes requires 32-byte alignment */
+	shdev->common.copy_align = 5;
 
 #if defined(CONFIG_CPU_SH4)
 	/* Non Mix IRQ mode SH7722/SH7730 etc... */
@@ -688,8 +690,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	}
 
 	for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) {
-		err = request_irq(eirq[ecnt], sh_dmae_err,
-			irqflags, "DMAC Address Error", shdev);
+		err = request_irq(eirq[ecnt], sh_dmae_err, irqflags,
+				  "DMAC Address Error", shdev);
 		if (err) {
 			dev_err(&pdev->dev, "DMA device request_irq"
 				"error (irq %d) with return %d\n",
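copy_align is a power-of-two exponent, so the value 5 set above advertises 1 << 5 = 32-byte alignment; this is exactly the quantity the dmatest change earlier in this merge rounds against. A sketch of the client-side check, assuming only the dmaengine fields of this era:

/* Illustrative only: test a memcpy request against dev->copy_align. */
static bool example_copy_is_aligned(struct dma_device *dev,
				    dma_addr_t src, dma_addr_t dst, size_t len)
{
	size_t mask = ((size_t)1 << dev->copy_align) - 1;

	return ((src | dst | len) & mask) == 0;
}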
@@ -35,15 +35,15 @@ struct sh_desc {
 struct sh_dmae_chan {
 	dma_cookie_t completed_cookie;	/* The maximum cookie completed */
-	spinlock_t desc_lock;		/* Descriptor operation lock */
-	struct list_head ld_queue;	/* Link descriptors queue */
-	struct list_head ld_free;	/* Link descriptors free */
-	struct dma_chan common;		/* DMA common channel */
-	struct device *dev;		/* Channel device */
+	spinlock_t desc_lock;			/* Descriptor operation lock */
+	struct list_head ld_queue;		/* Link descriptors queue */
+	struct list_head ld_free;		/* Link descriptors free */
+	struct dma_chan common;			/* DMA common channel */
+	struct device *dev;			/* Channel device */
 	struct tasklet_struct tasklet;	/* Tasklet */
-	int descs_allocated;		/* desc count */
+	int descs_allocated;			/* desc count */
 	int id;				/* Raw id of this channel */
-	char dev_id[16];		/* unique name per DMAC of channel */
+	char dev_id[16];			/* unique name per DMAC of channel */
 
 	/* Set chcr */
 	int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs);
@@ -74,7 +74,7 @@ enum dma_transaction_type {
  *	control completion, and communicate status.
  * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of
  *	this transaction
- * @DMA_CTRL_ACK - the descriptor cannot be reused until the client
+ * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client
  *	acknowledges receipt, i.e. has a chance to establish any dependency
  *	chains
  * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s)
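In client terms, the clarified semantics are: a driver may recycle a descriptor only once the client has set DMA_CTRL_ACK in its flags. The dmaengine helpers of this era look approximately like this:

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;	/* descriptor may now be reused */
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return tx->flags & DMA_CTRL_ACK;
}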