Commit 8f5ba31a authored by Robert Jarzmik, committed by Brian Norris

mtd: nand: pxa3xx-nand: switch to dmaengine

Now that the pxa architecture has a dmaengine driver, remove the direct
access to DMA registers in favor of the more generic dmaengine code.

This should also be applicable to mmp and orion, provided they work in a
device-tree environment.

This patch also removes the previous hack that was necessary to make the
driver work in a devicetree environment.
Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
Reviewed-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
Tested-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
[Brian: fixup use of 'enum dma_transfer_direction']
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
parent e1305df1
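For readers less familiar with the dmaengine slave API this commit moves to, the
generic flow is: request a channel, describe the device side with
dmaengine_slave_config(), prepare a scatter-gather descriptor with
dmaengine_prep_slave_sg(), attach a completion callback, then dmaengine_submit()
and dma_async_issue_pending(). The sketch below only illustrates that generic
flow under those assumptions; the helper names my_start_xfer() and my_xfer_done()
are invented for the example, and the actual driver conversion is in the diff
that follows.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Illustrative sketch only; my_xfer_done() and my_start_xfer() are made-up names. */
static void my_xfer_done(void *arg)
{
        /* Called by the dmaengine driver when the transfer completes. */
}

static int my_start_xfer(struct dma_chan *chan, struct scatterlist *sg,
                         struct dma_slave_config *cfg)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        /* Describe the device side: FIFO address, register width, burst size. */
        if (dmaengine_slave_config(chan, cfg))
                return -EINVAL;

        /* Build a descriptor for one sg entry, memory -> device direction. */
        tx = dmaengine_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
                                     DMA_PREP_INTERRUPT);
        if (!tx)
                return -ENOMEM;

        tx->callback = my_xfer_done;
        tx->callback_param = NULL;

        /* Queue the descriptor and kick the engine. */
        cookie = dmaengine_submit(tx);
        dma_async_issue_pending(chan);

        return dma_submit_error(cookie) ? -EIO : 0;
}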
@@ -15,7 +15,9 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
 #include <linux/mtd/mtd.h>
@@ -33,10 +35,6 @@
 #define ARCH_HAS_DMA
 #endif
 
-#ifdef ARCH_HAS_DMA
-#include <mach/dma.h>
-#endif
-
 #include <linux/platform_data/mtd-nand-pxa3xx.h>
 
 #define CHIP_DELAY_TIMEOUT      msecs_to_jiffies(200)
@@ -201,6 +199,10 @@ struct pxa3xx_nand_info {
        unsigned int            oob_buff_pos;
 
        /* DMA information */
+       struct scatterlist      sg;
+       enum dma_data_direction dma_dir;
+       struct dma_chan         *dma_chan;
+       dma_cookie_t            dma_cookie;
        int                     drcmr_dat;
        int                     drcmr_cmd;
 
@@ -208,8 +210,6 @@ struct pxa3xx_nand_info {
        unsigned char           *oob_buff;
        dma_addr_t              data_buff_phys;
        int                     data_dma_ch;
-       struct pxa_dma_desc     *data_desc;
-       dma_addr_t              data_desc_addr;
 
        struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
        unsigned int            state;
@@ -492,6 +492,9 @@ static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
                ndcr &= ~NDCR_ND_RUN;
                nand_writel(info, NDCR, ndcr);
        }
+       if (info->dma_chan)
+               dmaengine_terminate_all(info->dma_chan);
+
        /* clear status bits */
        nand_writel(info, NDSR, NDSR_MASK);
 }
@@ -583,57 +586,61 @@ static void handle_data_pio(struct pxa3xx_nand_info *info)
        info->data_size -= do_bytes;
 }
 
-#ifdef ARCH_HAS_DMA
-static void start_data_dma(struct pxa3xx_nand_info *info)
+static void pxa3xx_nand_data_dma_irq(void *data)
 {
-       struct pxa_dma_desc *desc = info->data_desc;
-       int dma_len = ALIGN(info->data_size + info->oob_size, 32);
+       struct pxa3xx_nand_info *info = data;
+       struct dma_tx_state state;
+       enum dma_status status;
 
-       desc->ddadr = DDADR_STOP;
-       desc->dcmd = DCMD_ENDIRQEN | DCMD_WIDTH4 | DCMD_BURST32 | dma_len;
+       status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
+       if (likely(status == DMA_COMPLETE)) {
+               info->state = STATE_DMA_DONE;
+       } else {
+               dev_err(&info->pdev->dev, "DMA error on data channel\n");
+               info->retcode = ERR_DMABUSERR;
+       }
+       dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
+
+       nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+       enable_int(info, NDCR_INT_MASK);
+}
+
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{
+       enum dma_transfer_direction direction;
+       struct dma_async_tx_descriptor *tx;
 
        switch (info->state) {
        case STATE_DMA_WRITING:
-               desc->dsadr = info->data_buff_phys;
-               desc->dtadr = info->mmio_phys + NDDB;
-               desc->dcmd |= DCMD_INCSRCADDR | DCMD_FLOWTRG;
+               info->dma_dir = DMA_TO_DEVICE;
+               direction = DMA_MEM_TO_DEV;
                break;
        case STATE_DMA_READING:
-               desc->dtadr = info->data_buff_phys;
-               desc->dsadr = info->mmio_phys + NDDB;
-               desc->dcmd |= DCMD_INCTRGADDR | DCMD_FLOWSRC;
+               info->dma_dir = DMA_FROM_DEVICE;
+               direction = DMA_DEV_TO_MEM;
                break;
        default:
                dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
                                info->state);
                BUG();
        }
-
-       DRCMR(info->drcmr_dat) = DRCMR_MAPVLD | info->data_dma_ch;
-       DDADR(info->data_dma_ch) = info->data_desc_addr;
-       DCSR(info->data_dma_ch) |= DCSR_RUN;
-}
+       info->sg.length = info->data_size +
+               (info->oob_size ? info->spare_size + info->ecc_size : 0);
+       dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
 
-static void pxa3xx_nand_data_dma_irq(int channel, void *data)
-{
-       struct pxa3xx_nand_info *info = data;
-       uint32_t dcsr;
-
-       dcsr = DCSR(channel);
-       DCSR(channel) = dcsr;
-
-       if (dcsr & DCSR_BUSERR) {
-               info->retcode = ERR_DMABUSERR;
+       tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
+                                    DMA_PREP_INTERRUPT);
+       if (!tx) {
+               dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
+               return;
        }
-
-       info->state = STATE_DMA_DONE;
-       enable_int(info, NDCR_INT_MASK);
-       nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+       tx->callback = pxa3xx_nand_data_dma_irq;
+       tx->callback_param = info;
+       info->dma_cookie = dmaengine_submit(tx);
+       dma_async_issue_pending(info->dma_chan);
+       dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
               __func__, direction, info->dma_cookie, info->sg.length);
 }
-#else
-static void start_data_dma(struct pxa3xx_nand_info *info)
-{}
-#endif
 
 static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
 {
@@ -1319,36 +1326,50 @@ static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
        return 0;
 }
 
-#ifdef ARCH_HAS_DMA
 static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 {
        struct platform_device *pdev = info->pdev;
-       int data_desc_offset = info->buf_size - sizeof(struct pxa_dma_desc);
+       struct dma_slave_config config;
+       dma_cap_mask_t mask;
+       struct pxad_param param;
+       int ret;
 
-       if (use_dma == 0) {
-               info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
-               if (info->data_buff == NULL)
-                       return -ENOMEM;
+       info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+       if (info->data_buff == NULL)
+               return -ENOMEM;
+       if (use_dma == 0)
                return 0;
-       }
 
-       info->data_buff = dma_alloc_coherent(&pdev->dev, info->buf_size,
-                               &info->data_buff_phys, GFP_KERNEL);
-       if (info->data_buff == NULL) {
-               dev_err(&pdev->dev, "failed to allocate dma buffer\n");
-               return -ENOMEM;
-       }
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (ret)
+               return ret;
 
-       info->data_desc = (void *)info->data_buff + data_desc_offset;
-       info->data_desc_addr = info->data_buff_phys + data_desc_offset;
+       sg_init_one(&info->sg, info->data_buff, info->buf_size);
+       dma_cap_zero(mask);
+       dma_cap_set(DMA_SLAVE, mask);
+       param.prio = PXAD_PRIO_LOWEST;
+       param.drcmr = info->drcmr_dat;
+       info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+                                                         &param, &pdev->dev,
+                                                         "data");
+       if (!info->dma_chan) {
+               dev_err(&pdev->dev, "unable to request data dma channel\n");
+               return -ENODEV;
+       }
 
-       info->data_dma_ch = pxa_request_dma("nand-data", DMA_PRIO_LOW,
-                               pxa3xx_nand_data_dma_irq, info);
-       if (info->data_dma_ch < 0) {
-               dev_err(&pdev->dev, "failed to request data dma\n");
-               dma_free_coherent(&pdev->dev, info->buf_size,
-                               info->data_buff, info->data_buff_phys);
-               return info->data_dma_ch;
+       memset(&config, 0, sizeof(config));
+       config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+       config.src_addr = info->mmio_phys + NDDB;
+       config.dst_addr = info->mmio_phys + NDDB;
+       config.src_maxburst = 32;
+       config.dst_maxburst = 32;
+       ret = dmaengine_slave_config(info->dma_chan, &config);
+       if (ret < 0) {
+               dev_err(&info->pdev->dev,
+                       "dma channel configuration failed: %d\n",
+                       ret);
+               return ret;
        }
 
        /*
@@ -1361,29 +1382,12 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
 
 static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
 {
-       struct platform_device *pdev = info->pdev;
        if (info->use_dma) {
-               pxa_free_dma(info->data_dma_ch);
-               dma_free_coherent(&pdev->dev, info->buf_size,
-                                 info->data_buff, info->data_buff_phys);
-       } else {
-               kfree(info->data_buff);
+               dmaengine_terminate_all(info->dma_chan);
+               dma_release_channel(info->dma_chan);
        }
-}
-#else
-static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
-{
-       info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
-       if (info->data_buff == NULL)
-               return -ENOMEM;
-       return 0;
-}
-
-static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
-{
        kfree(info->data_buff);
 }
-#endif
 
 static int pxa3xx_nand_sensing(struct pxa3xx_nand_info *info)
 {
@@ -1683,16 +1687,6 @@ static int alloc_nand_resource(struct platform_device *pdev)
                return ret;
 
        if (use_dma) {
-               /*
-                * This is a dirty hack to make this driver work from
-                * devicetree bindings. It can be removed once we have
-                * a prober DMA controller framework for DT.
-                */
-               if (pdev->dev.of_node &&
-                   of_machine_is_compatible("marvell,pxa3xx")) {
-                       info->drcmr_dat = 97;
-                       info->drcmr_cmd = 99;
-               } else {
                        r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
                        if (r == NULL) {
                                dev_err(&pdev->dev,
@@ -1711,7 +1705,6 @@ static int alloc_nand_resource(struct platform_device *pdev)
                        }
                        info->drcmr_cmd = r->start;
                }
-       }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
@@ -1821,15 +1814,16 @@ static int pxa3xx_nand_probe(struct platform_device *pdev)
        struct pxa3xx_nand_platform_data *pdata;
        struct mtd_part_parser_data ppdata = {};
        struct pxa3xx_nand_info *info;
-       int ret, cs, probe_success;
+       int ret, cs, probe_success, dma_available;
 
-#ifndef ARCH_HAS_DMA
-       if (use_dma) {
+       dma_available = IS_ENABLED(CONFIG_ARM) &&
+               (IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
+       if (use_dma && !dma_available) {
                use_dma = 0;
                dev_warn(&pdev->dev,
                         "This platform can't do DMA on this device\n");
        }
-#endif
+
        ret = pxa3xx_nand_probe_dt(pdev);
        if (ret)
                return ret;