Commit eaadcfeb authored by Linus Torvalds

Merge tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine

Pull dmaengine fixes from Dan Williams:

 - deprecation of net_dma to be removed in 3.14

 - crash regression fix in pl330 from the dmaengine_unmap rework

 - crash regression fix for any channel running raid ops without
   CONFIG_ASYNC_TX_DMA from dmaengine_unmap

 - memory leak regression in mv_xor from dmaengine_unmap

 - build warning regressions in mv_xor, fsldma, ppc4xx, txx9, and
   at_hdmac from dmaengine_unmap

 - sleep in atomic regression in dma_async_memcpy_pg_to_pg

 - new fix in mv_xor for handling channel initialization failures

* tag 'dmaengine-fixes-3.13-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  net_dma: mark broken
  dma: pl330: ensure DMA descriptors are zero-initialised
  dmaengine: fix sleep in atomic
  dmaengine: mv_xor: fix oops when channels fail to initialise
  dma: mv_xor: Use dmaengine_unmap_data for the self-tests
  dmaengine: fix enable for high order unmap pools
  dma: fix build warnings in txx9
  dmatest: fix build warning on mips
  dma: fix fsldma build warnings
  dma: fix build warnings in ppc4xx
  dmaengine: at_hdmac: remove unused function
  dma: mv_xor: remove mv_desc_get_dest_addr()
parents 46dd0835 77873803
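
Several of the fixes in this pull revolve around the dmaengine_unmap_data bookkeeping introduced by the dmaengine_unmap rework. The sketch below is not taken from this merge; it is a minimal illustration of that lifecycle using only calls that appear in the diffs further down (dmaengine_get_unmap_data(), the addr[]/to_cnt/from_cnt/len fields, dma_set_unmap(), dmaengine_unmap_put()), and the function and page names are invented for the example. Mapping-error checks are omitted for brevity.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Illustrative only: map one source page and one destination page for a
 * memcpy-style transfer and let the dmaengine core own the unmapping.
 * "chan", "src_page" and "dst_page" are assumed to come from the caller.
 */
static int example_submit_copy(struct dma_chan *chan,
			       struct page *src_page, struct page *dst_page)
{
	struct device *dev = chan->device->dev;
	struct dmaengine_unmap_data *unmap;
	struct dma_async_tx_descriptor *tx;

	/* Two entries: addr[0] = source, addr[1] = destination. */
	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->addr[0] = dma_map_page(dev, src_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst_page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;
	unmap->len = PAGE_SIZE;

	tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
						  unmap->addr[0], PAGE_SIZE, 0);
	if (!tx) {
		dmaengine_unmap_put(unmap);
		return -EIO;
	}

	dma_set_unmap(tx, unmap);	/* the core unmaps when the copy completes */
	dmaengine_unmap_put(unmap);	/* drop our local reference */
	return dma_submit_error(dmaengine_submit(tx)) ? -EIO : 0;
}
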
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select DCA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
 	tristate "AMCC PPC440SPe ADMA support"
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
+	select DMA_ENGINE_RAID
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
 	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
@@ -352,6 +355,7 @@ config NET_DMA
 	bool "Network: TCP receive copy offload"
 	depends on DMA_ENGINE && NET
 	default (INTEL_IOATDMA || FSL_DMA)
+	depends on BROKEN
 	help
 	  This enables the use of DMA engines in the network stack to
 	  offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
 	  Simple DMA test client. Say N unless you're debugging a
 	  DMA Device driver.
 
+config DMA_ENGINE_RAID
+	bool
+
 endif

@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
 {
 	return &chan->dev->device;
 }
 
-static struct device *chan2parent(struct dma_chan *chan)
-{
-	return chan->dev->device.parent;
-}
 #if defined(VERBOSE_DEBUG)
 static void vdbg_dump_regs(struct at_dma_chan *atchan)

@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 static struct dmaengine_unmap_pool unmap_pool[] = {
 	__UNMAP_POOL(2),
-	#if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 	__UNMAP_POOL(16),
 	__UNMAP_POOL(128),
 	__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 	dma_cookie_t cookie;
 	unsigned long flags;
 
-	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 	if (!unmap)
 		return -ENOMEM;

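The GFP_NOIO to GFP_NOWAIT change in the second hunk above is the "sleep in atomic" fix from the summary: dma_async_memcpy_pg_to_pg() can be reached from atomic context, and GFP_NOIO may still sleep, while GFP_NOWAIT fails rather than sleeping. As a stand-alone illustration of the constraint (not code from this series), any allocation made while a spinlock is held has to use a non-sleeping GFP flag:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Allocate while holding a spinlock: only non-sleeping flags are legal. */
static void *example_alloc_in_atomic(size_t size)
{
	void *p;

	spin_lock(&example_lock);
	/*
	 * GFP_NOIO may block for reclaim and would trigger a "sleeping
	 * function called from invalid context" splat here; GFP_NOWAIT
	 * returns NULL instead of sleeping.
	 */
	p = kmalloc(size, GFP_NOWAIT);
	spin_unlock(&example_lock);

	return p;	/* may be NULL; callers must handle that, as the DMA path does */
}
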
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
 		um->len = params->buf_size;
 		for (i = 0; i < src_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->srcs[i];
+			void *buf = thread->srcs[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 			um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
 						   um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
 		/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
 		dsts = &um->addr[src_cnt];
 		for (i = 0; i < dst_cnt; i++) {
-			unsigned long buf = (unsigned long) thread->dsts[i];
+			void *buf = thread->dsts[i];
 			struct page *pg = virt_to_page(buf);
-			unsigned pg_off = buf & ~PAGE_MASK;
+			unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
 
 			dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
 					       DMA_BIDIRECTIONAL);

@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
 	hw->count = CPU_TO_DMA(chan, count, 32);
 }
 
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
-	return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
 static void set_desc_src(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
 {
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
 }
 
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_dst(struct fsldma_chan *chan,
 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
 {
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
 }
 
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
-			       struct fsl_desc_sw *desc)
-{
-	u64 snoop_bits;
-
-	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
-		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
-	return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
 static void set_desc_next(struct fsldma_chan *chan,
 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
 {
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
 	struct fsl_desc_sw *child;
 	unsigned long flags;
-	dma_cookie_t cookie;
+	dma_cookie_t cookie = -EINVAL;
 
 	spin_lock_irqsave(&chan->desc_lock, flags);
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 				      struct fsl_desc_sw *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
-	struct device *dev = chan->common.device->dev;
-	dma_addr_t src = get_desc_src(chan, desc);
-	dma_addr_t dst = get_desc_dst(chan, desc);
-	u32 len = get_desc_cnt(chan, desc);
 
 	/* Run the link descriptor callback function */
 	if (txd->callback) {

@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
 	hw_desc->desc_command = (1 << 31);
 }
 
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
-	struct mv_xor_desc *hw_desc = desc->hw_desc;
-	return hw_desc->phy_dest_addr;
-}
-
 static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
 				   u32 byte_count)
 {
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 /*
  * Perform a transaction to verify the HW works.
  */
-#define MV_XOR_TEST_SIZE 2000
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	int err = 0;
 
-	src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!src)
 		return -ENOMEM;
-	dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+	dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
 	if (!dest) {
 		kfree(src);
 		return -ENOMEM;
 	}
 
 	/* Fill in src buffer */
-	for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+	for (i = 0; i < PAGE_SIZE; i++)
 		((u8 *) src)[i] = (u8)i;
 
 	dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
-	dest_dma = dma_map_single(dma_chan->device->dev, dest,
-				  MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
 
-	src_dma = dma_map_single(dma_chan->device->dev, src,
-				 MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+	src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+			       PAGE_SIZE, DMA_TO_DEVICE);
+	unmap->to_cnt = 1;
+	unmap->addr[0] = src_dma;
+	dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	unmap->from_cnt = 1;
+	unmap->addr[1] = dest_dma;
+
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
-				    MV_XOR_TEST_SIZE, 0);
+				    PAGE_SIZE, 0);
 
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
 	async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
-				MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
-	if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+				PAGE_SIZE, DMA_FROM_DEVICE);
+	if (memcmp(src, dest, PAGE_SIZE)) {
 		dev_err(dma_chan->device->dev,
 			"Self-test copy failed compare, disabling\n");
 		err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
 	kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
 	dma_addr_t dest_dma;
 	struct dma_async_tx_descriptor *tx;
+	struct dmaengine_unmap_data *unmap;
 	struct dma_chan *dma_chan;
 	dma_cookie_t cookie;
 	u8 cmp_byte = 0;
 	u32 cmp_word;
 	int err = 0;
+	int src_count = MV_XOR_NUM_SRC_TEST;
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
 		if (!xor_srcs[src_idx]) {
 			while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 	/* Fill in src buffers */
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+	for (src_idx = 0; src_idx < src_count; src_idx++) {
 		u8 *ptr = page_address(xor_srcs[src_idx]);
 		for (i = 0; i < PAGE_SIZE; i++)
 			ptr[i] = (1 << src_idx);
 	}
 
-	for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+	for (src_idx = 0; src_idx < src_count; src_idx++)
 		cmp_byte ^= (u8) (1 << src_idx);
 
 	cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 		goto out;
 	}
 
-	/* test xor */
-	dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
-				DMA_FROM_DEVICE);
+	unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+					 GFP_KERNEL);
+	if (!unmap) {
+		err = -ENOMEM;
+		goto free_resources;
+	}
 
-	for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
-		dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
-					   0, PAGE_SIZE, DMA_TO_DEVICE);
+	/* test xor */
+	for (i = 0; i < src_count; i++) {
+		unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+					      0, PAGE_SIZE, DMA_TO_DEVICE);
+		dma_srcs[i] = unmap->addr[i];
+		unmap->to_cnt++;
+	}
+
+	unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+					      DMA_FROM_DEVICE);
+	dest_dma = unmap->addr[src_count];
+	unmap->from_cnt = 1;
+	unmap->len = PAGE_SIZE;
 
 	tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
-				 MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+				 src_count, PAGE_SIZE, 0);
 
 	cookie = mv_xor_tx_submit(tx);
 	mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 	}
 
 free_resources:
+	dmaengine_unmap_put(unmap);
 	mv_xor_free_chan_resources(dma_chan);
 out:
-	src_idx = MV_XOR_NUM_SRC_TEST;
+	src_idx = src_count;
 	while (src_idx--)
 		__free_page(xor_srcs[src_idx]);
 	__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
 		int i = 0;
 
 		for_each_child_of_node(pdev->dev.of_node, np) {
+			struct mv_xor_chan *chan;
 			dma_cap_mask_t cap_mask;
 			int irq;
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
-				xordev->channels[i] = NULL;
+			chan = mv_xor_channel_add(xordev, pdev, i,
+						  cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				irq_dispose_mapping(irq);
 				goto err_channel_add;
 			}
 
+			xordev->channels[i] = chan;
 			i++;
 		}
 	} else if (pdata && pdata->channels) {
 		for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
 			struct mv_xor_channel_data *cd;
+			struct mv_xor_chan *chan;
 			int irq;
 
 			cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
 				goto err_channel_add;
 			}
 
-			xordev->channels[i] =
-				mv_xor_channel_add(xordev, pdev, i,
-						   cd->cap_mask, irq);
-			if (IS_ERR(xordev->channels[i])) {
-				ret = PTR_ERR(xordev->channels[i]);
+			chan = mv_xor_channel_add(xordev, pdev, i,
						  cd->cap_mask, irq);
+			if (IS_ERR(chan)) {
+				ret = PTR_ERR(chan);
 				goto err_channel_add;
 			}
 
+			xordev->channels[i] = chan;
 		}
 	}

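The two probe() hunks above are the "fix oops when channels fail to initialise" change: the result of mv_xor_channel_add() is now checked with IS_ERR() before it is ever stored in xordev->channels[i], so the error/cleanup path only ever sees NULL or valid channel pointers. A driver-agnostic sketch of that pattern follows; every name in it (foo_chan, foo_chan_add, foo_chan_remove) is invented for illustration.

#include <linux/err.h>

struct foo_chan;
/* Hypothetical helpers: foo_chan_add() returns a valid pointer or ERR_PTR(). */
struct foo_chan *foo_chan_add(int i);
void foo_chan_remove(struct foo_chan *chan);

static int foo_probe_channels(struct foo_chan *channels[], int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		struct foo_chan *chan = foo_chan_add(i);

		if (IS_ERR(chan)) {
			ret = PTR_ERR(chan);
			goto err;		/* channels[i] is left untouched */
		}
		channels[i] = chan;		/* only known-good pointers are stored */
	}
	return 0;

err:
	while (i--)
		foo_chan_remove(channels[i]);	/* never handed an ERR_PTR() */
	return ret;
}
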
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
 static inline void _init_desc(struct dma_pl330_desc *desc)
 {
-	desc->pchan = NULL;
 	desc->req.x = &desc->px;
 	desc->req.token = desc;
 	desc->rqcfg.swap = SWAP_NO;
-	desc->rqcfg.privileged = 0;
-	desc->rqcfg.insnaccess = 0;
 	desc->rqcfg.scctl = SCCTRL0;
 	desc->rqcfg.dcctl = DCCTRL0;
 	desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
 	if (!pdmac)
 		return 0;
 
-	desc = kmalloc(count * sizeof(*desc), flg);
+	desc = kcalloc(count, sizeof(*desc), flg);
 	if (!desc)
 		return 0;

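The kmalloc() to kcalloc() switch above is what makes it safe to drop the explicit pchan/privileged/insnaccess assignments in _init_desc(), and it is the pl330 crash fix called out in the summary: each descriptor now starts out fully zeroed, so fields that are no longer written by hand after the dmaengine_unmap rework cannot hold stale garbage. A tiny stand-alone sketch of the difference, with an invented struct:

#include <linux/slab.h>

struct example_desc {
	void *owner;	/* code elsewhere relies on this starting out NULL */
	int flags;	/* and on this starting out 0 */
};

static struct example_desc *example_alloc_descs(int count, gfp_t flags)
{
	/*
	 * kmalloc(count * sizeof(struct example_desc), flags) would hand back
	 * uninitialised memory, so owner/flags would hold whatever was there
	 * before; kcalloc() zeroes the whole array and also checks the
	 * multiplication for overflow.
	 */
	return kcalloc(count, sizeof(struct example_desc), flags);
}
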
@@ -532,29 +532,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
 	hw_desc->opc = DMA_CDB_OPC_MV_SG1_SG2;
 }
 
-/**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
-					int value, unsigned long flags)
-{
-	struct dma_cdb *hw_desc = desc->hw_desc;
-
-	memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
-	desc->hw_next = NULL;
-	desc->src_cnt = 1;
-	desc->dst_cnt = 1;
-
-	if (flags & DMA_PREP_INTERRUPT)
-		set_bit(PPC440SPE_DESC_INT, &desc->flags);
-	else
-		clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
-	hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
-	hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
-	hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
 /**
  * ppc440spe_desc_set_src_addr - set source address into the descriptor
  */
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
 		struct ppc440spe_adma_chan *chan,
 		dma_cookie_t cookie)
 {
-	int i;
-
 	BUG_ON(desc->async_tx.cookie < 0);
 	if (desc->async_tx.cookie > 0) {
 		cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
 			ppc440spe_adma_prep_dma_interrupt;
 	}
 	pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
-		"( %s%s%s%s%s%s%s)\n",
+		"( %s%s%s%s%s%s)\n",
 		dev_name(adev->dev),
 		dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
 		dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",

@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
 	dma_async_tx_callback callback;
 	void *param;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
-	struct txx9dmac_slave *ds = dc->chan.private;
 
 	dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
 		 txd->cookie, desc);