Commit a6eaf23b, authored Jan 06, 2016 by Vinod Koul

Merge branch 'topic/rcar' into for-linus

parents c81bc960 4d42e95f

Showing 5 changed files with 8 additions and 781 deletions (+8 -781)
Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt    +8  -2
drivers/dma/sh/Kconfig                                        +0  -6
drivers/dma/sh/Makefile                                       +0  -1
drivers/dma/sh/rcar-hpbdma.c                                  +0  -669
include/linux/platform_data/dma-rcar-hpbdma.h                 +0  -103
Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
 * Renesas USB DMA Controller Device Tree bindings

 Required Properties:
-- compatible: must contain "renesas,usb-dmac"
+- compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
+	      Examples with soctypes are:
+		- "renesas,r8a7790-usb-dmac" (R-Car H2)
+		- "renesas,r8a7791-usb-dmac" (R-Car M2-W)
+		- "renesas,r8a7793-usb-dmac" (R-Car M2-N)
+		- "renesas,r8a7794-usb-dmac" (R-Car E2)
+		- "renesas,r8a7795-usb-dmac" (R-Car H3)
 - reg: base address and length of the registers block for the DMAC
 - interrupts: interrupt specifiers for the DMAC, one for each entry in
   interrupt-names.
...
@@ -15,7 +21,7 @@ Required Properties:
 Example: R8A7790 (R-Car H2) USB-DMACs

 	usb_dmac0: dma-controller@e65a0000 {
-		compatible = "renesas,usb-dmac";
+		compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
 		reg = <0 0xe65a0000 0 0x100>;
 		interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
 			      0 109 IRQ_TYPE_LEVEL_HIGH>;
...
drivers/dma/sh/Kconfig
...
@@ -47,12 +47,6 @@ config RCAR_DMAC
 	  This driver supports the general purpose DMA controller found in the
 	  Renesas R-Car second generation SoCs.

-config RCAR_HPB_DMAE
-	tristate "Renesas R-Car HPB DMAC support"
-	depends on SH_DMAE_BASE
-	help
-	  Enable support for the Renesas R-Car series DMA controllers.
-
 config RENESAS_USB_DMAC
 	tristate "Renesas USB-DMA Controller"
 	depends on ARCH_SHMOBILE || COMPILE_TEST
...
drivers/dma/sh/Makefile
...
@@ -14,6 +14,5 @@ shdma-objs := $(shdma-y)
 obj-$(CONFIG_SH_DMAE) += shdma.o

 obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
-obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o
 obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
 obj-$(CONFIG_SUDMAC) += sudmac.o
drivers/dma/sh/rcar-hpbdma.c	deleted 100644 → 0
/*
* Copyright (C) 2011-2013 Renesas Electronics Corporation
* Copyright (C) 2013 Cogent Embedded, Inc.
*
* This file is based on the drivers/dma/sh/shdma.c
*
* Renesas SuperH DMA Engine support
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* - DMA of SuperH does not have Hardware DMA chain mode.
* - max DMA size is 16MB.
*
*/
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_data/dma-rcar-hpbdma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shdma-base.h>
#include <linux/slab.h>
/* DMA channel registers */
#define HPB_DMAE_DSAR0 0x00
#define HPB_DMAE_DDAR0 0x04
#define HPB_DMAE_DTCR0 0x08
#define HPB_DMAE_DSAR1 0x0C
#define HPB_DMAE_DDAR1 0x10
#define HPB_DMAE_DTCR1 0x14
#define HPB_DMAE_DSASR 0x18
#define HPB_DMAE_DDASR 0x1C
#define HPB_DMAE_DTCSR 0x20
#define HPB_DMAE_DPTR 0x24
#define HPB_DMAE_DCR 0x28
#define HPB_DMAE_DCMDR 0x2C
#define HPB_DMAE_DSTPR 0x30
#define HPB_DMAE_DSTSR 0x34
#define HPB_DMAE_DDBGR 0x38
#define HPB_DMAE_DDBGR2 0x3C
#define HPB_DMAE_CHAN(n) (0x40 * (n))
/* DMA command register (DCMDR) bits */
#define HPB_DMAE_DCMDR_BDOUT BIT(7)
#define HPB_DMAE_DCMDR_DQSPD BIT(6)
#define HPB_DMAE_DCMDR_DQSPC BIT(5)
#define HPB_DMAE_DCMDR_DMSPD BIT(4)
#define HPB_DMAE_DCMDR_DMSPC BIT(3)
#define HPB_DMAE_DCMDR_DQEND BIT(2)
#define HPB_DMAE_DCMDR_DNXT BIT(1)
#define HPB_DMAE_DCMDR_DMEN BIT(0)
/* DMA forced stop register (DSTPR) bits */
#define HPB_DMAE_DSTPR_DMSTP BIT(0)
/* DMA status register (DSTSR) bits */
#define HPB_DMAE_DSTSR_DQSTS BIT(2)
#define HPB_DMAE_DSTSR_DMSTS BIT(0)
/* DMA common registers */
#define HPB_DMAE_DTIMR 0x00
#define HPB_DMAE_DINTSR0 0x0C
#define HPB_DMAE_DINTSR1 0x10
#define HPB_DMAE_DINTCR0 0x14
#define HPB_DMAE_DINTCR1 0x18
#define HPB_DMAE_DINTMR0 0x1C
#define HPB_DMAE_DINTMR1 0x20
#define HPB_DMAE_DACTSR0 0x24
#define HPB_DMAE_DACTSR1 0x28
#define HPB_DMAE_HSRSTR(n) (0x40 + (n) * 4)
#define HPB_DMAE_HPB_DMASPR(n) (0x140 + (n) * 4)
#define HPB_DMAE_HPB_DMLVLR0 0x160
#define HPB_DMAE_HPB_DMLVLR1 0x164
#define HPB_DMAE_HPB_DMSHPT0 0x168
#define HPB_DMAE_HPB_DMSHPT1 0x16C
#define HPB_DMA_SLAVE_NUMBER 256
#define HPB_DMA_TCR_MAX 0x01000000	/* 16 MiB */

struct hpb_dmae_chan {
	struct shdma_chan shdma_chan;
	int xfer_mode;			/* DMA transfer mode */
#define XFER_SINGLE	1
#define XFER_DOUBLE	2
	unsigned plane_idx;		/* current DMA information set */
	bool first_desc;		/* first/next transfer */
	int xmit_shift;			/* log_2(bytes_per_xfer) */
	void __iomem *base;
	const struct hpb_dmae_slave_config *cfg;
	char dev_id[16];		/* unique name per DMAC of channel */
	dma_addr_t slave_addr;
};

struct hpb_dmae_device {
	struct shdma_dev shdma_dev;
	spinlock_t reg_lock;		/* comm_reg operation lock */
	struct hpb_dmae_pdata *pdata;
	void __iomem *chan_reg;
	void __iomem *comm_reg;
	void __iomem *reset_reg;
	void __iomem *mode_reg;
};

struct hpb_dmae_regs {
	u32 sar;	/* SAR / source address */
	u32 dar;	/* DAR / destination address */
	u32 tcr;	/* TCR / transfer count */
};

struct hpb_desc {
	struct shdma_desc shdma_desc;
	struct hpb_dmae_regs hw;
	unsigned plane_idx;
};

#define to_chan(schan) container_of(schan, struct hpb_dmae_chan, shdma_chan)
#define to_desc(sdesc) container_of(sdesc, struct hpb_desc, shdma_desc)
#define to_dev(sc) container_of(sc->shdma_chan.dma_chan.device, \
				struct hpb_dmae_device, shdma_dev.dma_dev)

static void ch_reg_write(struct hpb_dmae_chan *hpb_dc, u32 data, u32 reg)
{
	iowrite32(data, hpb_dc->base + reg);
}

static u32 ch_reg_read(struct hpb_dmae_chan *hpb_dc, u32 reg)
{
	return ioread32(hpb_dc->base + reg);
}
static void dcmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->chan_reg + HPB_DMAE_DCMDR);
}

static void hsrstr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	iowrite32(0x1, hpbdev->comm_reg + HPB_DMAE_HSRSTR(ch));
}

static u32 dintsr_read(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 v;

	if (ch < 32)
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR0) >> ch;
	else
		v = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTSR1) >> (ch - 32);
	return v & 0x1;
}

static void dintcr_write(struct hpb_dmae_device *hpbdev, u32 ch)
{
	if (ch < 32)
		iowrite32((0x1 << ch), hpbdev->comm_reg + HPB_DMAE_DINTCR0);
	else
		iowrite32((0x1 << (ch - 32)),
			  hpbdev->comm_reg + HPB_DMAE_DINTCR1);
}

static void asyncmdr_write(struct hpb_dmae_device *hpbdev, u32 data)
{
	iowrite32(data, hpbdev->mode_reg);
}

static u32 asyncmdr_read(struct hpb_dmae_device *hpbdev)
{
	return ioread32(hpbdev->mode_reg);
}
static void hpb_dmae_enable_int(struct hpb_dmae_device *hpbdev, u32 ch)
{
	u32 intreg;

	spin_lock_irq(&hpbdev->reg_lock);
	if (ch < 32) {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR0);
		iowrite32(BIT(ch) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR0);
	} else {
		intreg = ioread32(hpbdev->comm_reg + HPB_DMAE_DINTMR1);
		iowrite32(BIT(ch - 32) | intreg,
			  hpbdev->comm_reg + HPB_DMAE_DINTMR1);
	}
	spin_unlock_irq(&hpbdev->reg_lock);
}

static void hpb_dmae_async_reset(struct hpb_dmae_device *hpbdev, u32 data)
{
	u32 rstr;
	int timeout = 10000;	/* 100 ms */

	spin_lock(&hpbdev->reg_lock);
	rstr = ioread32(hpbdev->reset_reg);
	rstr |= data;
	iowrite32(rstr, hpbdev->reset_reg);
	do {
		rstr = ioread32(hpbdev->reset_reg);
		if ((rstr & data) == data)
			break;
		udelay(10);
	} while (timeout--);

	if (timeout < 0)
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"%s timeout\n", __func__);

	rstr &= ~data;
	iowrite32(rstr, hpbdev->reset_reg);
	spin_unlock(&hpbdev->reg_lock);
}

static void hpb_dmae_set_async_mode(struct hpb_dmae_device *hpbdev,
				    u32 mask, u32 data)
{
	u32 mode;

	spin_lock_irq(&hpbdev->reg_lock);
	mode = asyncmdr_read(hpbdev);
	mode &= ~mask;
	mode |= data;
	asyncmdr_write(hpbdev, mode);
	spin_unlock_irq(&hpbdev->reg_lock);
}

static void hpb_dmae_ctl_stop(struct hpb_dmae_device *hpbdev)
{
	dcmdr_write(hpbdev, HPB_DMAE_DCMDR_DQSPD);
}

static void hpb_dmae_reset(struct hpb_dmae_device *hpbdev)
{
	u32 ch;

	for (ch = 0; ch < hpbdev->pdata->num_hw_channels; ch++)
		hsrstr_write(hpbdev, ch);
}
static unsigned int calc_xmit_shift(struct hpb_dmae_chan *hpb_chan)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int width = ch_reg_read(hpb_chan, HPB_DMAE_DCR);
	int i;

	switch (width & (HPB_DMAE_DCR_SPDS_MASK | HPB_DMAE_DCR_DPDS_MASK)) {
	case HPB_DMAE_DCR_SPDS_8BIT | HPB_DMAE_DCR_DPDS_8BIT:
	default:
		i = XMIT_SZ_8BIT;
		break;
	case HPB_DMAE_DCR_SPDS_16BIT | HPB_DMAE_DCR_DPDS_16BIT:
		i = XMIT_SZ_16BIT;
		break;
	case HPB_DMAE_DCR_SPDS_32BIT | HPB_DMAE_DCR_DPDS_32BIT:
		i = XMIT_SZ_32BIT;
		break;
	}
	return pdata->ts_shift[i];
}

static void hpb_dmae_set_reg(struct hpb_dmae_chan *hpb_chan,
			     struct hpb_dmae_regs *hw, unsigned plane)
{
	ch_reg_write(hpb_chan, hw->sar,
		     plane ? HPB_DMAE_DSAR1 : HPB_DMAE_DSAR0);
	ch_reg_write(hpb_chan, hw->dar,
		     plane ? HPB_DMAE_DDAR1 : HPB_DMAE_DDAR0);
	ch_reg_write(hpb_chan, hw->tcr >> hpb_chan->xmit_shift,
		     plane ? HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);
}

static void hpb_dmae_start(struct hpb_dmae_chan *hpb_chan, bool next)
{
	ch_reg_write(hpb_chan, (next ? HPB_DMAE_DCMDR_DNXT : 0) |
		     HPB_DMAE_DCMDR_DMEN, HPB_DMAE_DCMDR);
}

static void hpb_dmae_halt(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);

	chan->plane_idx = 0;
	chan->first_desc = true;
}
static const struct hpb_dmae_slave_config *
hpb_dmae_find_slave(struct hpb_dmae_chan *hpb_chan, int slave_id)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	int i;

	if (slave_id >= HPB_DMA_SLAVE_NUMBER)
		return NULL;

	for (i = 0; i < pdata->num_slaves; i++)
		if (pdata->slaves[i].id == slave_id)
			return pdata->slaves + i;

	return NULL;
}

static void hpb_dmae_start_xfer(struct shdma_chan *schan,
				struct shdma_desc *sdesc)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	struct hpb_desc *desc = to_desc(sdesc);

	if (chan->cfg->flags & HPB_DMAE_SET_ASYNC_RESET)
		hpb_dmae_async_reset(hpbdev, chan->cfg->rstr);

	desc->plane_idx = chan->plane_idx;
	hpb_dmae_set_reg(chan, &desc->hw, chan->plane_idx);
	hpb_dmae_start(chan, !chan->first_desc);

	if (chan->xfer_mode == XFER_DOUBLE) {
		chan->plane_idx ^= 1;
		chan->first_desc = false;
	}
}

static bool hpb_dmae_desc_completed(struct shdma_chan *schan,
				    struct shdma_desc *sdesc)
{
	/*
	 * This is correct since we always have at most single
	 * outstanding DMA transfer per channel, and by the time
	 * we get completion interrupt the transfer is completed.
	 * This will change if we ever use alternating DMA
	 * information sets and submit two descriptors at once.
	 */
	return true;
}

static bool hpb_dmae_chan_irq(struct shdma_chan *schan, int irq)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	struct hpb_dmae_device *hpbdev = to_dev(chan);
	int ch = chan->cfg->dma_ch;

	/* Check Complete DMA Transfer */
	if (dintsr_read(hpbdev, ch)) {
		/* Clear Interrupt status */
		dintcr_write(hpbdev, ch);
		return true;
	}
	return false;
}
static int hpb_dmae_desc_setup(struct shdma_chan *schan,
			       struct shdma_desc *sdesc,
			       dma_addr_t src, dma_addr_t dst, size_t *len)
{
	struct hpb_desc *desc = to_desc(sdesc);

	if (*len > (size_t)HPB_DMA_TCR_MAX)
		*len = (size_t)HPB_DMA_TCR_MAX;

	desc->hw.sar = src;
	desc->hw.dar = dst;
	desc->hw.tcr = *len;

	return 0;
}

static size_t hpb_dmae_get_partial(struct shdma_chan *schan,
				   struct shdma_desc *sdesc)
{
	struct hpb_desc *desc = to_desc(sdesc);
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 tcr = ch_reg_read(chan, desc->plane_idx ?
			      HPB_DMAE_DTCR1 : HPB_DMAE_DTCR0);

	return (desc->hw.tcr - tcr) << chan->xmit_shift;
}

static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);

	if (chan->xfer_mode == XFER_DOUBLE)
		return dstsr & HPB_DMAE_DSTSR_DQSTS;
	else
		return dstsr & HPB_DMAE_DSTSR_DMSTS;
}
static int
hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
			      const struct hpb_dmae_slave_config *cfg)
{
	struct hpb_dmae_device *hpbdev = to_dev(hpb_chan);
	struct hpb_dmae_pdata *pdata = hpbdev->pdata;
	const struct hpb_dmae_channel *channel = pdata->channels;
	int slave_id = cfg->id;
	int i, err;

	for (i = 0; i < pdata->num_channels; i++, channel++) {
		if (channel->s_id == slave_id) {
			struct device *dev = hpb_chan->shdma_chan.dev;

			hpb_chan->base = hpbdev->chan_reg +
				HPB_DMAE_CHAN(cfg->dma_ch);

			dev_dbg(dev, "Detected Slave device\n");
			dev_dbg(dev, " -- slave_id : 0x%x\n", slave_id);
			dev_dbg(dev, " -- cfg->dma_ch : %d\n", cfg->dma_ch);
			dev_dbg(dev, " -- channel->ch_irq: %d\n",
				channel->ch_irq);
			break;
		}
	}

	err = shdma_request_irq(&hpb_chan->shdma_chan, channel->ch_irq,
				IRQF_SHARED, hpb_chan->dev_id);
	if (err) {
		dev_err(hpb_chan->shdma_chan.dev,
			"DMA channel request_irq %d failed with error %d\n",
			channel->ch_irq, err);
		return err;
	}

	hpb_chan->plane_idx = 0;
	hpb_chan->first_desc = true;

	if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) == 0) {
		hpb_chan->xfer_mode = XFER_SINGLE;
	} else if ((cfg->dcr & (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) ==
		   (HPB_DMAE_DCR_CT | HPB_DMAE_DCR_DIP)) {
		hpb_chan->xfer_mode = XFER_DOUBLE;
	} else {
		dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
		return -EINVAL;
	}

	if (cfg->flags & HPB_DMAE_SET_ASYNC_MODE)
		hpb_dmae_set_async_mode(hpbdev, cfg->mdm, cfg->mdr);
	ch_reg_write(hpb_chan, cfg->dcr, HPB_DMAE_DCR);
	ch_reg_write(hpb_chan, cfg->port, HPB_DMAE_DPTR);
	hpb_chan->xmit_shift = calc_xmit_shift(hpb_chan);
	hpb_dmae_enable_int(hpbdev, cfg->dma_ch);

	return 0;
}
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
			      dma_addr_t slave_addr, bool try)
{
	struct hpb_dmae_chan *chan = to_chan(schan);
	const struct hpb_dmae_slave_config *sc =
		hpb_dmae_find_slave(chan, slave_id);

	if (!sc)
		return -ENODEV;
	if (try)
		return 0;
	chan->cfg = sc;
	chan->slave_addr = slave_addr ? : sc->addr;
	return hpb_dmae_alloc_chan_resources(chan, sc);
}

static void hpb_dmae_setup_xfer(struct shdma_chan *schan, int slave_id)
{
}

static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
{
	struct hpb_dmae_chan *chan = to_chan(schan);

	return chan->slave_addr;
}

static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
{
	return &((struct hpb_desc *)buf)[i].shdma_desc;
}

static const struct shdma_ops hpb_dmae_ops = {
	.desc_completed	= hpb_dmae_desc_completed,
	.halt_channel	= hpb_dmae_halt,
	.channel_busy	= hpb_dmae_channel_busy,
	.slave_addr	= hpb_dmae_slave_addr,
	.desc_setup	= hpb_dmae_desc_setup,
	.set_slave	= hpb_dmae_set_slave,
	.setup_xfer	= hpb_dmae_setup_xfer,
	.start_xfer	= hpb_dmae_start_xfer,
	.embedded_desc	= hpb_dmae_embedded_desc,
	.chan_irq	= hpb_dmae_chan_irq,
	.get_partial	= hpb_dmae_get_partial,
};
static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
{
	struct shdma_dev *sdev = &hpbdev->shdma_dev;
	struct platform_device *pdev =
		to_platform_device(hpbdev->shdma_dev.dma_dev.dev);
	struct hpb_dmae_chan *new_hpb_chan;
	struct shdma_chan *schan;

	/* Alloc channel */
	new_hpb_chan = devm_kzalloc(&pdev->dev,
				    sizeof(struct hpb_dmae_chan), GFP_KERNEL);
	if (!new_hpb_chan) {
		dev_err(hpbdev->shdma_dev.dma_dev.dev,
			"No free memory for allocating DMA channels!\n");
		return -ENOMEM;
	}

	schan = &new_hpb_chan->shdma_chan;
	schan->max_xfer_len = HPB_DMA_TCR_MAX;

	shdma_chan_probe(sdev, schan, id);

	if (pdev->id >= 0)
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dmae%d.%d", pdev->id, id);
	else
		snprintf(new_hpb_chan->dev_id, sizeof(new_hpb_chan->dev_id),
			 "hpb-dma.%d", id);

	return 0;
}
static int hpb_dmae_probe(struct platform_device *pdev)
{
	const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES;
	struct hpb_dmae_pdata *pdata = pdev->dev.platform_data;
	struct hpb_dmae_device *hpbdev;
	struct dma_device *dma_dev;
	struct resource *chan, *comm, *rest, *mode, *irq_res;
	int err, i;

	/* Get platform data */
	if (!pdata || !pdata->num_channels)
		return -ENODEV;

	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	comm = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	rest = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	mode = platform_get_resource(pdev, IORESOURCE_MEM, 3);

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq_res)
		return -ENODEV;

	hpbdev = devm_kzalloc(&pdev->dev, sizeof(struct hpb_dmae_device),
			      GFP_KERNEL);
	if (!hpbdev) {
		dev_err(&pdev->dev, "Not enough memory\n");
		return -ENOMEM;
	}

	hpbdev->chan_reg = devm_ioremap_resource(&pdev->dev, chan);
	if (IS_ERR(hpbdev->chan_reg))
		return PTR_ERR(hpbdev->chan_reg);

	hpbdev->comm_reg = devm_ioremap_resource(&pdev->dev, comm);
	if (IS_ERR(hpbdev->comm_reg))
		return PTR_ERR(hpbdev->comm_reg);

	hpbdev->reset_reg = devm_ioremap_resource(&pdev->dev, rest);
	if (IS_ERR(hpbdev->reset_reg))
		return PTR_ERR(hpbdev->reset_reg);

	hpbdev->mode_reg = devm_ioremap_resource(&pdev->dev, mode);
	if (IS_ERR(hpbdev->mode_reg))
		return PTR_ERR(hpbdev->mode_reg);

	dma_dev = &hpbdev->shdma_dev.dma_dev;

	spin_lock_init(&hpbdev->reg_lock);

	/* Platform data */
	hpbdev->pdata = pdata;

	pm_runtime_enable(&pdev->dev);
	err = pm_runtime_get_sync(&pdev->dev);
	if (err < 0)
		dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err);

	/* Reset DMA controller */
	hpb_dmae_reset(hpbdev);

	pm_runtime_put(&pdev->dev);

	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_dev->src_addr_widths = widths;
	dma_dev->dst_addr_widths = widths;
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	hpbdev->shdma_dev.ops = &hpb_dmae_ops;
	hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc);
	err = shdma_init(&pdev->dev, &hpbdev->shdma_dev, pdata->num_channels);
	if (err < 0)
		goto error;

	/* Create DMA channels */
	for (i = 0; i < pdata->num_channels; i++)
		hpb_dmae_chan_probe(hpbdev, i);

	platform_set_drvdata(pdev, hpbdev);
	err = dma_async_device_register(dma_dev);
	if (!err)
		return 0;

	shdma_cleanup(&hpbdev->shdma_dev);
error:
	pm_runtime_disable(&pdev->dev);
	return err;
}
static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
		BUG_ON(!schan);

		shdma_chan_remove(schan);
	}
}

static int hpb_dmae_remove(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

	dma_async_device_unregister(&hpbdev->shdma_dev.dma_dev);

	pm_runtime_disable(&pdev->dev);

	hpb_dmae_chan_remove(hpbdev);

	return 0;
}

static void hpb_dmae_shutdown(struct platform_device *pdev)
{
	struct hpb_dmae_device *hpbdev = platform_get_drvdata(pdev);

	hpb_dmae_ctl_stop(hpbdev);
}

static struct platform_driver hpb_dmae_driver = {
	.probe		= hpb_dmae_probe,
	.remove		= hpb_dmae_remove,
	.shutdown	= hpb_dmae_shutdown,
	.driver	= {
		.name	= "hpb-dma-engine",
	},
};
module_platform_driver(hpb_dmae_driver);

MODULE_AUTHOR("Max Filippov <max.filippov@cogentembedded.com>");
MODULE_DESCRIPTION("Renesas HPB DMA Engine driver");
MODULE_LICENSE("GPL");
include/linux/platform_data/dma-rcar-hpbdma.h	deleted 100644 → 0
/*
* Copyright (C) 2011-2013 Renesas Electronics Corporation
* Copyright (C) 2013 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*/
#ifndef __DMA_RCAR_HPBDMA_H
#define __DMA_RCAR_HPBDMA_H
#include <linux/bitops.h>
#include <linux/types.h>
/* Transmit sizes and respective register values */
enum {
	XMIT_SZ_8BIT = 0,
	XMIT_SZ_16BIT = 1,
	XMIT_SZ_32BIT = 2,
	XMIT_SZ_MAX
};
/* DMA control register (DCR) bits */
#define HPB_DMAE_DCR_DTAMD (1u << 26)
#define HPB_DMAE_DCR_DTAC (1u << 25)
#define HPB_DMAE_DCR_DTAU (1u << 24)
#define HPB_DMAE_DCR_DTAU1 (1u << 23)
#define HPB_DMAE_DCR_SWMD (1u << 22)
#define HPB_DMAE_DCR_BTMD (1u << 21)
#define HPB_DMAE_DCR_PKMD (1u << 20)
#define HPB_DMAE_DCR_CT (1u << 18)
#define HPB_DMAE_DCR_ACMD (1u << 17)
#define HPB_DMAE_DCR_DIP (1u << 16)
#define HPB_DMAE_DCR_SMDL (1u << 13)
#define HPB_DMAE_DCR_SPDAM (1u << 12)
#define HPB_DMAE_DCR_SDRMD_MASK (3u << 10)
#define HPB_DMAE_DCR_SDRMD_MOD (0u << 10)
#define HPB_DMAE_DCR_SDRMD_AUTO (1u << 10)
#define HPB_DMAE_DCR_SDRMD_TIMER (2u << 10)
#define HPB_DMAE_DCR_SPDS_MASK (3u << 8)
#define HPB_DMAE_DCR_SPDS_8BIT (0u << 8)
#define HPB_DMAE_DCR_SPDS_16BIT (1u << 8)
#define HPB_DMAE_DCR_SPDS_32BIT (2u << 8)
#define HPB_DMAE_DCR_DMDL (1u << 5)
#define HPB_DMAE_DCR_DPDAM (1u << 4)
#define HPB_DMAE_DCR_DDRMD_MASK (3u << 2)
#define HPB_DMAE_DCR_DDRMD_MOD (0u << 2)
#define HPB_DMAE_DCR_DDRMD_AUTO (1u << 2)
#define HPB_DMAE_DCR_DDRMD_TIMER (2u << 2)
#define HPB_DMAE_DCR_DPDS_MASK (3u << 0)
#define HPB_DMAE_DCR_DPDS_8BIT (0u << 0)
#define HPB_DMAE_DCR_DPDS_16BIT (1u << 0)
#define HPB_DMAE_DCR_DPDS_32BIT (2u << 0)
/* Asynchronous reset register (ASYNCRSTR) bits */
#define HPB_DMAE_ASYNCRSTR_ASRST41 BIT(10)
#define HPB_DMAE_ASYNCRSTR_ASRST40 BIT(9)
#define HPB_DMAE_ASYNCRSTR_ASRST39 BIT(8)
#define HPB_DMAE_ASYNCRSTR_ASRST27 BIT(7)
#define HPB_DMAE_ASYNCRSTR_ASRST26 BIT(6)
#define HPB_DMAE_ASYNCRSTR_ASRST25 BIT(5)
#define HPB_DMAE_ASYNCRSTR_ASRST24 BIT(4)
#define HPB_DMAE_ASYNCRSTR_ASRST23 BIT(3)
#define HPB_DMAE_ASYNCRSTR_ASRST22 BIT(2)
#define HPB_DMAE_ASYNCRSTR_ASRST21 BIT(1)
#define HPB_DMAE_ASYNCRSTR_ASRST20 BIT(0)
struct hpb_dmae_slave_config {
	unsigned int	id;
	dma_addr_t	addr;
	u32		dcr;
	u32		port;
	u32		rstr;
	u32		mdr;
	u32		mdm;
	u32		flags;
#define HPB_DMAE_SET_ASYNC_RESET	BIT(0)
#define HPB_DMAE_SET_ASYNC_MODE		BIT(1)
	u32		dma_ch;
};
#define HPB_DMAE_CHANNEL(_irq, _s_id) \
{ \
.ch_irq = _irq, \
.s_id = _s_id, \
}
struct hpb_dmae_channel {
	unsigned int	ch_irq;
	unsigned int	s_id;
};
struct hpb_dmae_pdata {
	const struct hpb_dmae_slave_config *slaves;
	int num_slaves;
	const struct hpb_dmae_channel *channels;
	int num_channels;
	const unsigned int ts_shift[XMIT_SZ_MAX];
	int num_hw_channels;
};
#endif