Commit baab8537, authored Aug 17, 2018 by Vinod Koul
Merge branch 'topic/imx' into for-linus
Parents: 4d442482, 0f06c027
Showing 2 changed files with 380 additions and 199 deletions:

	drivers/dma/Kconfig	+1	-0
	drivers/dma/imx-sdma.c	+379	-199
drivers/dma/Kconfig
@@ -250,6 +250,7 @@ config IMX_SDMA
 	tristate "i.MX SDMA support"
 	depends on ARCH_MXC
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
 	  Freescale i.MX25/31/35/51/53/6 chips.
...
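The one-line Kconfig change is what enables the rewrite below: selecting DMA_VIRTUAL_CHANNELS pulls in the virt-dma helper layer (drivers/dma/virt-dma.[ch]) that imx-sdma now builds on. As a minimal hedged sketch of the embedding pattern that layer expects (the my_chan/my_desc names are hypothetical; only the virt-dma types and container_of() are real):

	struct my_desc {
		struct virt_dma_desc vd;	/* embeds dma_async_tx_descriptor as vd.tx */
		/* driver-private transfer state follows */
	};

	struct my_chan {
		struct virt_dma_chan vc;	/* embeds struct dma_chan as vc.chan */
		struct my_desc *cur;		/* descriptor currently on the hardware */
	};

	static inline struct my_chan *to_my_chan(struct dma_chan *chan)
	{
		/* same pattern as the new to_sdma_chan()/to_sdma_desc() below */
		return container_of(chan, struct my_chan, vc.chan);
	}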
drivers/dma/imx-sdma.c
@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
...
@@ -41,6 +42,7 @@
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

 #include "dmaengine.h"
+#include "virt-dma.h"

 /* SDMA registers */
 #define SDMA_H_C0PTR		0x000
...
@@ -183,6 +185,7 @@
  * Mode/Count of data node descriptors - IPCv2
  */
 struct sdma_mode_count {
+#define SDMA_BD_MAX_CNT	0xffff
 	u32 count   : 16; /* size of the buffer pointed by this BD */
 	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 	u32 command :  8; /* command mostly used for channel 0 */
...
@@ -200,9 +203,9 @@ struct sdma_buffer_descriptor {
 /**
  * struct sdma_channel_control - Channel control Block
  *
- * @current_bd_ptr	current buffer descriptor processed
- * @base_bd_ptr		first element of buffer descriptor array
- * @unused		padding. The SDMA engine expects an array of 128 byte
+ * @current_bd_ptr:	current buffer descriptor processed
+ * @base_bd_ptr:	first element of buffer descriptor array
+ * @unused:		padding. The SDMA engine expects an array of 128 byte
  *			control blocks
  */
 struct sdma_channel_control {
...
@@ -215,10 +218,13 @@ struct sdma_channel_control {
  * struct sdma_state_registers - SDMA context for a channel
  *
  * @pc:	program counter
+ * @unused1:	unused
  * @t:	test bit: status of arithmetic & test instruction
  * @rpc:	return program counter
+ * @unused0:	unused
  * @sf:	source fault while loading data
  * @spc:	loop start program counter
+ * @unused2:	unused
  * @df:	destination fault while storing data
  * @epc:	loop end program counter
  * @lm:	loop mode
...
@@ -256,6 +262,14 @@ struct sdma_state_registers {
  * @dsa:	dedicated core source address register
  * @ds:	dedicated core status register
  * @dd:	dedicated core data register
+ * @scratch0:	1st word of dedicated ram for context switch
+ * @scratch1:	2nd word of dedicated ram for context switch
+ * @scratch2:	3rd word of dedicated ram for context switch
+ * @scratch3:	4th word of dedicated ram for context switch
+ * @scratch4:	5th word of dedicated ram for context switch
+ * @scratch5:	6th word of dedicated ram for context switch
+ * @scratch6:	7th word of dedicated ram for context switch
+ * @scratch7:	8th word of dedicated ram for context switch
  */
 struct sdma_context_data {
 	struct sdma_state_registers	channel_state;
...
@@ -284,25 +298,67 @@ struct sdma_context_data {
 	u32  scratch7;
 } __attribute__ ((packed));

 #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))

 struct sdma_engine;

+/**
+ * struct sdma_desc - descriptor structure for one transfer
+ * @vd:			descriptor for virt dma
+ * @num_bd:		number of descriptors currently handling
+ * @bd_phys:		physical address of bd
+ * @buf_tail:		ID of the buffer that was processed
+ * @buf_ptail:		ID of the previous buffer that was processed
+ * @period_len:		period length, used in cyclic.
+ * @chn_real_count:	the real count updated from bd->mode.count
+ * @chn_count:		the transfer count set
+ * @sdmac:		sdma_channel pointer
+ * @bd:			pointer of allocate bd
+ */
+struct sdma_desc {
+	struct virt_dma_desc	vd;
+	unsigned int		num_bd;
+	dma_addr_t		bd_phys;
+	unsigned int		buf_tail;
+	unsigned int		buf_ptail;
+	unsigned int		period_len;
+	unsigned int		chn_real_count;
+	unsigned int		chn_count;
+	struct sdma_channel	*sdmac;
+	struct sdma_buffer_descriptor *bd;
+};
+
 /**
  * struct sdma_channel - housekeeping for an SDMA channel
  *
- * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id + 1
- * @direction		transfer type. Needed for setting SDMA script
- * @peripheral_type	Peripheral type. Needed for setting SDMA script
- * @event_id0		aka dma request line
- * @event_id1		for channels that use 2 events
- * @word_size		peripheral access size
- * @buf_tail		ID of the buffer that was processed
- * @buf_ptail		ID of the previous buffer that was processed
- * @num_bd		max NUM_BD. number of descriptors currently handling
+ * @vc:			virt_dma base structure
+ * @desc:		sdma description including vd and other special member
+ * @sdma:		pointer to the SDMA engine for this channel
+ * @channel:		the channel number, matches dmaengine chan_id + 1
+ * @direction:		transfer type. Needed for setting SDMA script
+ * @peripheral_type:	Peripheral type. Needed for setting SDMA script
+ * @event_id0:		aka dma request line
+ * @event_id1:		for channels that use 2 events
+ * @word_size:		peripheral access size
+ * @pc_from_device:	script address for those device_2_memory
+ * @pc_to_device:	script address for those memory_2_device
+ * @device_to_device:	script address for those device_2_device
+ * @pc_to_pc:		script address for those memory_2_memory
+ * @flags:		loop mode or not
+ * @per_address:	peripheral source or destination address in common case
+ *			destination address in p_2_p case
+ * @per_address2:	peripheral source address in p_2_p case
+ * @event_mask:		event mask used in p_2_p script
+ * @watermark_level:	value for gReg[7], some script will extend it from
+ *			basic watermark such as p_2_p
+ * @shp_addr:		value for gReg[6]
+ * @per_addr:		value for gReg[2]
+ * @status:		status of dma channel
+ * @data:		specific sdma interface structure
+ * @bd_pool:		dma_pool for bd
  */
 struct sdma_channel {
+	struct virt_dma_chan		vc;
+	struct sdma_desc		*desc;
 	struct sdma_engine		*sdma;
 	unsigned int			channel;
 	enum dma_transfer_direction	direction;
...
@@ -310,28 +366,17 @@ struct sdma_channel {
 	unsigned int			event_id0;
 	unsigned int			event_id1;
 	enum dma_slave_buswidth		word_size;
-	unsigned int			buf_tail;
-	unsigned int			buf_ptail;
-	unsigned int			num_bd;
-	unsigned int			period_len;
-	struct sdma_buffer_descriptor	*bd;
-	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
 	unsigned int			device_to_device;
+	unsigned int			pc_to_pc;
 	unsigned long			flags;
 	dma_addr_t			per_address, per_address2;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
-	struct dma_chan			chan;
-	spinlock_t			lock;
-	struct dma_async_tx_descriptor	desc;
 	enum dma_status			status;
-	unsigned int			chn_count;
-	unsigned int			chn_real_count;
-	struct tasklet_struct		tasklet;
 	struct imx_dma_data		data;
-	bool				enabled;
+	struct dma_pool			*bd_pool;
 };

 #define IMX_DMA_SG_LOOP BIT(0)
...
@@ -346,15 +391,15 @@ struct sdma_channel {
 /**
  * struct sdma_firmware_header - Layout of the firmware image
  *
- * @magic		"SDMA"
- * @version_major	increased whenever layout of struct sdma_script_start_addrs
- *			changes.
- * @version_minor	firmware minor version (for binary compatible changes)
- * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
- * @num_script_addrs	Number of script addresses in this image
- * @ram_code_start	offset of SDMA ram image in this firmware image
- * @ram_code_size	size of SDMA ram image
- * @script_addrs	Stores the start address of the SDMA scripts
+ * @magic:		"SDMA"
+ * @version_major:	increased whenever layout of struct
+ *			sdma_script_start_addrs changes.
+ * @version_minor:	firmware minor version (for binary compatible changes)
+ * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs:	Number of script addresses in this image
+ * @ram_code_start:	offset of SDMA ram image in this firmware image
+ * @ram_code_size:	size of SDMA ram image
+ * @script_addrs:	Stores the start address of the SDMA scripts
  *			(in SDMA memory space)
  */
 struct sdma_firmware_header {
...
@@ -391,6 +436,8 @@ struct sdma_engine {
 	u32				spba_start_addr;
 	u32				spba_end_addr;
 	unsigned int			irq;
+	dma_addr_t			bd0_phys;
+	struct sdma_buffer_descriptor	*bd0;
 };

 static struct sdma_driver_data sdma_imx31 = {
...
@@ -590,14 +637,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
-	unsigned long flags;
-	struct sdma_channel *sdmac = &sdma->channel[channel];
-
 	writel(BIT(channel), sdma->regs + SDMA_H_START);
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = true;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
 }

 /*
...
@@ -625,7 +665,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 		u32 address)
 {
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
...
@@ -681,26 +721,49 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 	writel_relaxed(val, sdma->regs + chnenbl);
 }

+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct sdma_desc, vd.tx);
+}
+
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+	struct sdma_desc *desc;
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+
+	if (!vd) {
+		sdmac->desc = NULL;
+		return;
+	}
+	sdmac->desc = desc = to_sdma_desc(&vd->tx);
+	/*
+	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
+	 * the desc allocated will never be freed in vchan_dma_desc_free_list
+	 */
+	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+		list_del(&vd->node);
+
+	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+	sdma_enable_channel(sdma, sdmac->channel);
+}
+
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
 	struct sdma_buffer_descriptor *bd;
 	int error = 0;
 	enum dma_status	old_status = sdmac->status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	if (!sdmac->enabled) {
-		spin_unlock_irqrestore(&sdmac->lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&sdmac->lock, flags);

 	/*
 	 * loop mode. Iterate over descriptors, re-setup them and
 	 * call callback function.
 	 */
-	while (1) {
-		bd = &sdmac->bd[sdmac->buf_tail];
+	while (sdmac->desc) {
+		struct sdma_desc *desc = sdmac->desc;
+
+		bd = &desc->bd[desc->buf_tail];

 		if (bd->mode.status & BD_DONE)
 			break;
...
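The new sdma_start_desc() is the standard virt-dma dispatch idiom: under vc.lock, pop the next issued descriptor, unlink it (except in cyclic mode, as the comment in the diff explains), and point the hardware at it. A hedged restatement using the hypothetical my_chan/my_desc types sketched earlier:

	static void my_start_next(struct my_chan *c)
	{
		/* caller holds c->vc.lock */
		struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

		if (!vd) {
			c->cur = NULL;		/* nothing issued: channel goes idle */
			return;
		}
		list_del(&vd->node);		/* claim it from the issued list */
		c->cur = container_of(vd, struct my_desc, vd);
		/* program the controller with c->cur here (hardware-specific) */
	}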
@@ -716,11 +779,11 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		 * the number of bytes present in the current buffer descriptor.
 		 */

-		sdmac->chn_real_count = bd->mode.count;
+		desc->chn_real_count = bd->mode.count;
 		bd->mode.status |= BD_DONE;
-		bd->mode.count = sdmac->period_len;
-		sdmac->buf_ptail = sdmac->buf_tail;
-		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
+		bd->mode.count = desc->period_len;
+		desc->buf_ptail = desc->buf_tail;
+		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

 		/*
 		 * The callback is called from the interrupt context in order
...
@@ -728,41 +791,38 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		 * SDMA transaction status by the time the client tasklet is
 		 * executed.
 		 */
-
-		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+		spin_unlock(&sdmac->vc.lock);
+		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+		spin_lock(&sdmac->vc.lock);

 		if (error)
 			sdmac->status = old_status;
 	}
 }

-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
 	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 	struct sdma_buffer_descriptor *bd;
 	int i, error = 0;

-	sdmac->chn_real_count = 0;
+	sdmac->desc->chn_real_count = 0;
 	/*
 	 * non loop mode. Iterate over all descriptors, collect
 	 * errors and call callback function
 	 */
-	for (i = 0; i < sdmac->num_bd; i++) {
-		bd = &sdmac->bd[i];
+	for (i = 0; i < sdmac->desc->num_bd; i++) {
+		bd = &sdmac->desc->bd[i];

 		 if (bd->mode.status & (BD_DONE | BD_RROR))
 			error = -EIO;
-		 sdmac->chn_real_count += bd->mode.count;
+		 sdmac->desc->chn_real_count += bd->mode.count;
 	}

 	if (error)
 		sdmac->status = DMA_ERROR;
 	else
 		sdmac->status = DMA_COMPLETE;
-
-	dma_cookie_complete(&sdmac->desc);
-	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 }

 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
...
@@ -778,12 +838,21 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 	while (stat) {
 		int channel = fls(stat) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
+		struct sdma_desc *desc;
+
+		spin_lock(&sdmac->vc.lock);
+		desc = sdmac->desc;
+		if (desc) {
+			if (sdmac->flags & IMX_DMA_SG_LOOP) {
+				sdma_update_channel_loop(sdmac);
+			} else {
+				mxc_sdma_handle_channel_normal(sdmac);
+				vchan_cookie_complete(&desc->vd);
+				sdma_start_desc(sdmac);
+			}
+		}

-		if (sdmac->flags & IMX_DMA_SG_LOOP)
-			sdma_update_channel_loop(sdmac);
-		else
-			tasklet_schedule(&sdmac->tasklet);
-
+		spin_unlock(&sdmac->vc.lock);
 		__clear_bit(channel, &stat);
 	}
...
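With the per-driver tasklet gone, completion of non-cyclic transfers now goes through vchan_cookie_complete(), which completes the cookie and defers the client callback to virt-dma's own tasklet (vc.task). A hedged sketch of the resulting interrupt shape, reusing the hypothetical helpers above:

	static irqreturn_t my_irq(int irq, void *dev_id)
	{
		struct my_chan *c = dev_id;

		spin_lock(&c->vc.lock);
		if (c->cur) {
			vchan_cookie_complete(&c->cur->vd);	/* callback runs later, in vc.task */
			my_start_next(c);			/* chain the next issued descriptor */
		}
		spin_unlock(&c->vc.lock);
		return IRQ_HANDLED;
	}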
@@ -802,14 +871,16 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	 * These are needed once we start to support transfers between
 	 * two peripherals or memory-to-memory transfers
 	 */
-	int per_2_per = 0;
+	int per_2_per = 0, emi_2_emi = 0;

 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
 	sdmac->device_to_device = 0;
+	sdmac->pc_to_pc = 0;

 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
+		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 		break;
 	case IMX_DMATYPE_DSP:
 		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
...
@@ -882,6 +953,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
 	sdmac->device_to_device = per_2_per;
+	sdmac->pc_to_pc = emi_2_emi;
 }

 static int sdma_load_context(struct sdma_channel *sdmac)
...
@@ -890,7 +962,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	int channel = sdmac->channel;
 	int load_address;
 	struct sdma_context_data *context = sdma->context;
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	int ret;
 	unsigned long flags;
...
@@ -898,6 +970,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 		load_address = sdmac->pc_from_device;
 	else if (sdmac->direction == DMA_DEV_TO_DEV)
 		load_address = sdmac->device_to_device;
+	else if (sdmac->direction == DMA_MEM_TO_MEM)
+		load_address = sdmac->pc_to_pc;
 	else
 		load_address = sdmac->pc_to_device;
...
@@ -939,7 +1013,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct sdma_channel, chan);
+	return container_of(chan, struct sdma_channel, vc.chan);
 }

 static int sdma_disable_channel(struct dma_chan *chan)
...
@@ -947,21 +1021,25 @@ static int sdma_disable_channel(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
-	unsigned long flags;

 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;

-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = false;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
-
 	return 0;
 }

 static int sdma_disable_channel_with_delay(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
 	sdma_disable_channel(chan);
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_get_all_descriptors(&sdmac->vc, &head);
+	sdmac->desc = NULL;
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_dma_desc_free_list(&sdmac->vc, &head);

 	/*
 	 * According to NXP R&D team a delay of one BD SDMA cost time
...
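sdma_disable_channel_with_delay() now also performs the usual virt-dma teardown: collect every queued descriptor under vc.lock, then free the list outside the lock through the vc.desc_free callback (sdma_desc_free in this driver). Hedged generic form:

	static int my_terminate_all(struct dma_chan *chan)
	{
		struct my_chan *c = to_my_chan(chan);
		unsigned long flags;
		LIST_HEAD(head);

		spin_lock_irqsave(&c->vc.lock, flags);
		c->cur = NULL;
		vchan_get_all_descriptors(&c->vc, &head);
		spin_unlock_irqrestore(&c->vc.lock, flags);
		vchan_dma_desc_free_list(&c->vc, &head);	/* calls vc.desc_free per node */
		return 0;
	}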
@@ -1090,52 +1168,81 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
 	return 0;
 }

-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_request_channel0(struct sdma_engine *sdma)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
 	int ret = -EBUSY;

-	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
-					GFP_KERNEL);
-	if (!sdmac->bd) {
+	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+					GFP_NOWAIT);
+	if (!sdma->bd0) {
 		ret = -ENOMEM;
 		goto out;
 	}

-	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;

-	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
 	return 0;
 out:

 	return ret;
 }

-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+static int sdma_alloc_bd(struct sdma_desc *desc)
 {
-	unsigned long flags;
-	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&sdmac->lock, flags);
+	int ret = 0;

-	cookie = dma_cookie_assign(tx);
+	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
+				  &desc->bd_phys);
+	if (!desc->bd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+out:
+	return ret;
+}

-	spin_unlock_irqrestore(&sdmac->lock, flags);
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+	dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+}

-	return cookie;
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
+
+	sdma_free_bd(desc);
+	kfree(desc);
 }

 static int sdma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct imx_dma_data *data = chan->private;
+	struct imx_dma_data mem_data;
 	int prio, ret;

-	if (!data)
-		return -EINVAL;
+	/*
+	 * MEMCPY may never setup chan->private by filter function such as
+	 * dmatest, thus create 'struct imx_dma_data mem_data' for this case.
+	 * Please note in any other slave case, you have to setup chan->private
+	 * with 'struct imx_dma_data' in your own filter function if you want to
+	 * request dma channel by dma_request_channel() rather than
+	 * dma_request_slave_channel(). Otherwise, 'MEMCPY in case?' will appear
+	 * to warn you to correct your filter function.
+	 */
+	if (!data) {
+		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
+		mem_data.priority = 2;
+		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
+		mem_data.dma_request = 0;
+		mem_data.dma_request2 = 0;
+		data = &mem_data;
+
+		sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+	}

 	switch (data->priority) {
 	case DMA_PRIO_HIGH:
...
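Buffer descriptors move from one page-sized dma_zalloc_coherent() buffer per channel to a dma_pool, allocated per descriptor in sdma_alloc_bd() and returned in sdma_free_bd(). A hedged sketch of the <linux/dmapool.h> lifecycle involved (all four calls are the real API; 'dev' is a placeholder device pointer):

	struct dma_pool *pool;
	dma_addr_t phys;
	void *vaddr;

	pool = dma_pool_create("bd_pool", dev,
			       sizeof(struct sdma_buffer_descriptor), 32, 0);
	vaddr = dma_pool_alloc(pool, GFP_NOWAIT, &phys);	/* one fixed-size block */
	/* ... hand phys to the engine, vaddr to the CPU ... */
	dma_pool_free(pool, vaddr, phys);
	dma_pool_destroy(pool);		/* all blocks must be freed by now */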
@@ -1161,18 +1268,13 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto disable_clk_ipg;

-	ret = sdma_request_channel(sdmac);
-	if (ret)
-		goto disable_clk_ahb;
-
 	ret = sdma_set_channel_priority(sdmac, prio);
 	if (ret)
 		goto disable_clk_ahb;

-	dma_async_tx_descriptor_init(&sdmac->desc, chan);
-	sdmac->desc.tx_submit = sdma_tx_submit;
-	/* txd.flags will be overwritten in prep funcs */
-	sdmac->desc.flags = DMA_CTRL_ACK;
+	sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
+				sizeof(struct sdma_buffer_descriptor),
+				32, 0);

 	return 0;
...
@@ -1188,7 +1290,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;

-	sdma_disable_channel(chan);
+	sdma_disable_channel_with_delay(chan);

 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
...
@@ -1200,10 +1302,105 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	sdma_set_channel_priority(sdmac, 0);

-	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
+
+	dma_pool_destroy(sdmac->bd_pool);
+	sdmac->bd_pool = NULL;
+}
+
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+				enum dma_transfer_direction direction, u32 bds)
+{
+	struct sdma_desc *desc;
+
+	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
+	if (!desc)
+		goto err_out;
+
+	sdmac->status = DMA_IN_PROGRESS;
+	sdmac->direction = direction;
+	sdmac->flags = 0;
+
+	desc->chn_count = 0;
+	desc->chn_real_count = 0;
+	desc->buf_tail = 0;
+	desc->buf_ptail = 0;
+	desc->sdmac = sdmac;
+	desc->num_bd = bds;
+
+	if (sdma_alloc_bd(desc))
+		goto err_desc_out;
+
+	/* No slave_config called in MEMCPY case, so do here */
+	if (direction == DMA_MEM_TO_MEM)
+		sdma_config_ownership(sdmac, false, true, false);
+
+	if (sdma_load_context(sdmac))
+		goto err_desc_out;
+
+	return desc;
+
+err_desc_out:
+	kfree(desc);
+err_out:
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dst,
+		dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	size_t count;
+	int i = 0, param;
+	struct sdma_buffer_descriptor *bd;
+	struct sdma_desc *desc;
+
+	if (!chan || !len)
+		return NULL;
+
+	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
+		&dma_src, &dma_dst, len, channel);
+
+	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
+					len / SDMA_BD_MAX_CNT + 1);
+	if (!desc)
+		return NULL;
+
+	do {
+		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+		bd = &desc->bd[i];
+		bd->buffer_addr = dma_src;
+		bd->ext_buffer_addr = dma_dst;
+		bd->mode.count = count;
+		desc->chn_count += count;
+		bd->mode.command = 0;
+
+		dma_src += count;
+		dma_dst += count;
+		len -= count;
+		i++;
+
+		param = BD_DONE | BD_EXTD | BD_CONT;
+		/* last bd */
+		if (!len) {
+			param |= BD_INTR;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
+		}
+
+		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
+				i, count, bd->buffer_addr,
+				param & BD_WRAP ? "wrap" : "",
+				param & BD_INTR ? " intr" : "");
+
+		bd->mode.status = param;
+	} while (len);
+
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+}

 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
...
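Since one buffer descriptor moves at most SDMA_BD_MAX_CNT (0xffff = 65535) bytes, sdma_prep_memcpy() sizes the transfer as len / SDMA_BD_MAX_CNT + 1 descriptors. Hedged worked example: a 100 KiB copy (102400 bytes) gets 102400 / 65535 + 1 = 2 BDs, carrying 65535 and 36865 bytes; only the final BD sets BD_INTR | BD_LAST and clears BD_CONT.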
@@ -1213,75 +1410,54 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
-	int ret, i, count;
+	int i, count;
 	int channel = sdmac->channel;
 	struct scatterlist *sg;
+	struct sdma_desc *desc;

-	if (sdmac->status == DMA_IN_PROGRESS)
-		return NULL;
-	sdmac->status = DMA_IN_PROGRESS;
-
-	sdmac->flags = 0;
-
-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
+	desc = sdma_transfer_init(sdmac, direction, sg_len);
+	if (!desc)
+		goto err_out;

 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 			sg_len, channel);

-	sdmac->direction = direction;
-	ret = sdma_load_context(sdmac);
-	if (ret)
-		goto err_out;
-
-	if (sg_len > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, sg_len, NUM_BD);
-		ret = -EINVAL;
-		goto err_out;
-	}
-
-	sdmac->chn_count = 0;
 	for_each_sg(sgl, sg, sg_len, i) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;

 		bd->buffer_addr = sg->dma_address;

 		count = sg_dma_len(sg);

-		if (count > 0xffff) {
+		if (count > SDMA_BD_MAX_CNT) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
-					channel, count, 0xffff);
-			ret = -EINVAL;
-			goto err_out;
+					channel, count, SDMA_BD_MAX_CNT);
+			goto err_bd_out;
 		}

 		bd->mode.count = count;
-		sdmac->chn_count += count;
+		desc->chn_count += count;

-		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
-			ret =  -EINVAL;
-			goto err_out;
-		}
+		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+			goto err_bd_out;

 		switch (sdmac->word_size) {
 		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
 			if (count & 3 || sg->dma_address & 3)
-				return NULL;
+				goto err_bd_out;
 			break;
 		case DMA_SLAVE_BUSWIDTH_2_BYTES:
 			bd->mode.command = 2;
 			if (count & 1 || sg->dma_address & 1)
-				return NULL;
+				goto err_bd_out;
 			break;
 		case DMA_SLAVE_BUSWIDTH_1_BYTE:
 			bd->mode.command = 1;
 			break;
 		default:
-			return NULL;
+			goto err_bd_out;
 		}

 		param = BD_DONE | BD_EXTD | BD_CONT;
...
@@ -1300,10 +1476,10 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		bd->mode.status = param;
 	}

-	sdmac->num_bd = sg_len;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
-	return &sdmac->desc;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
...
@@ -1318,40 +1494,27 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
 	int channel = sdmac->channel;
-	int ret, i = 0, buf = 0;
+	int i = 0, buf = 0;
+	struct sdma_desc *desc;

 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

-	if (sdmac->status == DMA_IN_PROGRESS)
-		return NULL;
-
-	sdmac->status = DMA_IN_PROGRESS;
+	desc = sdma_transfer_init(sdmac, direction, num_periods);
+	if (!desc)
+		goto err_out;

-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
-	sdmac->period_len = period_len;
+	desc->period_len = period_len;

 	sdmac->flags |= IMX_DMA_SG_LOOP;
-	sdmac->direction = direction;
-	ret = sdma_load_context(sdmac);
-	if (ret)
-		goto err_out;

-	if (num_periods > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, num_periods, NUM_BD);
-		goto err_out;
-	}
-
-	if (period_len > 0xffff) {
+	if (period_len > SDMA_BD_MAX_CNT) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
-				channel, period_len, 0xffff);
-		goto err_out;
+				channel, period_len, SDMA_BD_MAX_CNT);
+		goto err_bd_out;
 	}

 	while (buf < buf_len) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;

 		bd->buffer_addr = dma_addr;
...
@@ -1359,7 +1522,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		bd->mode.count = period_len;

 		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
-			goto err_out;
+			goto err_bd_out;
 		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
 			bd->mode.command = 0;
 		else
...
@@ -1382,10 +1545,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		i++;
 	}

-	sdmac->num_bd = num_periods;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
-	return &sdmac->desc;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
...
@@ -1424,13 +1587,31 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 				      struct dma_tx_state *txstate)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_desc *desc;
 	u32 residue;
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;

-	if (sdmac->flags & IMX_DMA_SG_LOOP)
-		residue = (sdmac->num_bd - sdmac->buf_ptail) *
-			   sdmac->period_len - sdmac->chn_real_count;
-	else
-		residue = sdmac->chn_count - sdmac->chn_real_count;
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vd = vchan_find_desc(&sdmac->vc, cookie);
+	if (vd) {
+		desc = to_sdma_desc(&vd->tx);
+		if (sdmac->flags & IMX_DMA_SG_LOOP)
+			residue = (desc->num_bd - desc->buf_ptail) *
+				desc->period_len - desc->chn_real_count;
+		else
+			residue = desc->chn_count - desc->chn_real_count;
+	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
+		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
+	} else {
+		residue = 0;
+	}
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
 			 residue);
...
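The residue formula itself is unchanged; only its inputs move into struct sdma_desc. In cyclic mode, residue = (num_bd - buf_ptail) * period_len - chn_real_count. Hedged worked example with assumed values: 4 periods of 4096 bytes with buf_ptail = 1 and chn_real_count = 4096 gives (4 - 1) * 4096 - 4096 = 8192 bytes still outstanding.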
@@ -1441,10 +1622,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 static void sdma_issue_pending(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct sdma_engine *sdma = sdmac->sdma;
+	unsigned long flags;

-	if (sdmac->status == DMA_IN_PROGRESS)
-		sdma_enable_channel(sdma, sdmac->channel);
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+		sdma_start_desc(sdmac);
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 }

 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
...
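For a dmaengine client nothing visible changes, but the split of work is now the canonical one: submit only queues the descriptor on the virtual channel, and issue_pending kicks the hardware only if the channel is idle. A hedged consumer-side sketch (chan/dst/src/len are assumed to be set up elsewhere; all calls are the standard dmaengine API):

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (tx) {
		cookie = dmaengine_submit(tx);	/* ends up in vchan_tx_submit() */
		dma_async_issue_pending(chan);	/* reaches sdma_issue_pending() */
	}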
@@ -1650,7 +1833,7 @@ static int sdma_init(struct sdma_engine *sdma)
 	for (i = 0; i < MAX_DMA_CHANNELS; i++)
 		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

-	ret = sdma_request_channel(&sdma->channel[0]);
+	ret = sdma_request_channel0(sdma);
 	if (ret)
 		goto err_dma_alloc;
...
@@ -1805,6 +1988,7 @@ static int sdma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);

 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
...
@@ -1812,22 +1996,16 @@ static int sdma_probe(struct platform_device *pdev)
 		struct sdma_channel *sdmac = &sdma->channel[i];

 		sdmac->sdma = sdma;
-		spin_lock_init(&sdmac->lock);
-
-		sdmac->chan.device = &sdma->dma_device;
-		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
+		sdmac->vc.desc_free = sdma_desc_free;

-		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
-			     (unsigned long) sdmac);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
 		 * that channel 0 in dmaengine counting matches sdma channel 1.
 		 */
 		if (i)
-			list_add_tail(&sdmac->chan.device_node,
-					&sdma->dma_device.channels);
+			vchan_init(&sdmac->vc, &sdma->dma_device);
 	}

 	ret = sdma_init(sdma);
...
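Channel setup in probe shrinks to the virt-dma pair shown above: assign vc.desc_free first, then call vchan_init(), which initializes the embedded dma_chan, the descriptor lists and the completion tasklet, and links the channel into the dma_device. Hedged two-line form for a generic driver:

	c->vc.desc_free = my_desc_free;	/* hypothetical free callback, cf. sdma_desc_free() */
	vchan_init(&c->vc, dma_dev);	/* also adds the channel to dma_dev->channels */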
@@ -1877,9 +2055,10 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
 	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
-	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
+	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);

 	platform_set_drvdata(pdev, sdma);
...
@@ -1932,7 +2111,8 @@ static int sdma_remove(struct platform_device *pdev)
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct sdma_channel *sdmac = &sdma->channel[i];

-		tasklet_kill(&sdmac->tasklet);
+		tasklet_kill(&sdmac->vc.task);
+		sdma_free_chan_resources(&sdmac->vc.chan);
 	}

 	platform_set_drvdata(pdev, NULL);
...