Kirill Smelkov / linux

Commit baab8537 authored Aug 17, 2018 by Vinod Koul

    Merge branch 'topic/imx' into for-linus

parents 4d442482 0f06c027

Showing 2 changed files with 380 additions and 199 deletions:

    drivers/dma/Kconfig     +1   -0
    drivers/dma/imx-sdma.c  +379 -199
drivers/dma/Kconfig

@@ -250,6 +250,7 @@ config IMX_SDMA
 	tristate "i.MX SDMA support"
 	depends on ARCH_MXC
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the i.MX SDMA engine. This engine is integrated into
 	  Freescale i.MX25/31/35/51/53/6 chips.
drivers/dma/imx-sdma.c

@@ -24,6 +24,7 @@
 #include <linux/spinlock.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
 #include <linux/platform_device.h>
@@ -41,6 +42,7 @@
 #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
 
 #include "dmaengine.h"
+#include "virt-dma.h"
 
 /* SDMA registers */
 #define SDMA_H_C0PTR	0x000
@@ -183,6 +185,7 @@
  * Mode/Count of data node descriptors - IPCv2
  */
 struct sdma_mode_count {
+#define SDMA_BD_MAX_CNT	0xffff
 	u32 count   : 16; /* size of the buffer pointed by this BD */
 	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
 	u32 command :  8; /* command mostly used for channel 0 */
@@ -200,9 +203,9 @@ struct sdma_buffer_descriptor {
 /**
  * struct sdma_channel_control - Channel control Block
  *
- * @current_bd_ptr	current buffer descriptor processed
- * @base_bd_ptr		first element of buffer descriptor array
- * @unused		padding. The SDMA engine expects an array of 128 byte
+ * @current_bd_ptr:	current buffer descriptor processed
+ * @base_bd_ptr:	first element of buffer descriptor array
+ * @unused:		padding. The SDMA engine expects an array of 128 byte
  *			control blocks
  */
 struct sdma_channel_control {
@@ -215,10 +218,13 @@ struct sdma_channel_control {
  * struct sdma_state_registers - SDMA context for a channel
  *
  * @pc:	program counter
+ * @unused1: unused
  * @t:	test bit: status of arithmetic & test instruction
  * @rpc: return program counter
+ * @unused0: unused
  * @sf:	source fault while loading data
  * @spc: loop start program counter
+ * @unused2: unused
  * @df:	destination fault while storing data
  * @epc: loop end program counter
  * @lm:	loop mode
@@ -256,6 +262,14 @@ struct sdma_state_registers {
  * @dsa:	dedicated core source address register
  * @ds:	dedicated core status register
  * @dd:	dedicated core data register
+ * @scratch0:	1st word of dedicated ram for context switch
+ * @scratch1:	2nd word of dedicated ram for context switch
+ * @scratch2:	3rd word of dedicated ram for context switch
+ * @scratch3:	4th word of dedicated ram for context switch
+ * @scratch4:	5th word of dedicated ram for context switch
+ * @scratch5:	6th word of dedicated ram for context switch
+ * @scratch6:	7th word of dedicated ram for context switch
+ * @scratch7:	8th word of dedicated ram for context switch
  */
 struct sdma_context_data {
 	struct sdma_state_registers	channel_state;
@@ -284,25 +298,67 @@ struct sdma_context_data {
 	u32  scratch7;
 } __attribute__ ((packed));
 
-#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
-
 struct sdma_engine;
 
 /**
+ * struct sdma_desc - descriptor structor for one transfer
+ * @vd:			descriptor for virt dma
+ * @num_bd:		number of descriptors currently handling
+ * @bd_phys:		physical address of bd
+ * @buf_tail:		ID of the buffer that was processed
+ * @buf_ptail:		ID of the previous buffer that was processed
+ * @period_len:		period length, used in cyclic.
+ * @chn_real_count:	the real count updated from bd->mode.count
+ * @chn_count:		the transfer count set
+ * @sdmac:		sdma_channel pointer
+ * @bd:			pointer of allocate bd
+ */
+struct sdma_desc {
+	struct virt_dma_desc	vd;
+	unsigned int		num_bd;
+	dma_addr_t		bd_phys;
+	unsigned int		buf_tail;
+	unsigned int		buf_ptail;
+	unsigned int		period_len;
+	unsigned int		chn_real_count;
+	unsigned int		chn_count;
+	struct sdma_channel	*sdmac;
+	struct sdma_buffer_descriptor *bd;
+};
+
+/**
  * struct sdma_channel - housekeeping for a SDMA channel
  *
- * @sdma		pointer to the SDMA engine for this channel
- * @channel		the channel number, matches dmaengine chan_id + 1
- * @direction		transfer type. Needed for setting SDMA script
- * @peripheral_type	Peripheral type. Needed for setting SDMA script
- * @event_id0		aka dma request line
- * @event_id1		for channels that use 2 events
- * @word_size		peripheral access size
- * @buf_tail		ID of the buffer that was processed
- * @buf_ptail		ID of the previous buffer that was processed
- * @num_bd		max NUM_BD. number of descriptors currently handling
+ * @vc:			virt_dma base structure
+ * @desc:		sdma description including vd and other special member
+ * @sdma:		pointer to the SDMA engine for this channel
+ * @channel:		the channel number, matches dmaengine chan_id + 1
+ * @direction:		transfer type. Needed for setting SDMA script
+ * @peripheral_type:	Peripheral type. Needed for setting SDMA script
+ * @event_id0:		aka dma request line
+ * @event_id1:		for channels that use 2 events
+ * @word_size:		peripheral access size
+ * @pc_from_device:	script address for those device_2_memory
+ * @pc_to_device:	script address for those memory_2_device
+ * @device_to_device:	script address for those device_2_device
+ * @pc_to_pc:		script address for those memory_2_memory
+ * @flags:		loop mode or not
+ * @per_address:	peripheral source or destination address in common case
+ *			destination address in p_2_p case
+ * @per_address2:	peripheral source address in p_2_p case
+ * @event_mask:		event mask used in p_2_p script
+ * @watermark_level:	value for gReg[7], some script will extend it from
+ *			basic watermark such as p_2_p
+ * @shp_addr:		value for gReg[6]
+ * @per_addr:		value for gReg[2]
+ * @status:		status of dma channel
+ * @data:		specific sdma interface structure
+ * @bd_pool:		dma_pool for bd
  */
 struct sdma_channel {
+	struct virt_dma_chan		vc;
+	struct sdma_desc		*desc;
 	struct sdma_engine		*sdma;
 	unsigned int			channel;
 	enum dma_transfer_direction	direction;
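Aside: the conversion above follows the standard virt-dma layering, where the core's generic object is embedded inside a wrapper and recovered with container_of(). A standalone sketch of that embedding pattern (plain user-space C with stub types, not kernel code; it only illustrates the pointer arithmetic that the real to_sdma_chan()/to_sdma_desc() in this diff rely on):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct dma_chan { int chan_id; };               /* stub for the core object */
    struct virt_dma_chan { struct dma_chan chan; }; /* stub virt-dma wrapper */
    struct sdma_channel {                           /* stub driver-private type */
        struct virt_dma_chan vc;
        unsigned int channel;
    };

    static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
    {
        /* the core hands out &vc.chan; step back out to the driver type */
        return container_of(chan, struct sdma_channel, vc.chan);
    }

    int main(void)
    {
        struct sdma_channel sdmac = { .channel = 3 };

        printf("channel = %u\n", to_sdma_chan(&sdmac.vc.chan)->channel);
        return 0;
    }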
@@ -310,28 +366,17 @@ struct sdma_channel {
 	unsigned int			event_id0;
 	unsigned int			event_id1;
 	enum dma_slave_buswidth		word_size;
-	unsigned int			buf_tail;
-	unsigned int			buf_ptail;
-	unsigned int			num_bd;
-	unsigned int			period_len;
-	struct sdma_buffer_descriptor	*bd;
-	dma_addr_t			bd_phys;
 	unsigned int			pc_from_device, pc_to_device;
 	unsigned int			device_to_device;
+	unsigned int			pc_to_pc;
 	unsigned long			flags;
 	dma_addr_t			per_address, per_address2;
 	unsigned long			event_mask[2];
 	unsigned long			watermark_level;
 	u32				shp_addr, per_addr;
-	struct dma_chan			chan;
-	spinlock_t			lock;
-	struct dma_async_tx_descriptor	desc;
 	enum dma_status			status;
-	unsigned int			chn_count;
-	unsigned int			chn_real_count;
-	struct tasklet_struct		tasklet;
 	struct imx_dma_data		data;
-	bool				enabled;
+	struct dma_pool			*bd_pool;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -346,15 +391,15 @@ struct sdma_channel {
 /**
  * struct sdma_firmware_header - Layout of the firmware image
  *
- * @magic		"SDMA"
- * @version_major	increased whenever layout of struct sdma_script_start_addrs
- *			changes.
- * @version_minor	firmware minor version (for binary compatible changes)
- * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
- * @num_script_addrs	Number of script addresses in this image
- * @ram_code_start	offset of SDMA ram image in this firmware image
- * @ram_code_size	size of SDMA ram image
- * @script_addrs	Stores the start address of the SDMA scripts
+ * @magic:		"SDMA"
+ * @version_major:	increased whenever layout of struct
+ *			sdma_script_start_addrs changes.
+ * @version_minor:	firmware minor version (for binary compatible changes)
+ * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
+ * @num_script_addrs:	Number of script addresses in this image
+ * @ram_code_start:	offset of SDMA ram image in this firmware image
+ * @ram_code_size:	size of SDMA ram image
+ * @script_addrs:	Stores the start address of the SDMA scripts
  *			(in SDMA memory space)
  */
 struct sdma_firmware_header {
@@ -391,6 +436,8 @@ struct sdma_engine {
 	u32				spba_start_addr;
 	u32				spba_end_addr;
 	unsigned int			irq;
+	dma_addr_t			bd0_phys;
+	struct sdma_buffer_descriptor	*bd0;
 };
 
 static struct sdma_driver_data sdma_imx31 = {
@@ -590,14 +637,7 @@ static int sdma_config_ownership(struct sdma_channel *sdmac,
 static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
 {
-	unsigned long flags;
-	struct sdma_channel *sdmac = &sdma->channel[channel];
-
 	writel(BIT(channel), sdma->regs + SDMA_H_START);
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = true;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
 }
 
 /*
@@ -625,7 +665,7 @@ static int sdma_run_channel0(struct sdma_engine *sdma)
 static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
 		u32 address)
 {
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	void *buf_virt;
 	dma_addr_t buf_phys;
 	int ret;
@@ -681,26 +721,49 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 	writel_relaxed(val, sdma->regs + chnenbl);
 }
 
+static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
+{
+	return container_of(t, struct sdma_desc, vd.tx);
+}
+
+static void sdma_start_desc(struct sdma_channel *sdmac)
+{
+	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
+	struct sdma_desc *desc;
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+
+	if (!vd) {
+		sdmac->desc = NULL;
+		return;
+	}
+	sdmac->desc = desc = to_sdma_desc(&vd->tx);
+	/*
+	 * Do not delete the node in desc_issued list in cyclic mode, otherwise
+	 * the desc allocated will never be freed in vchan_dma_desc_free_list
+	 */
+	if (!(sdmac->flags & IMX_DMA_SG_LOOP))
+		list_del(&vd->node);
+
+	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
+	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
+	sdma_enable_channel(sdma, sdmac->channel);
+}
+
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
 	struct sdma_buffer_descriptor *bd;
 	int error = 0;
 	enum dma_status	old_status = sdmac->status;
-	unsigned long flags;
-
-	spin_lock_irqsave(&sdmac->lock, flags);
-	if (!sdmac->enabled) {
-		spin_unlock_irqrestore(&sdmac->lock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&sdmac->lock, flags);
 
 	/*
 	 * loop mode. Iterate over descriptors, re-setup them and
 	 * call callback function.
 	 */
-	while (1) {
-		bd = &sdmac->bd[sdmac->buf_tail];
+	while (sdmac->desc) {
+		struct sdma_desc *desc = sdmac->desc;
+
+		bd = &desc->bd[desc->buf_tail];
 
 		if (bd->mode.status & BD_DONE)
 			break;
@@ -716,11 +779,11 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		 * the number of bytes present in the current buffer descriptor.
 		 */
 
-		sdmac->chn_real_count = bd->mode.count;
+		desc->chn_real_count = bd->mode.count;
 		bd->mode.status |= BD_DONE;
-		bd->mode.count = sdmac->period_len;
-		sdmac->buf_ptail = sdmac->buf_tail;
-		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;
+		bd->mode.count = desc->period_len;
+		desc->buf_ptail = desc->buf_tail;
+		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
 
 		/*
 		 * The callback is called from the interrupt context in order
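Aside: the loop above re-arms each completed period and advances the ring indices, which now live in the per-transfer sdma_desc rather than in the channel. A standalone sketch of that ring bookkeeping (plain C, illustrative values only; a set BD_DONE bit means the SDMA engine still owns the buffer descriptor):

    #include <stdio.h>

    #define BD_DONE 0x01

    struct bd { unsigned int status; unsigned int count; };

    int main(void)
    {
        /* entries 0 and 1 were completed by the hardware (BD_DONE cleared) */
        struct bd ring[4] = { {0, 0}, {0, 0}, {BD_DONE, 0}, {BD_DONE, 0} };
        unsigned int num_bd = 4, buf_tail = 0, buf_ptail = 0, period_len = 512;

        /* consume finished periods until one still owned by the engine */
        while (!(ring[buf_tail].status & BD_DONE)) {
            ring[buf_tail].status |= BD_DONE;  /* hand back to the engine */
            ring[buf_tail].count = period_len; /* re-arm the period */
            buf_ptail = buf_tail;
            buf_tail = (buf_tail + 1) % num_bd;
            printf("period done, tail=%u ptail=%u\n", buf_tail, buf_ptail);
        }
        return 0;
    }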
@@ -728,41 +791,38 @@ static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 		 * SDMA transaction status by the time the client tasklet is
 		 * executed.
 		 */
-
-		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
+		spin_unlock(&sdmac->vc.lock);
+		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+		spin_lock(&sdmac->vc.lock);
 
 		if (error)
 			sdmac->status = old_status;
 	}
 }
 
-static void mxc_sdma_handle_channel_normal(unsigned long data)
+static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
 	struct sdma_channel *sdmac = (struct sdma_channel *) data;
 	struct sdma_buffer_descriptor *bd;
 	int i, error = 0;
 
-	sdmac->chn_real_count = 0;
+	sdmac->desc->chn_real_count = 0;
 	/*
 	 * non loop mode. Iterate over all descriptors, collect
 	 * errors and call callback function
 	 */
-	for (i = 0; i < sdmac->num_bd; i++) {
-		bd = &sdmac->bd[i];
+	for (i = 0; i < sdmac->desc->num_bd; i++) {
+		bd = &sdmac->desc->bd[i];
 
 		 if (bd->mode.status & (BD_DONE | BD_RROR))
 			error = -EIO;
-		 sdmac->chn_real_count += bd->mode.count;
+		 sdmac->desc->chn_real_count += bd->mode.count;
 	}
 
 	if (error)
 		sdmac->status = DMA_ERROR;
 	else
 		sdmac->status = DMA_COMPLETE;
-
-	dma_cookie_complete(&sdmac->desc);
-
-	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
 }
 
 static irqreturn_t sdma_int_handler(int irq, void *dev_id)
@@ -778,12 +838,21 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
 	while (stat) {
 		int channel = fls(stat) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
+		struct sdma_desc *desc;
+
+		spin_lock(&sdmac->vc.lock);
+		desc = sdmac->desc;
+		if (desc) {
+			if (sdmac->flags & IMX_DMA_SG_LOOP) {
+				sdma_update_channel_loop(sdmac);
+			} else {
+				mxc_sdma_handle_channel_normal(sdmac);
+				vchan_cookie_complete(&desc->vd);
+				sdma_start_desc(sdmac);
+			}
+		}
 
-		if (sdmac->flags & IMX_DMA_SG_LOOP)
-			sdma_update_channel_loop(sdmac);
-		else
-			tasklet_schedule(&sdmac->tasklet);
-
+		spin_unlock(&sdmac->vc.lock);
 		__clear_bit(channel, &stat);
 	}
@@ -802,14 +871,16 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	 * These are needed once we start to support transfers between
 	 * two peripherals or memory-to-memory transfers
 	 */
-	int per_2_per = 0;
+	int per_2_per = 0, emi_2_emi = 0;
 
 	sdmac->pc_from_device = 0;
 	sdmac->pc_to_device = 0;
 	sdmac->device_to_device = 0;
+	sdmac->pc_to_pc = 0;
 
 	switch (peripheral_type) {
 	case IMX_DMATYPE_MEMORY:
+		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
 		break;
 	case IMX_DMATYPE_DSP:
 		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
@@ -882,6 +953,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	sdmac->pc_from_device = per_2_emi;
 	sdmac->pc_to_device = emi_2_per;
 	sdmac->device_to_device = per_2_per;
+	sdmac->pc_to_pc = emi_2_emi;
 }
 
 static int sdma_load_context(struct sdma_channel *sdmac)
@@ -890,7 +962,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 	int channel = sdmac->channel;
 	int load_address;
 	struct sdma_context_data *context = sdma->context;
-	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
+	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
 	int ret;
 	unsigned long flags;
@@ -898,6 +970,8 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 		load_address = sdmac->pc_from_device;
 	else if (sdmac->direction == DMA_DEV_TO_DEV)
 		load_address = sdmac->device_to_device;
+	else if (sdmac->direction == DMA_MEM_TO_MEM)
+		load_address = sdmac->pc_to_pc;
 	else
 		load_address = sdmac->pc_to_device;
@@ -939,7 +1013,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
 static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct sdma_channel, chan);
+	return container_of(chan, struct sdma_channel, vc.chan);
 }
 
 static int sdma_disable_channel(struct dma_chan *chan)
@@ -947,21 +1021,25 @@ static int sdma_disable_channel(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 	int channel = sdmac->channel;
-	unsigned long flags;
 
 	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
 	sdmac->status = DMA_ERROR;
 
-	spin_lock_irqsave(&sdmac->lock, flags);
-	sdmac->enabled = false;
-	spin_unlock_irqrestore(&sdmac->lock, flags);
-
 	return 0;
 }
 
 static int sdma_disable_channel_with_delay(struct dma_chan *chan)
 {
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	unsigned long flags;
+	LIST_HEAD(head);
+
 	sdma_disable_channel(chan);
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_get_all_descriptors(&sdmac->vc, &head);
+	sdmac->desc = NULL;
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_dma_desc_free_list(&sdmac->vc, &head);
 
 	/*
 	 * According to NXP R&D team a delay of one BD SDMA cost time
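Aside: for context, this is roughly how a consumer reaches the new teardown path. dmaengine_terminate_sync() ends up in the driver's device_terminate_all hook (sdma_disable_channel_with_delay() here), which stops the channel and hands every outstanding virt-dma descriptor to vchan_dma_desc_free_list(). A hedged sketch only; the channel name "tx" and the surrounding driver are assumptions for illustration:

    #include <linux/dmaengine.h>

    static void example_stop_dma(struct device *dev)
    {
        struct dma_chan *chan = dma_request_chan(dev, "tx"); /* hypothetical */

        if (IS_ERR(chan))
            return;

        /* ... transfers submitted and issued here ... */

        dmaengine_terminate_sync(chan); /* reaches device_terminate_all */
        dma_release_channel(chan);
    }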
@@ -1090,52 +1168,81 @@ static int sdma_set_channel_priority(struct sdma_channel *sdmac,
 	return 0;
 }
 
-static int sdma_request_channel(struct sdma_channel *sdmac)
+static int sdma_request_channel0(struct sdma_engine *sdma)
 {
-	struct sdma_engine *sdma = sdmac->sdma;
-	int channel = sdmac->channel;
 	int ret = -EBUSY;
 
-	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
-					GFP_KERNEL);
-	if (!sdmac->bd) {
+	sdma->bd0 = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdma->bd0_phys,
+					GFP_NOWAIT);
+	if (!sdma->bd0) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
+	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
+	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
 
-	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
+	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
 	return 0;
 out:
 
 	return ret;
 }
 
-static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
+static int sdma_alloc_bd(struct sdma_desc *desc)
 {
-	unsigned long flags;
-	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&sdmac->lock, flags);
+	int ret = 0;
 
-	cookie = dma_cookie_assign(tx);
+	desc->bd = dma_pool_alloc(desc->sdmac->bd_pool, GFP_NOWAIT,
+				  &desc->bd_phys);
+	if (!desc->bd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+out:
+	return ret;
+}
 
-	spin_unlock_irqrestore(&sdmac->lock, flags);
+static void sdma_free_bd(struct sdma_desc *desc)
+{
+	dma_pool_free(desc->sdmac->bd_pool, desc->bd, desc->bd_phys);
+}
 
-	return cookie;
+static void sdma_desc_free(struct virt_dma_desc *vd)
+{
+	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
+
+	sdma_free_bd(desc);
+	kfree(desc);
 }
 
 static int sdma_alloc_chan_resources(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct imx_dma_data *data = chan->private;
+	struct imx_dma_data mem_data;
 	int prio, ret;
 
-	if (!data)
-		return -EINVAL;
+	/*
+	 * MEMCPY may never setup chan->private by filter function such as
+	 * dmatest, thus create 'struct imx_dma_data mem_data' for this case.
+	 * Please note in any other slave case, you have to setup chan->private
+	 * with 'struct imx_dma_data' in your own filter function if you want to
+	 * request dma channel by dma_request_channel() rather than
+	 * dma_request_slave_channel(). Othwise, 'MEMCPY in case?' will appear
+	 * to warn you to correct your filter function.
+	 */
+	if (!data) {
+		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
+		mem_data.priority = 2;
+		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
+		mem_data.dma_request = 0;
+		mem_data.dma_request2 = 0;
+		data = &mem_data;
+
+		sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
+	}
 
 	switch (data->priority) {
 	case DMA_PRIO_HIGH:
@@ -1161,18 +1268,13 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
 	if (ret)
 		goto disable_clk_ipg;
 
-	ret = sdma_request_channel(sdmac);
-	if (ret)
-		goto disable_clk_ahb;
-
 	ret = sdma_set_channel_priority(sdmac, prio);
 	if (ret)
 		goto disable_clk_ahb;
 
-	dma_async_tx_descriptor_init(&sdmac->desc, chan);
-	sdmac->desc.tx_submit = sdma_tx_submit;
-	/* txd.flags will be overwritten in prep funcs */
-	sdmac->desc.flags = DMA_CTRL_ACK;
+	sdmac->bd_pool = dma_pool_create("bd_pool", chan->device->dev,
+				sizeof(struct sdma_buffer_descriptor),
+				32, 0);
 
 	return 0;
...
@@ -1161,18 +1268,13 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
...
@@ -1161,18 +1268,13 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
if
(
ret
)
if
(
ret
)
goto
disable_clk_ipg
;
goto
disable_clk_ipg
;
ret
=
sdma_request_channel
(
sdmac
);
if
(
ret
)
goto
disable_clk_ahb
;
ret
=
sdma_set_channel_priority
(
sdmac
,
prio
);
ret
=
sdma_set_channel_priority
(
sdmac
,
prio
);
if
(
ret
)
if
(
ret
)
goto
disable_clk_ahb
;
goto
disable_clk_ahb
;
dma_async_tx_descriptor_init
(
&
sdmac
->
desc
,
chan
);
sdmac
->
bd_pool
=
dma_pool_create
(
"bd_pool"
,
chan
->
device
->
dev
,
sdmac
->
desc
.
tx_submit
=
sdma_tx_submit
;
sizeof
(
struct
sdma_buffer_descriptor
),
/* txd.flags will be overwritten in prep funcs */
32
,
0
);
sdmac
->
desc
.
flags
=
DMA_CTRL_ACK
;
return
0
;
return
0
;
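Aside: the new comment in sdma_alloc_chan_resources() above expects dma_request_channel() users to hang a struct imx_dma_data off chan->private from their filter function. A hedged sketch of such an arrangement (values are illustrative; a real filter would normally also verify that the offered channel belongs to the SDMA device):

    #include <linux/dmaengine.h>
    #include <linux/platform_data/dma-imx.h>

    static bool example_sdma_filter(struct dma_chan *chan, void *param)
    {
        chan->private = param; /* seen later by sdma_alloc_chan_resources() */
        return true;
    }

    static struct dma_chan *example_request(void)
    {
        static struct imx_dma_data data = {
            .priority        = 2,                  /* illustrative */
            .peripheral_type = IMX_DMATYPE_MEMORY,
        };
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        return dma_request_channel(mask, example_sdma_filter, &data);
    }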
@@ -1188,7 +1290,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
-	sdma_disable_channel(chan);
+	sdma_disable_channel_with_delay(chan);
 
 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1200,10 +1302,105 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 	sdma_set_channel_priority(sdmac, 0);
 
-	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
-
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
+
+	dma_pool_destroy(sdmac->bd_pool);
+	sdmac->bd_pool = NULL;
+}
+
+static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
+				enum dma_transfer_direction direction, u32 bds)
+{
+	struct sdma_desc *desc;
+
+	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
+	if (!desc)
+		goto err_out;
+
+	sdmac->status = DMA_IN_PROGRESS;
+	sdmac->direction = direction;
+	sdmac->flags = 0;
+
+	desc->chn_count = 0;
+	desc->chn_real_count = 0;
+	desc->buf_tail = 0;
+	desc->buf_ptail = 0;
+	desc->sdmac = sdmac;
+	desc->num_bd = bds;
+
+	if (sdma_alloc_bd(desc))
+		goto err_desc_out;
+
+	/* No slave_config called in MEMCPY case, so do here */
+	if (direction == DMA_MEM_TO_MEM)
+		sdma_config_ownership(sdmac, false, true, false);
+
+	if (sdma_load_context(sdmac))
+		goto err_desc_out;
+
+	return desc;
+
+err_desc_out:
+	kfree(desc);
+err_out:
+	return NULL;
+}
+
+static struct dma_async_tx_descriptor *sdma_prep_memcpy(
+		struct dma_chan *chan, dma_addr_t dma_dst,
+		dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_engine *sdma = sdmac->sdma;
+	int channel = sdmac->channel;
+	size_t count;
+	int i = 0, param;
+	struct sdma_buffer_descriptor *bd;
+	struct sdma_desc *desc;
+
+	if (!chan || !len)
+		return NULL;
+
+	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
+		&dma_src, &dma_dst, len, channel);
+
+	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
+					len / SDMA_BD_MAX_CNT + 1);
+	if (!desc)
+		return NULL;
+
+	do {
+		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
+		bd = &desc->bd[i];
+		bd->buffer_addr = dma_src;
+		bd->ext_buffer_addr = dma_dst;
+		bd->mode.count = count;
+		desc->chn_count += count;
+		bd->mode.command = 0;
+
+		dma_src += count;
+		dma_dst += count;
+		len -= count;
+		i++;
+
+		param = BD_DONE | BD_EXTD | BD_CONT;
+		/* last bd */
+		if (!len) {
+			param |= BD_INTR;
+			param |= BD_LAST;
+			param &= ~BD_CONT;
+		}
+
+		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
+				i, count, bd->buffer_addr,
+				param & BD_WRAP ? "wrap" : "",
+				param & BD_INTR ? " intr" : "");
+
+		bd->mode.status = param;
+	} while (len);
+
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
 }
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
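Aside: with sdma_prep_memcpy() wired up (and DMA_MEMCPY set in the probe hunk further down), the channel can be driven through the standard dmaengine consumer calls. A hedged sketch with error handling trimmed; nothing here is SDMA-specific:

    #include <linux/dmaengine.h>

    static int example_memcpy(struct dma_chan *chan, dma_addr_t dst,
                              dma_addr_t src, size_t len)
    {
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        /* ends up in sdma_prep_memcpy(), one BD per SDMA_BD_MAX_CNT chunk */
        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx)
            return -ENOMEM;

        cookie = dmaengine_submit(tx);  /* queues on the virt-dma channel */
        if (dma_submit_error(cookie))
            return -EIO;

        dma_async_issue_pending(chan);  /* kicks sdma_issue_pending() */
        return 0;
    }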
@@ -1213,75 +1410,54 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
-	int ret, i, count;
+	int i, count;
 	int channel = sdmac->channel;
 	struct scatterlist *sg;
+	struct sdma_desc *desc;
 
-	if (sdmac->status == DMA_IN_PROGRESS)
-		return NULL;
-	sdmac->status = DMA_IN_PROGRESS;
-
-	sdmac->flags = 0;
-
-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
+	desc = sdma_transfer_init(sdmac, direction, sg_len);
+	if (!desc)
+		goto err_out;
 
 	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
 			sg_len, channel);
 
-	sdmac->direction = direction;
-	ret = sdma_load_context(sdmac);
-	if (ret)
-		goto err_out;
-
-	if (sg_len > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, sg_len, NUM_BD);
-		ret = -EINVAL;
-		goto err_out;
-	}
-
-	sdmac->chn_count = 0;
 	for_each_sg(sgl, sg, sg_len, i) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;
 
 		bd->buffer_addr = sg->dma_address;
 
 		count = sg_dma_len(sg);
 
-		if (count > 0xffff) {
+		if (count > SDMA_BD_MAX_CNT) {
 			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
-					channel, count, 0xffff);
-			ret = -EINVAL;
-			goto err_out;
+					channel, count, SDMA_BD_MAX_CNT);
+			goto err_bd_out;
 		}
 
 		bd->mode.count = count;
-		sdmac->chn_count += count;
+		desc->chn_count += count;
 
-		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
-			ret =  -EINVAL;
-			goto err_out;
-		}
+		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
+			goto err_bd_out;
 
 		switch (sdmac->word_size) {
 		case DMA_SLAVE_BUSWIDTH_4_BYTES:
 			bd->mode.command = 0;
 			if (count & 3 || sg->dma_address & 3)
-				return NULL;
+				goto err_bd_out;
 			break;
 		case DMA_SLAVE_BUSWIDTH_2_BYTES:
 			bd->mode.command = 2;
 			if (count & 1 || sg->dma_address & 1)
-				return NULL;
+				goto err_bd_out;
 			break;
 		case DMA_SLAVE_BUSWIDTH_1_BYTE:
 			bd->mode.command = 1;
 			break;
 		default:
-			return NULL;
+			goto err_bd_out;
 		}
 
 		param = BD_DONE | BD_EXTD | BD_CONT;
@@ -1300,10 +1476,10 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
 		bd->mode.status = param;
 	}
 
-	sdmac->num_bd = sg_len;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
-	return &sdmac->desc;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
@@ -1318,40 +1494,27 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 	struct sdma_engine *sdma = sdmac->sdma;
 	int num_periods = buf_len / period_len;
 	int channel = sdmac->channel;
-	int ret, i = 0, buf = 0;
+	int i = 0, buf = 0;
+	struct sdma_desc *desc;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
 
-	if (sdmac->status == DMA_IN_PROGRESS)
-		return NULL;
-
-	sdmac->status = DMA_IN_PROGRESS;
+	desc = sdma_transfer_init(sdmac, direction, num_periods);
+	if (!desc)
+		goto err_out;
 
-	sdmac->buf_tail = 0;
-	sdmac->buf_ptail = 0;
-	sdmac->chn_real_count = 0;
-	sdmac->period_len = period_len;
+	desc->period_len = period_len;
 
 	sdmac->flags |= IMX_DMA_SG_LOOP;
-	sdmac->direction = direction;
-	ret = sdma_load_context(sdmac);
-	if (ret)
-		goto err_out;
 
-	if (num_periods > NUM_BD) {
-		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
-				channel, num_periods, NUM_BD);
-		goto err_out;
-	}
-
-	if (period_len > 0xffff) {
+	if (period_len > SDMA_BD_MAX_CNT) {
 		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
-				channel, period_len, 0xffff);
-		goto err_out;
+				channel, period_len, SDMA_BD_MAX_CNT);
+		goto err_bd_out;
 	}
 
 	while (buf < buf_len) {
-		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
+		struct sdma_buffer_descriptor *bd = &desc->bd[i];
 		int param;
 
 		bd->buffer_addr = dma_addr;
@@ -1359,7 +1522,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		bd->mode.count = period_len;
 
 		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
-			goto err_out;
+			goto err_bd_out;
 		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
 			bd->mode.command = 0;
 		else
@@ -1382,10 +1545,10 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		i++;
 	}
 
-	sdmac->num_bd = num_periods;
-	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
-
-	return &sdmac->desc;
+	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
+err_bd_out:
+	sdma_free_bd(desc);
+	kfree(desc);
 err_out:
 	sdmac->status = DMA_ERROR;
 	return NULL;
@@ -1424,13 +1587,31 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 				      struct dma_tx_state *txstate)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_desc *desc;
 	u32 residue;
+	struct virt_dma_desc *vd;
+	enum dma_status ret;
+	unsigned long flags;
 
-	if (sdmac->flags & IMX_DMA_SG_LOOP)
-		residue = (sdmac->num_bd - sdmac->buf_ptail) *
-			   sdmac->period_len - sdmac->chn_real_count;
-	else
-		residue = sdmac->chn_count - sdmac->chn_real_count;
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret == DMA_COMPLETE || !txstate)
+		return ret;
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vd = vchan_find_desc(&sdmac->vc, cookie);
+	if (vd) {
+		desc = to_sdma_desc(&vd->tx);
+		if (sdmac->flags & IMX_DMA_SG_LOOP)
+			residue = (desc->num_bd - desc->buf_ptail) *
+				desc->period_len - desc->chn_real_count;
+		else
+			residue = desc->chn_count - desc->chn_real_count;
+	} else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
+		residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
+	} else {
+		residue = 0;
+	}
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
 			 residue);
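Aside: a consumer observes the residue logic above through the usual status call; the rewritten sdma_tx_status() first consults the generic cookie tracking and only then walks the virt-dma lists for an in-flight descriptor. An illustrative sketch:

    #include <linux/dmaengine.h>

    static size_t example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
    {
        struct dma_tx_state state;
        enum dma_status status;

        status = dmaengine_tx_status(chan, cookie, &state);

        /* for cyclic transfers the residue counts down within the ring */
        return status == DMA_COMPLETE ? 0 : state.residue;
    }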
@@ -1441,10 +1622,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 static void sdma_issue_pending(struct dma_chan *chan)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
-	struct sdma_engine *sdma = sdmac->sdma;
+	unsigned long flags;
 
-	if (sdmac->status == DMA_IN_PROGRESS)
-		sdma_enable_channel(sdma, sdmac->channel);
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
+		sdma_start_desc(sdmac);
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 }
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
@@ -1650,7 +1833,7 @@ static int sdma_init(struct sdma_engine *sdma)
 	for (i = 0; i < MAX_DMA_CHANNELS; i++)
 		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
 
-	ret = sdma_request_channel(&sdma->channel[0]);
+	ret = sdma_request_channel0(sdma);
 	if (ret)
 		goto err_dma_alloc;
@@ -1805,6 +1988,7 @@ static int sdma_probe(struct platform_device *pdev)
 
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
 
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
 	/* Initialize channel parameters */
@@ -1812,22 +1996,16 @@ static int sdma_probe(struct platform_device *pdev)
 		struct sdma_channel *sdmac = &sdma->channel[i];
 
 		sdmac->sdma = sdma;
-		spin_lock_init(&sdmac->lock);
-
-		sdmac->chan.device = &sdma->dma_device;
-		dma_cookie_init(&sdmac->chan);
 		sdmac->channel = i;
-
-		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
-			     (unsigned long) sdmac);
+		sdmac->vc.desc_free = sdma_desc_free;
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
 		 * that channel 0 in dmaengine counting matches sdma channel 1.
 		 */
 		if (i)
-			list_add_tail(&sdmac->chan.device_node,
-					&sdma->dma_device.channels);
+			vchan_init(&sdmac->vc, &sdma->dma_device);
 	}
 
 	ret = sdma_init(sdma);
@@ -1877,9 +2055,10 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
 	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
 	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
-	dma_set_max_seg_size(sdma->dma_device.dev, 65535);
+	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
 
 	platform_set_drvdata(pdev, sdma);
@@ -1932,7 +2111,8 @@ static int sdma_remove(struct platform_device *pdev)
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		struct sdma_channel *sdmac = &sdma->channel[i];
 
-		tasklet_kill(&sdmac->tasklet);
+		tasklet_kill(&sdmac->vc.task);
+		sdma_free_chan_resources(&sdmac->vc.chan);
 	}
 
 	platform_set_drvdata(pdev, NULL);