Commit feb59d77 authored Mar 12, 2019 by Vinod Koul

Merge branch 'topic/xilinx' into for-linus

parents 42cb6e07 c2be36ac
Showing 2 changed files with 104 additions and 73 deletions:

Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt (+4, -3)
drivers/dma/xilinx/xilinx_dma.c (+100, -70)
Documentation/devicetree/bindings/dma/xilinx/xilinx_dma.txt
@@ -37,10 +37,11 @@ Required properties:
 Required properties for VDMA:
 - xlnx,num-fstores: Should be the number of framebuffers as configured in h/w.
 
 Optional properties:
-- xlnx,include-sg: Tells configured for Scatter-mode in
-	the hardware.
 Optional properties for AXI DMA:
+- xlnx,sg-length-width: Should be set to the width in bits of the length
+	register as configured in h/w. Takes values {8...26}. If the property
+	is missing or invalid then the default value 23 is used. This is the
+	maximum value that is supported by all IP versions.
 - xlnx,mcdma: Tells whether configured for multi-channel mode in the hardware.
 Optional properties for VDMA:
 - xlnx,flush-fsync: Tells which channel to Flush on Frame sync.
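As a side note (not part of the commit): the property directly determines the largest transfer a single descriptor can describe. A length register w bits wide holds byte counts up to 2^w - 1, which the driver encodes as GENMASK(w - 1, 0); the default width of 23 therefore caps one descriptor at 8388607 bytes (~8 MiB), and the maximum of 26 at 67108863 bytes (~64 MiB). A minimal standalone sketch of that arithmetic, with hypothetical names:

#include <stdio.h>

/* Hypothetical helper mirroring GENMASK(width - 1, 0): the largest
 * byte count a width-bit length register can hold. */
static unsigned long max_transfer_bytes(unsigned int width)
{
	return (1UL << width) - 1;
}

int main(void)
{
	printf("width 23 -> %lu bytes\n", max_transfer_bytes(23)); /* 8388607 */
	printf("width 26 -> %lu bytes\n", max_transfer_bytes(26)); /* 67108863 */
	return 0;
}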
drivers/dma/xilinx/xilinx_dma.c
@@ -86,6 +86,7 @@
 #define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
 #define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
+#define XILINX_DMA_DMASR_SG_MASK		BIT(3)
 #define XILINX_DMA_DMASR_IDLE			BIT(1)
 #define XILINX_DMA_DMASR_HALTED			BIT(0)
 #define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
@@ -161,7 +162,9 @@
 #define XILINX_DMA_REG_BTT		0x28
 
 /* AXI DMA Specific Masks/Bit fields */
-#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
+#define XILINX_DMA_MAX_TRANS_LEN_MIN	8
+#define XILINX_DMA_MAX_TRANS_LEN_MAX	23
+#define XILINX_DMA_V2_MAX_TRANS_LEN_MAX	26
 #define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
 #define XILINX_DMA_CR_COALESCE_SHIFT	16
@@ -412,7 +415,6 @@ struct xilinx_dma_config {
  * @dev: Device Structure
  * @common: DMA device structure
  * @chan: Driver specific DMA channel
- * @has_sg: Specifies whether Scatter-Gather is present or not
  * @mcdma: Specifies whether Multi-Channel is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
@@ -425,13 +427,13 @@ struct xilinx_dma_config {
  * @rxs_clk: DMA s2mm stream clock
  * @nr_channels: Number of channels DMA device supports
  * @chan_id: DMA channel identifier
+ * @max_buffer_len: Max buffer length
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
 	struct device *dev;
 	struct dma_device common;
 	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
-	bool has_sg;
 	bool mcdma;
 	u32 flush_on_fsync;
 	bool ext_addr;
@@ -444,6 +446,7 @@ struct xilinx_dma_device {
 	struct clk *rxs_clk;
 	u32 nr_channels;
 	u32 chan_id;
+	u32 max_buffer_len;
 };
 
 /* Macros */
@@ -959,6 +962,34 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	return 0;
 }
 
+/**
+ * xilinx_dma_calc_copysize - Calculate the amount of data to copy
+ * @chan: Driver specific DMA channel
+ * @size: Total data that needs to be copied
+ * @done: Amount of data that has been already copied
+ *
+ * Return: Amount of data that has to be copied
+ */
+static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
+				    int size, int done)
+{
+	size_t copy;
+
+	copy = min_t(size_t, size - done,
+		     chan->xdev->max_buffer_len);
+
+	if ((copy + done < size) &&
+	    chan->xdev->common.copy_align) {
+		/*
+		 * If this is not the last descriptor, make sure
+		 * the next one will be properly aligned
+		 */
+		copy = rounddown(copy,
+				 (1 << chan->xdev->common.copy_align));
+	}
+	return copy;
+}
+
 /**
  * xilinx_dma_tx_status - Get DMA transaction status
  * @dchan: DMA channel
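For readers skimming the hunk above, the helper's behavior is easy to see in isolation. The sketch below is illustrative only; fixed values stand in for chan->xdev->max_buffer_len and common.copy_align from the driver:

#include <stdio.h>
#include <stddef.h>

#define MAX_BUFFER_LEN ((1UL << 23) - 1)	/* 23-bit length register */
#define COPY_ALIGN 6				/* align chunks to 64 bytes */

/* Clamp each chunk to MAX_BUFFER_LEN; for every chunk except the
 * last, round down so the next chunk starts aligned. */
static size_t calc_copysize(size_t size, size_t done)
{
	size_t copy = size - done;

	if (copy > MAX_BUFFER_LEN)
		copy = MAX_BUFFER_LEN;

	if (copy + done < size && COPY_ALIGN)
		copy &= ~(((size_t)1 << COPY_ALIGN) - 1);

	return copy;
}

int main(void)
{
	size_t total = 20 * 1024 * 1024, done = 0;	/* 20 MiB transfer */

	while (done < total) {
		size_t copy = calc_copysize(total, done);

		printf("chunk at %zu: %zu bytes\n", done, copy);
		done += copy;
	}
	return 0;
}

Running it shows two 8388544-byte chunks (8388607 rounded down to 64) followed by one exact remainder, which is precisely the splitting the driver wants.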
@@ -992,7 +1023,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 		list_for_each_entry(segment, &desc->segments, node) {
 			hw = &segment->hw;
 			residue += (hw->control - hw->status) &
-				   XILINX_DMA_MAX_TRANS_LEN;
+				   chan->xdev->max_buffer_len;
 		}
 	}
 
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -1070,7 +1101,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	struct xilinx_vdma_config *config = &chan->config;
 	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
 	u32 reg, j;
-	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_segment *segment, *last = NULL;
+	int i = 0;
 
 	/* This function was invoked with lock held */
 	if (chan->err)
@@ -1087,17 +1119,6 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	tail_desc = list_last_entry(&chan->pending_list,
 				    struct xilinx_dma_tx_descriptor, node);
-	tail_segment = list_last_entry(&tail_desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-
-	/*
-	 * If hardware is idle, then all descriptors on the running lists are
-	 * done, start new transfers
-	 */
-	if (chan->has_sg)
-		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
-			       desc->async_tx.phys);
-
 	/* Configure the hardware using info in the config structure */
 	if (chan->has_vflip) {
 		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
@@ -1114,15 +1135,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 	else
 		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
 
-	/*
-	 * With SG, start with circular mode, so that BDs can be fetched.
-	 * In direct register mode, if not parking, enable circular mode
-	 */
-	if (chan->has_sg || !config->park)
-		reg |= XILINX_DMA_DMACR_CIRC_EN;
-
+	/* If not parking, enable circular mode */
 	if (config->park)
 		reg &= ~XILINX_DMA_DMACR_CIRC_EN;
+	else
+		reg |= XILINX_DMA_DMACR_CIRC_EN;
 
 	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
@@ -1144,48 +1161,38 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 		return;
 
 	/* Start the transfer */
-	if (chan->has_sg) {
-		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
-				tail_segment->phys);
-		list_splice_tail_init(&chan->pending_list, &chan->active_list);
-		chan->desc_pendingcount = 0;
-	} else {
-		struct xilinx_vdma_tx_segment *segment, *last = NULL;
-		int i = 0;
-
-		if (chan->desc_submitcount < chan->num_frms)
-			i = chan->desc_submitcount;
-
-		list_for_each_entry(segment, &desc->segments, node) {
-			if (chan->ext_addr)
-				vdma_desc_write_64(chan,
-					XILINX_VDMA_REG_START_ADDRESS_64(i++),
-					segment->hw.buf_addr,
-					segment->hw.buf_addr_msb);
-			else
-				vdma_desc_write(chan,
+	if (chan->desc_submitcount < chan->num_frms)
+		i = chan->desc_submitcount;
+
+	list_for_each_entry(segment, &desc->segments, node) {
+		if (chan->ext_addr)
+			vdma_desc_write_64(chan,
+				XILINX_VDMA_REG_START_ADDRESS_64(i++),
+				segment->hw.buf_addr,
+				segment->hw.buf_addr_msb);
+		else
+			vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
 
-			last = segment;
-		}
-
-		if (!last)
-			return;
-
-		/* HW expects these parameters to be same for one transaction */
-		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
-		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
-				last->hw.stride);
-		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
-
-		chan->desc_submitcount++;
-		chan->desc_pendingcount--;
-		list_del(&desc->node);
-		list_add_tail(&desc->node, &chan->active_list);
-		if (chan->desc_submitcount == chan->num_frms)
-			chan->desc_submitcount = 0;
+		last = segment;
 	}
 
+	if (!last)
+		return;
+
+	/* HW expects these parameters to be same for one transaction */
+	vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
+	vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
+			last->hw.stride);
+	vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
+
+	chan->desc_submitcount++;
+	chan->desc_pendingcount--;
+	list_del(&desc->node);
+	list_add_tail(&desc->node, &chan->active_list);
+	if (chan->desc_submitcount == chan->num_frms)
+		chan->desc_submitcount = 0;
+
 	chan->idle = false;
 }
@@ -1254,7 +1261,7 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-				hw->control & XILINX_DMA_MAX_TRANS_LEN);
+				hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1357,7 +1364,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
 		/* Start the transfer */
 		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
-			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+			       hw->control & chan->xdev->max_buffer_len);
 	}
 
 	list_splice_tail_init(&chan->pending_list, &chan->active_list);
@@ -1718,7 +1725,7 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
 	struct xilinx_cdma_tx_segment *segment;
 	struct xilinx_cdma_desc_hw *hw;
 
-	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+	if (!len || len > chan->xdev->max_buffer_len)
 		return NULL;
 
 	desc = xilinx_dma_alloc_tx_descriptor(chan);
@@ -1808,8 +1815,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
+							sg_used);
 			hw = &segment->hw;
 
 			/* Fill in the descriptor */
@@ -1913,8 +1920,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
 			 * Calculate the maximum number of bytes to transfer,
 			 * making sure it is less than the hw limit
 			 */
-			copy = min_t(size_t, period_len - sg_used,
-				     XILINX_DMA_MAX_TRANS_LEN);
+			copy = xilinx_dma_calc_copysize(chan, period_len,
+							sg_used);
 			hw = &segment->hw;
 			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
 					  period_len * i);
@@ -2389,7 +2396,6 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
-	chan->has_sg = xdev->has_sg;
 	chan->desc_pendingcount = 0x0;
 	chan->ext_addr = xdev->ext_addr;
 	/* This variable ensures that descriptors are not
@@ -2489,6 +2495,15 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->stop_transfer = xilinx_dma_stop_transfer;
 	}
 
+	/* check if SG is enabled (only for AXIDMA and CDMA) */
+	if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
+		if (dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
+		    XILINX_DMA_DMASR_SG_MASK)
+			chan->has_sg = true;
+		dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
+			chan->has_sg ? "enabled" : "disabled");
+	}
+
 	/* Initialize the tasklet */
 	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
 		     (unsigned long)chan);
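Context, not from the commit itself: the SG-included status bit is fixed when the IP is synthesized, so a single register read at probe time is enough to decide whether descriptor-chained (scatter-gather) mode is available, replacing trust in the xlnx,include-sg device-tree property. A hypothetical, self-contained sketch of that detection pattern:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMASR_SG_MASK (1u << 3)	/* mirrors XILINX_DMA_DMASR_SG_MASK */

/* Hypothetical MMIO read standing in for dma_ctrl_read(chan, DMASR) */
static uint32_t read_dmasr(void)
{
	return 0x00000009;	/* example value: HALTED | SG-included */
}

int main(void)
{
	bool has_sg = read_dmasr() & DMASR_SG_MASK;

	printf("SG %s\n", has_sg ? "enabled" : "disabled");
	return 0;
}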
@@ -2596,7 +2611,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	struct xilinx_dma_device *xdev;
 	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
-	u32 num_frames, addr_width;
+	u32 num_frames, addr_width, len_width;
 	int i, err;
 
 	/* Allocate and initialize the DMA engine structure */
@@ -2627,9 +2642,24 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		return PTR_ERR(xdev->regs);
 
 	/* Retrieve the DMA engine properties from the device tree */
-	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
-	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
+	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
+		if (!of_property_read_u32(node, "xlnx,sg-length-width",
+					  &len_width)) {
+			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
+			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
+				dev_warn(xdev->dev,
+					 "invalid xlnx,sg-length-width property value. Using default width\n");
+			} else {
+				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
+					dev_warn(xdev->dev,
+						 "Please ensure that IP supports buffer length > 23 bits\n");
+				xdev->max_buffer_len =
+					GENMASK(len_width - 1, 0);
+			}
+		}
+	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
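To make the accepted range concrete (illustrative only, with hypothetical names): widths outside [8, 26] are ignored in favor of the 23-bit default, and widths in (23, 26] are accepted with a warning because only newer IP revisions implement them. A standalone sketch of that policy:

#include <stdio.h>

#define LEN_MIN 8	/* XILINX_DMA_MAX_TRANS_LEN_MIN */
#define LEN_MAX 23	/* XILINX_DMA_MAX_TRANS_LEN_MAX */
#define LEN_V2_MAX 26	/* XILINX_DMA_V2_MAX_TRANS_LEN_MAX */

/* Mirrors the probe logic: returns the mask of usable length bits */
static unsigned long pick_max_buffer_len(unsigned int len_width)
{
	unsigned long max_len = (1UL << LEN_MAX) - 1;	/* default */

	if (len_width < LEN_MIN || len_width > LEN_V2_MAX) {
		printf("width %u invalid, using default 23\n", len_width);
	} else {
		if (len_width > LEN_MAX)
			printf("width %u needs IP with >23-bit length support\n",
			       len_width);
		max_len = (1UL << len_width) - 1;
	}
	return max_len;
}

int main(void)
{
	unsigned int widths[] = { 7, 16, 23, 26, 27 };

	for (unsigned int i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
		printf("width %2u -> max_buffer_len 0x%lx\n",
		       widths[i], pick_max_buffer_len(widths[i]));
	return 0;
}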