Commit 896e041e authored Mar 14, 2016 by Vinod Koul
Merge branch 'topic/xilinx' into for-linus

parents 0dae1845 69490634
Showing 1 changed file with 109 additions and 95 deletions
drivers/dma/xilinx/xilinx_vdma.c  (+109 -95)  View file @ 896e041e
@@ -28,6 +28,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_dma.h>
@@ -190,8 +191,7 @@ struct xilinx_vdma_tx_descriptor {
  * @desc_offset: TX descriptor registers offset
  * @lock: Descriptor operation lock
  * @pending_list: Descriptors waiting
- * @active_desc: Active descriptor
- * @allocated_desc: Allocated descriptor
+ * @active_list: Descriptors ready to submit
  * @done_list: Complete descriptors
  * @common: DMA common channel
  * @desc_pool: Descriptors pool
@@ -206,6 +206,7 @@ struct xilinx_vdma_tx_descriptor {
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
+ * @desc_pendingcount: Descriptor pending count
  */
 struct xilinx_vdma_chan {
 	struct xilinx_vdma_device *xdev;
@@ -213,8 +214,7 @@ struct xilinx_vdma_chan {
 	u32 desc_offset;
 	spinlock_t lock;
 	struct list_head pending_list;
-	struct xilinx_vdma_tx_descriptor *active_desc;
-	struct xilinx_vdma_tx_descriptor *allocated_desc;
+	struct list_head active_list;
 	struct list_head done_list;
 	struct dma_chan common;
 	struct dma_pool *desc_pool;
@@ -229,6 +229,7 @@ struct xilinx_vdma_chan {
 	struct tasklet_struct tasklet;
 	struct xilinx_vdma_config config;
 	bool flush_on_fsync;
+	u32 desc_pendingcount;
 };
 
 /**
@@ -254,6 +255,9 @@ struct xilinx_vdma_device {
 	container_of(chan, struct xilinx_vdma_chan, common)
 #define to_vdma_tx_descriptor(tx) \
 	container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx)
+#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
+	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
+			   cond, delay_us, timeout_us)
 
 /* IO accessors */
 static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg)
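Note: xilinx_vdma_poll_timeout() is a thin wrapper around readl_poll_timeout() from <linux/iopoll.h>, which the later hunks use to replace the driver's open-coded XILINX_VDMA_LOOP_COUNT busy loops. A rough, simplified sketch of the polling contract (the real iopoll helper tracks time with ktime and may sleep between reads; this busy-wait version and its names are illustrative only):

/* Illustrative sketch only -- not the <linux/iopoll.h> implementation. */
/* Needs <linux/io.h>, <linux/delay.h>, <linux/errno.h>. */
static int poll_reg_sketch(void __iomem *addr, u32 mask,
			   unsigned long timeout_us)
{
	unsigned long elapsed_us = 0;
	u32 val;

	for (;;) {
		val = readl(addr);		/* read the register */
		if (val & mask)
			return 0;		/* condition met: success */
		if (elapsed_us++ >= timeout_us)
			return -ETIMEDOUT;	/* caller sees a non-zero err */
		udelay(1);			/* back off roughly 1 us */
	}
}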
@@ -342,19 +346,11 @@ static struct xilinx_vdma_tx_descriptor *
 xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_tx_descriptor *desc;
-	unsigned long flags;
-
-	if (chan->allocated_desc)
-		return chan->allocated_desc;
 
 	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 	if (!desc)
 		return NULL;
 
-	spin_lock_irqsave(&chan->lock, flags);
-	chan->allocated_desc = desc;
-	spin_unlock_irqrestore(&chan->lock, flags);
-
 	INIT_LIST_HEAD(&desc->segments);
 
 	return desc;
@@ -412,9 +408,7 @@ static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan)
 	xilinx_vdma_free_desc_list(chan, &chan->pending_list);
 	xilinx_vdma_free_desc_list(chan, &chan->done_list);
-
-	xilinx_vdma_free_tx_descriptor(chan, chan->active_desc);
-	chan->active_desc = NULL;
+	xilinx_vdma_free_desc_list(chan, &chan->active_list);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
 }
@@ -560,18 +554,17 @@ static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err;
+	u32 val;
 
 	vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to halt */
-	do {
-		if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		    XILINX_VDMA_DMASR_HALTED)
-			break;
-	} while (loop--);
-
-	if (!loop) {
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				       (val & XILINX_VDMA_DMASR_HALTED), 0,
+				       XILINX_VDMA_LOOP_COUNT);
+
+	if (err) {
 		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
 		chan->err = true;
@@ -586,18 +579,17 @@ static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan)
  */
 static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err;
+	u32 val;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP);
 
 	/* Wait for the hardware to start */
-	do {
-		if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) &
-		      XILINX_VDMA_DMASR_HALTED))
-			break;
-	} while (loop--);
-
-	if (!loop) {
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val,
+				       !(val & XILINX_VDMA_DMASR_HALTED), 0,
+				       XILINX_VDMA_LOOP_COUNT);
+
+	if (err) {
 		dev_err(chan->dev, "Cannot start channel %p: %x\n",
 			chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -614,45 +606,39 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 {
 	struct xilinx_vdma_config *config = &chan->config;
-	struct xilinx_vdma_tx_descriptor *desc;
-	unsigned long flags;
+	struct xilinx_vdma_tx_descriptor *desc, *tail_desc;
 	u32 reg;
-	struct xilinx_vdma_tx_segment *head, *tail = NULL;
+	struct xilinx_vdma_tx_segment *tail_segment;
 
+	/* This function was invoked with lock held */
 	if (chan->err)
 		return;
 
-	spin_lock_irqsave(&chan->lock, flags);
-
-	/* There's already an active descriptor, bail out. */
-	if (chan->active_desc)
-		goto out_unlock;
-
 	if (list_empty(&chan->pending_list))
-		goto out_unlock;
+		return;
 
 	desc = list_first_entry(&chan->pending_list,
 				struct xilinx_vdma_tx_descriptor, node);
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
 
 	/* If it is SG mode and hardware is busy, cannot submit */
 	if (chan->has_sg && xilinx_vdma_is_running(chan) &&
 	    !xilinx_vdma_is_idle(chan)) {
 		dev_dbg(chan->dev, "DMA controller still busy\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
 	 * If hardware is idle, then all descriptors on the running lists are
 	 * done, start new transfers
 	 */
-	if (chan->has_sg) {
-		head = list_first_entry(&desc->segments,
-					struct xilinx_vdma_tx_segment, node);
-		tail = list_entry(desc->segments.prev,
-				  struct xilinx_vdma_tx_segment, node);
-
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys);
-	}
+	if (chan->has_sg)
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC,
+				desc->async_tx.phys);
 
 	/* Configure the hardware using info in the config structure */
 	reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR);
@@ -662,6 +648,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	else
 		reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN;
 
+	/* Configure channel to allow number frame buffers */
+	vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE,
+			chan->desc_pendingcount);
+
 	/*
 	 * With SG, start with circular mode, so that BDs can be fetched.
 	 * In direct register mode, if not parking, enable circular mode
@@ -690,16 +680,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 	xilinx_vdma_start(chan);
 
 	if (chan->err)
-		goto out_unlock;
+		return;
 
 	/* Start the transfer */
 	if (chan->has_sg) {
-		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys);
+		vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC,
+				tail_segment->phys);
 	} else {
 		struct xilinx_vdma_tx_segment *segment, *last = NULL;
 		int i = 0;
 
-		list_for_each_entry(segment, &desc->segments, node) {
+		list_for_each_entry(desc, &chan->pending_list, node) {
+			segment = list_first_entry(&desc->segments,
+					struct xilinx_vdma_tx_segment, node);
 			vdma_desc_write(chan,
 					XILINX_VDMA_REG_START_ADDRESS(i++),
 					segment->hw.buf_addr);
@@ -707,7 +700,7 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		}
 
 		if (!last)
-			goto out_unlock;
+			return;
 
 		/* HW expects these parameters to be same for one transaction */
 		vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize);
@@ -716,11 +709,8 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 		vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize);
 	}
 
-	list_del(&desc->node);
-	chan->active_desc = desc;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
+	list_splice_tail_init(&chan->pending_list, &chan->active_list);
+	chan->desc_pendingcount = 0;
 }
 
 /**
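Taken together, the xilinx_vdma_start_transfer() hunks replace the single chan->active_desc pointer with list bookkeeping: all pending descriptors are programmed, then moved to active_list in one splice and desc_pendingcount is cleared. A minimal sketch of that hand-off, assuming chan->lock is already held (the function name is illustrative; the list helpers and fields are the ones used above):

/* Sketch: move everything queued on pending_list to active_list at once. */
static void activate_pending_sketch(struct xilinx_vdma_chan *chan)
{
	if (list_empty(&chan->pending_list))
		return;

	/* ... hardware has been programmed from the pending descriptors ... */

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}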
@@ -730,8 +720,11 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
 static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
 {
 	struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan);
+	unsigned long flags;
 
+	spin_lock_irqsave(&chan->lock, flags);
 	xilinx_vdma_start_transfer(chan);
+	spin_unlock_irqrestore(&chan->lock, flags);
 }
 
 /**
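The locking model also changes here: xilinx_vdma_start_transfer() and xilinx_vdma_complete_descriptor() no longer take chan->lock themselves, so callers such as issue_pending above and the interrupt handler below hold it around the calls. A compact sketch of that caller-holds-lock convention (function name is illustrative):

/* Sketch: the caller owns chan->lock; the helpers assume it is held. */
static void issue_pending_sketch(struct xilinx_vdma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_vdma_start_transfer(chan);	/* lock-held helper */
	spin_unlock_irqrestore(&chan->lock, flags);
}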
@@ -742,24 +735,17 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
  */
 static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
 {
-	struct xilinx_vdma_tx_descriptor *desc;
-	unsigned long flags;
+	struct xilinx_vdma_tx_descriptor *desc, *next;
 
-	spin_lock_irqsave(&chan->lock, flags);
-
-	desc = chan->active_desc;
-	if (!desc) {
-		dev_dbg(chan->dev, "no running descriptors\n");
-		goto out_unlock;
-	}
-
-	dma_cookie_complete(&desc->async_tx);
-	list_add_tail(&desc->node, &chan->done_list);
-
-	chan->active_desc = NULL;
+	/* This function was invoked with lock held */
+	if (list_empty(&chan->active_list))
+		return;
 
-out_unlock:
-	spin_unlock_irqrestore(&chan->lock, flags);
+	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
+		list_del(&desc->node);
+		dma_cookie_complete(&desc->async_tx);
+		list_add_tail(&desc->node, &chan->done_list);
+	}
 }
 
 /**
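xilinx_vdma_complete_descriptor() now walks active_list with list_for_each_entry_safe() because each descriptor is unlinked inside the loop; the _safe variant caches the next node so the deletion cannot break iteration. A tiny generic sketch of that pattern (struct and function names are illustrative):

/* Sketch: unlink entries while iterating, which needs the _safe iterator. */
struct item {
	struct list_head node;
};

static void drain_sketch(struct list_head *src, struct list_head *dst)
{
	struct item *it, *next;

	list_for_each_entry_safe(it, next, src, node) {
		list_del(&it->node);		/* plain iteration would break here */
		list_add_tail(&it->node, dst);	/* hand the entry to the other list */
	}
}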
@@ -770,21 +756,17 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
  */
 static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 {
-	int loop = XILINX_VDMA_LOOP_COUNT;
+	int err;
 	u32 tmp;
 
 	vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET);
 
-	tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-		XILINX_VDMA_DMACR_RESET;
-
 	/* Wait for the hardware to finish reset */
-	do {
-		tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) &
-			XILINX_VDMA_DMACR_RESET;
-	} while (loop-- && tmp);
+	err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp,
+				       !(tmp & XILINX_VDMA_DMACR_RESET), 0,
+				       XILINX_VDMA_LOOP_COUNT);
 
-	if (!loop) {
+	if (err) {
 		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR),
 			vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR));
@@ -793,7 +775,7 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
 	chan->err = false;
 
-	return 0;
+	return err;
 }
 
 /**
@@ -870,14 +852,54 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
 	}
 
 	if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) {
+		spin_lock(&chan->lock);
 		xilinx_vdma_complete_descriptor(chan);
 		xilinx_vdma_start_transfer(chan);
+		spin_unlock(&chan->lock);
 	}
 
 	tasklet_schedule(&chan->tasklet);
 	return IRQ_HANDLED;
 }
 
+/**
+ * append_desc_queue - Queuing descriptor
+ * @chan: Driver specific dma channel
+ * @desc: dma transaction descriptor
+ */
+static void append_desc_queue(struct xilinx_vdma_chan *chan,
+			      struct xilinx_vdma_tx_descriptor *desc)
+{
+	struct xilinx_vdma_tx_segment *tail_segment;
+	struct xilinx_vdma_tx_descriptor *tail_desc;
+
+	if (list_empty(&chan->pending_list))
+		goto append;
+
+	/*
+	 * Add the hardware descriptor to the chain of hardware descriptors
+	 * that already exists in memory.
+	 */
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_vdma_tx_descriptor, node);
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_vdma_tx_segment, node);
+	tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+
+	/*
+	 * Add the software descriptor and all children to the list
+	 * of pending transactions
+	 */
+append:
+	list_add_tail(&desc->node, &chan->pending_list);
+	chan->desc_pendingcount++;
+
+	if (unlikely(chan->desc_pendingcount > chan->num_frms)) {
+		dev_dbg(chan->dev, "desc pendingcount is too high\n");
+		chan->desc_pendingcount = chan->num_frms;
+	}
+}
+
 /**
  * xilinx_vdma_tx_submit - Submit DMA transaction
  * @tx: Async transaction descriptor
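The new append_desc_queue() keeps the hardware and software views consistent: the previous tail segment's next_desc is pointed at the new descriptor's BD chain, the descriptor is appended to pending_list, and desc_pendingcount is clamped to num_frms (presumably because the VDMA cannot use more frame buffers than it has frame stores; that rationale is an inference, not stated in the diff). A small sketch of how a submit path hands a descriptor to it, mirroring the tx_submit hunk below (function name is illustrative):

/* Sketch: how tx_submit hands a descriptor to the queue, under chan->lock. */
static void submit_sketch(struct xilinx_vdma_chan *chan,
			  struct xilinx_vdma_tx_descriptor *desc)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	append_desc_queue(chan, desc);		/* chains BDs and queues desc */
	spin_unlock_irqrestore(&chan->lock, flags);
}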
@@ -906,11 +928,8 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
 	cookie = dma_cookie_assign(tx);
 
-	/* Append the transaction to the pending transactions queue. */
-	list_add_tail(&desc->node, &chan->pending_list);
-
-	/* Free the allocated desc */
-	chan->allocated_desc = NULL;
+	/* Put this transaction onto the tail of the pending queue */
+	append_desc_queue(chan, desc);
 
 	spin_unlock_irqrestore(&chan->lock, flags);
@@ -973,13 +992,6 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	else
 		hw->buf_addr = xt->src_start;
 
-	/* Link the previous next descriptor to current */
-	if (!list_empty(&desc->segments)) {
-		prev = list_last_entry(&desc->segments,
-				       struct xilinx_vdma_tx_segment, node);
-		prev->hw.next_desc = segment->phys;
-	}
-
 	/* Insert the segment into the descriptor segments list. */
 	list_add_tail(&segment->node, &desc->segments);
@@ -988,7 +1000,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
 	/* Link the last hardware descriptor with the first. */
 	segment = list_first_entry(&desc->segments,
 				   struct xilinx_vdma_tx_segment, node);
-	prev->hw.next_desc = segment->phys;
+	desc->async_tx.phys = segment->phys;
 
 	return &desc->async_tx;
@@ -1127,10 +1139,12 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
 	chan->dev = xdev->dev;
 	chan->xdev = xdev;
 	chan->has_sg = xdev->has_sg;
+	chan->desc_pendingcount = 0x0;
 
 	spin_lock_init(&chan->lock);
 	INIT_LIST_HEAD(&chan->pending_list);
 	INIT_LIST_HEAD(&chan->done_list);
+	INIT_LIST_HEAD(&chan->active_list);
 
 	/* Retrieve the channel properties from the device tree */
 	has_dre = of_property_read_bool(node, "xlnx,include-dre");