Kirill Smelkov / linux — Commits

Commit 3f134c95, authored Nov 11, 2022 by Vinod Koul
Merge branch 'fixes' into next
Merge due to at_hdmac driver dependency
Parents: 739153a6, c47e6403
14 changed files with 150 additions and 121 deletions (+150 / -121)
drivers/dma/apple-admac.c        +1   -1
drivers/dma/at_hdmac.c          +60  -93
drivers/dma/at_hdmac_regs.h      +5   -5
drivers/dma/idxd/cdev.c         +18   -0
drivers/dma/idxd/device.c       +17   -9
drivers/dma/idxd/idxd.h         +32   -0
drivers/dma/idxd/init.c          +2   -2
drivers/dma/idxd/sysfs.c         +1   -1
drivers/dma/mv_xor_v2.c          +1   -0
drivers/dma/pxa_dma.c            +2   -2
drivers/dma/stm32-dma.c          +6   -8
drivers/dma/stm32-mdma.c         +1   -0
drivers/dma/ti/k3-udma-glue.c    +3   -0
include/uapi/linux/idxd.h        +1   -0
drivers/dma/apple-admac.c

@@ -585,7 +585,7 @@ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
                 return NULL;
         }
 
-        return &ad->channels[index].chan;
+        return dma_get_slave_channel(&ad->channels[index].chan);
 }
 
 static int admac_drain_reports(struct admac_data *ad, int channo)
...
...
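The one-line apple-admac change above routes the channel through dma_get_slave_channel() so the dmaengine core can claim it (and refuse it when already busy) instead of handing out a bare pointer. Below is a minimal sketch of that of_xlate pattern under stated assumptions: the channels[index].chan access comes from the diff, while the nchannels and dev fields of struct admac_data and the example_ function name are illustrative, not the driver's exact code.

/*
 * Sketch only: return the channel through dma_get_slave_channel() so the
 * core marks it in use, or NULL if it is already taken. Fields not visible
 * in the diff (nchannels, dev) are assumptions.
 */
#include <linux/dmaengine.h>
#include <linux/of_dma.h>

static struct dma_chan *example_of_xlate(struct of_phandle_args *dma_spec,
                                         struct of_dma *ofdma)
{
        struct admac_data *ad = ofdma->of_dma_data;
        unsigned int index;

        if (dma_spec->args_count != 1)
                return NULL;

        index = dma_spec->args[0];
        if (index >= ad->nchannels) {
                dev_err(ad->dev, "channel index %u out of range\n", index);
                return NULL;
        }

        /* Claim the channel via the dmaengine core; NULL if already busy. */
        return dma_get_slave_channel(&ad->channels[index].chan);
}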
drivers/dma/at_hdmac.c

@@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
                        ATC_SPIP_BOUNDARY(first->boundary));
         channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
                        ATC_DPIP_BOUNDARY(first->boundary));
 
+        /* Don't allow CPU to reorder channel enable. */
+        wmb();
         dma_writel(atdma, CHER, atchan->mask);
 
         vdbg_dump_regs(atchan);
...
...
@@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
         struct at_desc *desc_first = atc_first_active(atchan);
         struct at_desc *desc;
         int ret;
-        u32 ctrla, dscr, trials;
+        u32 ctrla, dscr;
+        unsigned int i;
 
         /*
          * If the cookie doesn't match to the currently running transfer then
...
...
@@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
                 dscr = channel_readl(atchan, DSCR);
                 rmb(); /* ensure DSCR is read before CTRLA */
                 ctrla = channel_readl(atchan, CTRLA);
-                for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
+                for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
                         u32 new_dscr;
 
                         rmb(); /* ensure DSCR is read after CTRLA */
...
...
@@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
                         rmb(); /* ensure DSCR is read before CTRLA */
                         ctrla = channel_readl(atchan, CTRLA);
                 }
 
-                if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
+                if (unlikely(i == ATC_MAX_DSCR_TRIALS))
                         return -ETIMEDOUT;
 
                 /* for the first descriptor we can be more accurate */
...
...
@@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
         if (!atc_chan_is_cyclic(atchan))
                 dma_cookie_complete(txd);
 
-        /* If the transfer was a memset, free our temporary buffer */
-        if (desc->memset_buffer) {
-                dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
-                              desc->memset_paddr);
-                desc->memset_buffer = false;
-        }
-
-        /* move children to free_list */
-        list_splice_init(&desc->tx_list, &atchan->free_list);
-        /* move myself to free_list */
-        list_move(&desc->desc_node, &atchan->free_list);
-
         spin_unlock_irqrestore(&atchan->lock, flags);
 
         dma_descriptor_unmap(txd);
...
...
@@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
                 dmaengine_desc_get_callback_invoke(txd, NULL);
 
         dma_run_dependencies(txd);
-}
-
-/**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Eventually submit queued descriptors if any
- *
- * Assume channel is idle while calling this function
- * Called with atchan->lock held and bh disabled
- */
-static void atc_complete_all(struct at_dma_chan *atchan)
-{
-        struct at_desc *desc, *_desc;
-        LIST_HEAD(list);
-        unsigned long flags;
-
-        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
-
-        spin_lock_irqsave(&atchan->lock, flags);
-
-        /*
-         * Submit queued descriptors ASAP, i.e. before we go through
-         * the completed ones.
-         */
-        if (!list_empty(&atchan->queue))
-                atc_dostart(atchan, atc_first_queued(atchan));
-        /* empty active_list now it is completed */
-        list_splice_init(&atchan->active_list, &list);
-        /* empty queue list by moving descriptors (if any) to active_list */
-        list_splice_init(&atchan->queue, &atchan->active_list);
-
-        spin_unlock_irqrestore(&atchan->lock, flags);
-
-        list_for_each_entry_safe(desc, _desc, &list, desc_node)
-                atc_chain_complete(atchan, desc);
+
+        spin_lock_irqsave(&atchan->lock, flags);
+        /* move children to free_list */
+        list_splice_init(&desc->tx_list, &atchan->free_list);
+        /* add myself to free_list */
+        list_add(&desc->desc_node, &atchan->free_list);
+        spin_unlock_irqrestore(&atchan->lock, flags);
+
+        /* If the transfer was a memset, free our temporary buffer */
+        if (desc->memset_buffer) {
+                dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
+                              desc->memset_paddr);
+                desc->memset_buffer = false;
+        }
 }
 
 /**
...
...
@@ -527,26 +496,28 @@ static void atc_advance_work(struct at_dma_chan *atchan)
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+        struct at_desc *desc;
         unsigned long flags;
-        int ret;
 
         dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
         spin_lock_irqsave(&atchan->lock, flags);
-        ret = atc_chan_is_enabled(atchan);
-        spin_unlock_irqrestore(&atchan->lock, flags);
-        if (ret)
-                return;
-
-        if (list_empty(&atchan->active_list) ||
-            list_is_singular(&atchan->active_list))
-                return atc_complete_all(atchan);
+        if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
+                return spin_unlock_irqrestore(&atchan->lock, flags);
 
-        atc_chain_complete(atchan, atc_first_active(atchan));
+        desc = atc_first_active(atchan);
+        /* Remove the transfer node from the active list. */
+        list_del_init(&desc->desc_node);
+        spin_unlock_irqrestore(&atchan->lock, flags);
+        atc_chain_complete(atchan, desc);
 
         /* advance work */
         spin_lock_irqsave(&atchan->lock, flags);
-        atc_dostart(atchan, atc_first_active(atchan));
+        if (!list_empty(&atchan->active_list)) {
+                desc = atc_first_queued(atchan);
+                list_move_tail(&desc->desc_node, &atchan->active_list);
+                atc_dostart(atchan, desc);
+        }
         spin_unlock_irqrestore(&atchan->lock, flags);
 }
...
...
@@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan)
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
         struct at_desc *bad_desc;
+        struct at_desc *desc;
         struct at_desc *child;
         unsigned long flags;
...
...
@@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan)
         bad_desc = atc_first_active(atchan);
         list_del_init(&bad_desc->desc_node);
 
-        /* As we are stopped, take advantage to push queued descriptors
-         * in active_list */
-        list_splice_init(&atchan->queue, atchan->active_list.prev);
-
         /* Try to restart the controller */
-        if (!list_empty(&atchan->active_list))
-                atc_dostart(atchan, atc_first_active(atchan));
+        if (!list_empty(&atchan->active_list)) {
+                desc = atc_first_queued(atchan);
+                list_move_tail(&desc->desc_node, &atchan->active_list);
+                atc_dostart(atchan, desc);
+        }
 
         /*
          * KERN_CRITICAL may seem harsh, but since this only happens
...
...
@@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
 
         spin_lock_irqsave(&atchan->lock, flags);
         cookie = dma_cookie_assign(tx);
-
-        if (list_empty(&atchan->active_list)) {
-                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
-                                desc->txd.cookie);
-                atc_dostart(atchan, desc);
-                list_add_tail(&desc->desc_node, &atchan->active_list);
-        } else {
-                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
-                                desc->txd.cookie);
-                list_add_tail(&desc->desc_node, &atchan->queue);
-        }
-
+        list_add_tail(&desc->desc_node, &atchan->queue);
         spin_unlock_irqrestore(&atchan->lock, flags);
+
+        dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
+                 desc->txd.cookie);
 
         return cookie;
 }
...
...
@@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan)
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
         struct at_dma *atdma = to_at_dma(chan->device);
         int chan_id = atchan->chan_common.chan_id;
-        struct at_desc *desc, *_desc;
         unsigned long flags;
-        LIST_HEAD(list);
 
         dev_vdbg(chan2dev(chan), "%s\n", __func__);
 
         /*
...
...
@@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan)
                 cpu_relax();
 
         /* active_list entries will end up before queued entries */
-        list_splice_init(&atchan->queue, &list);
-        list_splice_init(&atchan->active_list, &list);
-
-        spin_unlock_irqrestore(&atchan->lock, flags);
-
-        /* Flush all pending and queued descriptors */
-        list_for_each_entry_safe(desc, _desc, &list, desc_node)
-                atc_chain_complete(atchan, desc);
+        list_splice_tail_init(&atchan->queue, &atchan->free_list);
+        list_splice_tail_init(&atchan->active_list, &atchan->free_list);
 
         clear_bit(ATC_IS_PAUSED, &atchan->status);
         /* if channel dedicated to cyclic operations, free it */
         clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
+        spin_unlock_irqrestore(&atchan->lock, flags);
+
         return 0;
 }
...
...
@@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan,
 }
 
 /**
- * atc_issue_pending - try to finish work
+ * atc_issue_pending - takes the first transaction descriptor in the pending
+ * queue and starts the transfer.
  * @chan: target DMA channel
  */
 static void atc_issue_pending(struct dma_chan *chan)
 {
         struct at_dma_chan *atchan = to_at_dma_chan(chan);
+        struct at_desc *desc;
+        unsigned long flags;
 
         dev_vdbg(chan2dev(chan), "issue_pending\n");
 
-        /* Not needed for cyclic transfers */
-        if (atc_chan_is_cyclic(atchan))
-                return;
-
-        atc_advance_work(atchan);
+        spin_lock_irqsave(&atchan->lock, flags);
+        if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
+                return spin_unlock_irqrestore(&atchan->lock, flags);
+
+        desc = atc_first_queued(atchan);
+        list_move_tail(&desc->desc_node, &atchan->active_list);
+        atc_dostart(atchan, desc);
+        spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
...
...
@@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev)
                  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
                  plat_dat->nr_channels);
 
-        dma_async_device_register(&atdma->dma_common);
+        err = dma_async_device_register(&atdma->dma_common);
+        if (err) {
+                dev_err(&pdev->dev, "Unable to register: %d.\n", err);
+                goto err_dma_async_device_register;
+        }
 
         /*
          * Do not return an error if the dmac node is not present in order to
...
...
@@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 err_of_dma_controller_register:
         dma_async_device_unregister(&atdma->dma_common);
+err_dma_async_device_register:
         dma_pool_destroy(atdma->memset_pool);
 err_memset_pool_create:
         dma_pool_destroy(atdma->dma_desc_pool);
...
...
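Taken together, the at_hdmac.c hunks above converge on a simpler descriptor lifecycle: tx_submit only queues work, issue_pending moves the first queued descriptor onto the active list and starts it, and completion detaches the finished node while holding the channel lock, runs its callback with the lock released, then starts the next transfer. The sketch below only illustrates that locking pattern under assumed names (demo_chan, demo_desc and the callback parameters are made up); it is not the driver's real API, and the real code additionally checks whether the hardware is still busy before starting a new descriptor.

/*
 * Illustrative pattern only (hypothetical demo_* names): submitted work sits
 * on "queue", started work sits on "active", list manipulation happens under
 * the channel lock, and completion callbacks run outside it.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_desc {
        struct list_head node;
};

struct demo_chan {
        spinlock_t lock;
        struct list_head queue;         /* submitted, not yet started */
        struct list_head active;        /* handed to the hardware */
};

static void demo_issue_pending(struct demo_chan *c,
                               void (*start)(struct demo_desc *))
{
        struct demo_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        if (!list_empty(&c->queue)) {
                d = list_first_entry(&c->queue, struct demo_desc, node);
                list_move_tail(&d->node, &c->active);
                start(d);               /* real driver: only if HW is idle */
        }
        spin_unlock_irqrestore(&c->lock, flags);
}

static void demo_advance_work(struct demo_chan *c,
                              void (*complete)(struct demo_desc *),
                              void (*start)(struct demo_desc *))
{
        struct demo_desc *d;
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        if (list_empty(&c->active)) {
                spin_unlock_irqrestore(&c->lock, flags);
                return;
        }
        d = list_first_entry(&c->active, struct demo_desc, node);
        list_del_init(&d->node);        /* detach under the lock ... */
        spin_unlock_irqrestore(&c->lock, flags);

        complete(d);                    /* ... run the callback without it */

        demo_issue_pending(c, start);   /* then kick the next transfer */
}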
drivers/dma/at_hdmac_regs.h

@@ -186,13 +186,13 @@
 /* LLI == Linked List Item; aka DMA buffer descriptor */
 struct at_lli {
         /* values that are not changed by hardware */
-        dma_addr_t      saddr;
-        dma_addr_t      daddr;
+        u32             saddr;
+        u32             daddr;
         /* value that may get written back: */
         u32             ctrla;
         /* more values that are not changed by hardware */
         u32             ctrlb;
-        dma_addr_t      dscr;   /* chain to next lli */
+        u32             dscr;   /* chain to next lli */
 };
 
 /**
...
...
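The struct at_lli change above matters because these descriptors are fetched directly by the DMA controller, which expects five consecutive 32-bit words; on a 64-bit kernel a dma_addr_t field can be 8 bytes wide and would shift every following field. A hedged illustration of that size constraint, re-declaring the structure purely for the check:

/*
 * Illustration only: with all fields fixed at u32, the LLI is exactly five
 * 32-bit words, which is what the 32-bit controller reads. The struct is
 * re-declared here just to show the check; it is not the driver's header.
 */
#include <linux/build_bug.h>
#include <linux/types.h>

struct at_lli_example {
        u32 saddr;
        u32 daddr;
        u32 ctrla;
        u32 ctrlb;
        u32 dscr;       /* chain to next lli */
};

static_assert(sizeof(struct at_lli_example) == 5 * sizeof(u32));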
drivers/dma/idxd/cdev.c

@@ -312,6 +312,24 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
         if (idxd->state != IDXD_DEV_ENABLED)
                 return -ENXIO;
 
+        /*
+         * User type WQ is enabled only when SVA is enabled for two reasons:
+         *   - If no IOMMU or IOMMU Passthrough without SVA, userspace
+         *     can directly access physical address through the WQ.
+         *   - The IDXD cdev driver does not provide any ways to pin
+         *     user pages and translate the address from user VA to IOVA or
+         *     PA without IOMMU SVA. Therefore the application has no way
+         *     to instruct the device to perform DMA function. This makes
+         *     the cdev not usable for normal application usage.
+         */
+        if (!device_user_pasid_enabled(idxd)) {
+                idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
+                dev_dbg(&idxd->pdev->dev,
+                        "User type WQ cannot be enabled without SVA.\n");
+
+                return -EOPNOTSUPP;
+        }
+
         mutex_lock(&wq->wq_lock);
         wq->type = IDXD_WQT_USER;
         rc = drv_enable_wq(wq);
...
...
drivers/dma/idxd/device.c

@@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
         clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags);
         memset(wq->name, 0, WQ_NAME_SIZE);
         wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
-        wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+        idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
         if (wq->opcap_bmap)
                 bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS);
 }
...
...
@@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
 
 void idxd_device_clear_state(struct idxd_device *idxd)
 {
-        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
-                return;
+        /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */
+        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) {
+                /*
+                 * Clearing wq state is protected by wq lock.
+                 * So no need to be protected by device lock.
+                 */
+                idxd_device_wqs_clear_state(idxd);
+
+                spin_lock(&idxd->dev_lock);
+                idxd_groups_clear_state(idxd);
+                idxd_engines_clear_state(idxd);
+        } else {
+                spin_lock(&idxd->dev_lock);
+        }
 
-        idxd_device_wqs_clear_state(idxd);
-        spin_lock(&idxd->dev_lock);
-        idxd_groups_clear_state(idxd);
-        idxd_engines_clear_state(idxd);
         idxd->state = IDXD_DEV_DISABLED;
         spin_unlock(&idxd->dev_lock);
 }
...
...
@@ -869,7 +877,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
 
         /* bytes 12-15 */
         wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
-        wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
+        idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size));
 
         /* bytes 32-63 */
         if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) {
...
...
@@ -1051,7 +1059,7 @@ static int idxd_wq_load_config(struct idxd_wq *wq)
         wq->priority = wq->wqcfg->priority;
 
         wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift;
-        wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift;
+        idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift);
 
         for (i = 0; i < WQCFG_STRIDES(idxd); i++) {
                 wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i);
...
...
drivers/dma/idxd/idxd.h

@@ -548,6 +548,38 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq)
         return wq->client_count;
 };
 
+/*
+ * Intel IAA does not support batch processing.
+ * The max batch size of device, max batch size of wq and
+ * max batch shift of wqcfg should be always 0 on IAA.
+ */
+static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd,
+                                           u32 max_batch_size)
+{
+        if (idxd_type == IDXD_TYPE_IAX)
+                idxd->max_batch_size = 0;
+        else
+                idxd->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq,
+                                              u32 max_batch_size)
+{
+        if (idxd_type == IDXD_TYPE_IAX)
+                wq->max_batch_size = 0;
+        else
+                wq->max_batch_size = max_batch_size;
+}
+
+static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg,
+                                                  u32 max_batch_shift)
+{
+        if (idxd_type == IDXD_TYPE_IAX)
+                wqcfg->max_batch_shift = 0;
+        else
+                wqcfg->max_batch_shift = max_batch_shift;
+}
+
 int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv,
                                         struct module *module, const char *mod_name);
 
 #define idxd_driver_register(driver) \
...
...
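The three helpers above centralize one quirk: Intel IAA (IDXD_TYPE_IAX) does not support batch processing, so any requested batch size or shift is clamped to 0 in a single place, and the callers patched in device.c, init.c and sysfs.c no longer need to special-case the device type. A hedged usage sketch follows; the wq->idxd back-pointer is assumed from the driver, and the example_ function name is made up.

/* Usage sketch only, in the spirit of the callers changed by this merge. */
static void example_wq_reset_batch(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;    /* back-pointer assumed */

        /* Stores 0 on IDXD_TYPE_IAX, WQ_DEFAULT_MAX_BATCH otherwise. */
        idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
}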
drivers/dma/idxd/init.c

@@ -183,7 +183,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
                 init_completion(&wq->wq_dead);
                 init_completion(&wq->wq_resurrect);
                 wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
-                wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+                idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH);
                 wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
                 wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
                 if (!wq->wqcfg) {
...
...
@@ -418,7 +418,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
 
         idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
         dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
-        idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
+        idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift);
         dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size);
         if (idxd->hw.gen_cap.config_en)
                 set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags);
...
...
drivers/dma/idxd/sysfs.c

@@ -1065,7 +1065,7 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
         if (batch_size > idxd->max_batch_size)
                 return -EINVAL;
 
-        wq->max_batch_size = (u32)batch_size;
+        idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size);
 
         return count;
 }
...
...
drivers/dma/mv_xor_v2.c

@@ -893,6 +893,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
         tasklet_kill(&xor_dev->irq_tasklet);
 
         clk_disable_unprepare(xor_dev->clk);
+        clk_disable_unprepare(xor_dev->reg_clk);
 
         return 0;
 }
...
...
drivers/dma/pxa_dma.c

@@ -1247,14 +1247,14 @@ static int pxad_init_phys(struct platform_device *op,
                 return -ENOMEM;
 
         for (i = 0; i < nb_phy_chans; i++)
-                if (platform_get_irq(op, i) > 0)
+                if (platform_get_irq_optional(op, i) > 0)
                         nr_irq++;
 
         for (i = 0; i < nb_phy_chans; i++) {
                 phy = &pdev->phys[i];
                 phy->base = pdev->base;
                 phy->idx = i;
-                irq = platform_get_irq(op, i);
+                irq = platform_get_irq_optional(op, i);
                 if ((nr_irq > 1) && (irq > 0))
                         ret = devm_request_irq(&op->dev, irq,
                                                pxad_chan_handler,
...
...
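The pxa_dma hunk swaps platform_get_irq() for platform_get_irq_optional(): both return a negative error when the IRQ is absent, but the _optional variant does so silently, which is the right behaviour here because not every physical channel has its own interrupt line. A small sketch of that counting pattern, with illustrative function and parameter names:

/*
 * Sketch of the probe-time pattern above: count the IRQs that are actually
 * wired up without logging an error for each one that is legitimately
 * missing. Names are illustrative.
 */
#include <linux/platform_device.h>

static unsigned int example_count_present_irqs(struct platform_device *op,
                                               unsigned int nb_phy_chans)
{
        unsigned int i, nr_irq = 0;

        for (i = 0; i < nb_phy_chans; i++)
                if (platform_get_irq_optional(op, i) > 0)
                        nr_irq++;       /* negative return means "not present" */

        return nr_irq;
}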
drivers/dma/stm32-dma.c

@@ -675,6 +675,8 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
 
         chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
 
+        chan->status = DMA_PAUSED;
+
         dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
 }
...
...
@@ -789,9 +791,7 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
         if (status & STM32_DMA_TCI) {
                 stm32_dma_irq_clear(chan, STM32_DMA_TCI);
                 if (scr & STM32_DMA_SCR_TCIE) {
-                        if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
-                                stm32_dma_handle_chan_paused(chan);
-                        else
+                        if (chan->status != DMA_PAUSED)
                                 stm32_dma_handle_chan_done(chan, scr);
                 }
                 status &= ~STM32_DMA_TCI;
...
...
@@ -838,13 +838,11 @@ static int stm32_dma_pause(struct dma_chan *c)
                 return -EPERM;
 
         spin_lock_irqsave(&chan->vchan.lock, flags);
+
         ret = stm32_dma_disable_chan(chan);
-        /*
-         * A transfer complete flag is set to indicate the end of transfer due to the stream
-         * interruption, so wait for interrupt
-         */
         if (!ret)
-                chan->status = DMA_PAUSED;
+                stm32_dma_handle_chan_paused(chan);
+
         spin_unlock_irqrestore(&chan->vchan.lock, flags);
 
         return ret;
...
...
drivers/dma/stm32-mdma.c

@@ -1539,6 +1539,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec,
                 return NULL;
         }
 
+        memset(&config, 0, sizeof(config));
         config.request = dma_spec->args[0];
         config.priority_level = dma_spec->args[1];
         config.transfer_config = dma_spec->args[2];
...
...
drivers/dma/ti/k3-udma-glue.c

@@ -300,6 +300,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
         ret = device_register(&tx_chn->common.chan_dev);
         if (ret) {
                 dev_err(dev, "Channel Device registration failed %d\n", ret);
+                put_device(&tx_chn->common.chan_dev);
                 tx_chn->common.chan_dev.parent = NULL;
                 goto err;
         }
...
...
@@ -918,6 +919,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
         ret = device_register(&rx_chn->common.chan_dev);
         if (ret) {
                 dev_err(dev, "Channel Device registration failed %d\n", ret);
+                put_device(&rx_chn->common.chan_dev);
                 rx_chn->common.chan_dev.parent = NULL;
                 goto err;
         }
...
...
@@ -1049,6 +1051,7 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
         ret = device_register(&rx_chn->common.chan_dev);
         if (ret) {
                 dev_err(dev, "Channel Device registration failed %d\n", ret);
+                put_device(&rx_chn->common.chan_dev);
                 rx_chn->common.chan_dev.parent = NULL;
                 goto err;
         }
...
...
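All three k3-udma-glue hunks apply the same rule: once device_register() has been called on an embedded struct device, the reference it holds must be dropped with put_device() on failure rather than by freeing the containing object directly, otherwise the release path never runs. A hedged sketch of that error path (the chan_dev idea and the message mirror the diff; the function name is made up):

/*
 * Generic shape of the fix above: on device_register() failure, give the
 * reference back with put_device() instead of leaking or kfree()-ing the
 * embedded struct device. Illustrative only.
 */
#include <linux/device.h>

static int example_register_chan_dev(struct device *parent,
                                     struct device *chan_dev)
{
        int ret;

        ret = device_register(chan_dev);
        if (ret) {
                dev_err(parent, "Channel Device registration failed %d\n", ret);
                put_device(chan_dev);   /* never kfree() after device_register() */
                chan_dev->parent = NULL;
                return ret;
        }

        return 0;
}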
include/uapi/linux/idxd.h

@@ -29,6 +29,7 @@ enum idxd_scmd_stat {
         IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
         IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
         IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
+        IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
 };
 
 #define IDXD_SCMD_SOFTERR_MASK  0x80000000
...
...
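The new IDXD_SCMD_WQ_USER_NO_IOMMU value is what the cdev.c hunk above stores in cmd_status when a user-type WQ is refused because SVA is not enabled, so tooling can distinguish this from other enable failures. A hedged userspace sketch of decoding such a status word with the existing IDXD_SCMD_SOFTERR_MASK; how the value is read (for example from the driver's cmd_status attribute) is assumed and outside this snippet, and it requires installed headers that already contain the new enum entry.

/* Illustrative decode of a wq enable status word from <linux/idxd.h>. */
#include <stdio.h>
#include <linux/idxd.h>

static void report_wq_enable_status(unsigned int status)
{
        if (!(status & IDXD_SCMD_SOFTERR_MASK)) {
                printf("status 0x%x: not a driver soft error\n", status);
                return;
        }

        if (status == IDXD_SCMD_WQ_USER_NO_IOMMU)
                printf("user WQ rejected: IOMMU/SVA not enabled\n");
        else
                printf("driver soft error 0x%x\n", status);
}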