Commit 47ec7f09 authored by Dave Jiang's avatar Dave Jiang Committed by Vinod Koul

dmaengine: cookie bypass for out of order completion

The cookie tracking in dmaengine expects all submissions completed in
order. Some DMA devices like Intel DSA can complete submissions out of
order, especially if configured with a work queue sharing multiple DMA
engines. Add a status DMA_OUT_OF_ORDER that tx_status can be returned for
those DMA devices. The user should use callbacks to track the completion
rather than the DMA cookie. This would address the issue of dmatest
complaining that descriptors are "busy" when the cookie count goes
backwards due to out of order completion. Add DMA_COMPLETION_NO_ORDER
DMA capability to allow the driver to flag the device's ability to complete
operations out of order.
Reported-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Tested-by: Swathi Kovvuri <swathi.kovvuri@intel.com>
Link: https://lore.kernel.org/r/158939557151.20335.12404113976045569870.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent c09a7ce6
...@@ -239,6 +239,22 @@ Currently, the types available are: ...@@ -239,6 +239,22 @@ Currently, the types available are:
want to transfer a portion of uncompressed data directly to the want to transfer a portion of uncompressed data directly to the
display to print it display to print it
- DMA_COMPLETION_NO_ORDER
- The device does not support in-order completion.
- The driver should return DMA_OUT_OF_ORDER for device_tx_status if
the device sets this capability.
- All cookie tracking and checking API should be treated as invalid if
the device exports this capability.
- At this point, this is incompatible with polling option for dmatest.
- If this cap is set, the user is recommended to provide a unique
identifier for each descriptor sent to the DMA device in order to
properly track the completion.
These various types will also affect how the source and destination These various types will also affect how the source and destination
addresses change over time. addresses change over time.
...@@ -399,6 +415,9 @@ supported. ...@@ -399,6 +415,9 @@ supported.
- In the case of a cyclic transfer, it should only take into - In the case of a cyclic transfer, it should only take into
account the current period. account the current period.
- Should return DMA_OUT_OF_ORDER if the device does not support in-order
completion and is completing the operation out of order.
- This function can be called in an interrupt context. - This function can be called in an interrupt context.
- device_config - device_config
......
...@@ -829,7 +829,10 @@ static int dmatest_func(void *data) ...@@ -829,7 +829,10 @@ static int dmatest_func(void *data)
result("test timed out", total_tests, src->off, dst->off, result("test timed out", total_tests, src->off, dst->off,
len, 0); len, 0);
goto error_unmap_continue; goto error_unmap_continue;
} else if (status != DMA_COMPLETE) { } else if (status != DMA_COMPLETE &&
!(dma_has_cap(DMA_COMPLETION_NO_ORDER,
dev->cap_mask) &&
status == DMA_OUT_OF_ORDER)) {
result(status == DMA_ERROR ? result(status == DMA_ERROR ?
"completion error status" : "completion error status" :
"completion busy status", total_tests, src->off, "completion busy status", total_tests, src->off,
...@@ -1007,6 +1010,12 @@ static int dmatest_add_channel(struct dmatest_info *info, ...@@ -1007,6 +1010,12 @@ static int dmatest_add_channel(struct dmatest_info *info,
dtc->chan = chan; dtc->chan = chan;
INIT_LIST_HEAD(&dtc->threads); INIT_LIST_HEAD(&dtc->threads);
if (dma_has_cap(DMA_COMPLETION_NO_ORDER, dma_dev->cap_mask) &&
info->params.polled) {
info->params.polled = false;
pr_warn("DMA_COMPLETION_NO_ORDER, polled disabled\n");
}
if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) {
if (dmatest == 0) { if (dmatest == 0) {
cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY);
......
...@@ -133,7 +133,7 @@ static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, ...@@ -133,7 +133,7 @@ static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
dma_cookie_t cookie, dma_cookie_t cookie,
struct dma_tx_state *txstate) struct dma_tx_state *txstate)
{ {
return dma_cookie_status(dma_chan, cookie, txstate); return DMA_OUT_OF_ORDER;
} }
/* /*
...@@ -174,6 +174,7 @@ int idxd_register_dma_device(struct idxd_device *idxd) ...@@ -174,6 +174,7 @@ int idxd_register_dma_device(struct idxd_device *idxd)
INIT_LIST_HEAD(&dma->channels); INIT_LIST_HEAD(&dma->channels);
dma->dev = &idxd->pdev->dev; dma->dev = &idxd->pdev->dev;
dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
dma->device_release = idxd_dma_release; dma->device_release = idxd_dma_release;
if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
......
...@@ -39,6 +39,7 @@ enum dma_status { ...@@ -39,6 +39,7 @@ enum dma_status {
DMA_IN_PROGRESS, DMA_IN_PROGRESS,
DMA_PAUSED, DMA_PAUSED,
DMA_ERROR, DMA_ERROR,
DMA_OUT_OF_ORDER,
}; };
/** /**
...@@ -61,6 +62,7 @@ enum dma_transaction_type { ...@@ -61,6 +62,7 @@ enum dma_transaction_type {
DMA_SLAVE, DMA_SLAVE,
DMA_CYCLIC, DMA_CYCLIC,
DMA_INTERLEAVE, DMA_INTERLEAVE,
DMA_COMPLETION_NO_ORDER,
/* last transaction type for creation of the capabilities mask */ /* last transaction type for creation of the capabilities mask */
DMA_TX_TYPE_END, DMA_TX_TYPE_END,
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment