Commit 669ab0b2 authored by Dan Williams

async_tx: fix handling of the "out of descriptor" condition in async_xor

Ensure forward progress is made when a dmaengine driver is unable to
allocate an xor descriptor by breaking the dependency chain with
async_tx_quiesce() and issuing any pending descriptors.

Tested with iop-adma by setting device->max_xor = 2 to force multiple
calls to device_prep_dma_xor for each call to async_xor and limiting the
descriptor slot pool to 5.  Discovered that the minimum descriptor pool
size for iop-adma is 2 * iop_chan_xor_slot_cnt(device->max_xor) + 1.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 1e55db2d
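
For reference, the retry pattern this commit applies in do_async_xor() is: if the
driver returns no descriptor, quiesce the dependent transaction (so completed,
acked descriptors can be reclaimed), then spin on the prep call while issuing
pending work so the channel keeps draining. The sketch below condenses that logic
into a standalone helper; the wrapper name prep_xor_with_retry and its parameter
list are illustrative only (the real change is inline in do_async_xor(), as the
diff shows), and it assumes the kernel dmaengine context of the file being
modified plus the async_tx_quiesce() helper introduced here.

/* Condensed sketch (illustrative, not the literal patch) of the
 * "out of descriptors" handling added to do_async_xor().
 */
#include <linux/dmaengine.h>
#include <linux/async_tx.h>

static struct dma_async_tx_descriptor *
prep_xor_with_retry(struct dma_device *dma, struct dma_chan *chan,
		    dma_addr_t dma_dest, dma_addr_t *dma_src,
		    unsigned int src_cnt, size_t len, unsigned long dma_flags,
		    struct dma_async_tx_descriptor **depend_tx)
{
	struct dma_async_tx_descriptor *tx;

	tx = dma->device_prep_dma_xor(chan, dma_dest, dma_src,
				      src_cnt, len, dma_flags);
	if (unlikely(!tx))
		/* out of descriptors: break the dependency chain so the
		 * driver may reclaim completed, acked descriptors
		 */
		async_tx_quiesce(depend_tx);

	while (unlikely(!tx)) {
		/* kick the channel so in-flight descriptors complete and
		 * free slots, then retry the allocation
		 */
		dma_async_issue_pending(chan);
		tx = dma->device_prep_dma_xor(chan, dma_dest, dma_src,
					      src_cnt, len, dma_flags);
	}

	return tx;
}

The ordering matters: dma_async_issue_pending() is called inside the retry loop,
so descriptors that were prepared but never issued get pushed to hardware and can
complete, which is what makes the spin on the prep call eventually succeed.
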
crypto/async_tx/async_xor.c

@@ -30,6 +30,24 @@
 #include <linux/raid/xor.h>
 #include <linux/async_tx.h>
 
+/**
+ * async_tx_quiesce - ensure tx is complete and freeable upon return
+ * @tx - transaction to quiesce
+ */
+static void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
+{
+	if (*tx) {
+		/* if ack is already set then we cannot be sure
+		 * we are referring to the correct operation
+		 */
+		BUG_ON(async_tx_test_ack(*tx));
+		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
+			panic("DMA_ERROR waiting for transaction\n");
+		async_tx_ack(*tx);
+		*tx = NULL;
+	}
+}
+
 /* do_async_xor - dma map the pages and perform the xor with an engine.
  * This routine is marked __always_inline so it can be compiled away
  * when CONFIG_DMA_ENGINE=n
@@ -85,15 +103,17 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	tx = dma->device_prep_dma_xor(chan, dma_dest, &dma_src[src_off],
 				      xor_src_cnt, len, dma_flags);
 
-	if (unlikely(!tx && depend_tx))
-		dma_wait_for_async_tx(depend_tx);
+	if (unlikely(!tx))
+		async_tx_quiesce(&depend_tx);
 
 	/* spin wait for the preceeding transactions to complete */
-	while (unlikely(!tx))
+	while (unlikely(!tx)) {
+		dma_async_issue_pending(chan);
 		tx = dma->device_prep_dma_xor(chan, dma_dest,
 					      &dma_src[src_off],
 					      xor_src_cnt, len,
 					      dma_flags);
+	}
 
 	async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
 			_cb_param);
@@ -267,11 +287,11 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
 						      len, result,
 						      dma_prep_flags);
-		if (!tx) {
-			if (depend_tx)
-				dma_wait_for_async_tx(depend_tx);
+		if (unlikely(!tx)) {
+			async_tx_quiesce(&depend_tx);
 
 			while (!tx)
+				dma_async_issue_pending(chan);
 				tx = device->device_prep_dma_zero_sum(chan,
 					dma_src, src_cnt, len, result,
 					dma_prep_flags);