Commit 7b3cc2b1 authored by Dan Williams

async_tx: build-time toggling of async_{syndrome,xor}_val dma support

ioat3.2 does not support asynchronous error notifications, which makes
the driver experience latencies when non-zero pq validate results are
expected.  Provide a mechanism for turning off async_xor_val and
async_syndrome_val via Kconfig.  This approach is generally useful for
any driver that specifies ASYNC_TX_DISABLE_CHANNEL_SWITCH and would like
to force the async_tx API to fall back to the synchronous path for
certain operations.
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 4499a24d
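
As the commit message notes, the mechanism is opt-in per driver: a driver
selects the new Kconfig bools alongside ASYNC_TX_DISABLE_CHANNEL_SWITCH,
exactly as the ioatdma hunk below does. A minimal sketch of what that
looks like for a hypothetical driver (MY_DMA is an illustrative name, not
an in-tree symbol):

config MY_DMA
	tristate "Hypothetical DMA offload engine"
	select DMA_ENGINE
	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
	select ASYNC_TX_DISABLE_PQ_VAL_DMA
	select ASYNC_TX_DISABLE_XOR_VAL_DMA

With those selects in place, async_xor_val() and async_syndrome_val()
never look for a DMA channel and always take the synchronous path.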

--- a/crypto/async_tx/Kconfig
+++ b/crypto/async_tx/Kconfig
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
 	select ASYNC_CORE
 	select ASYNC_PQ
 
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+	bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+	bool

--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 }
 EXPORT_SYMBOL_GPL(async_gen_syndrome);
 
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
+				     disks, len);
+}
+
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,9 +270,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 		   size_t len, enum sum_check_flags *pqres, struct page *spare,
 		   struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
-						      NULL, 0, blocks, disks,
-						      len);
+	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
 	unsigned char coefs[disks-2];
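
The helper above is the crux of the change: with
CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA set, pq_val_chan() compiles down to an
unconditional NULL return, async_syndrome_val() therefore sees no channel,
and its existing synchronous fallback runs. A stand-alone sketch of the
same compile-time toggle (plain user-space C with illustrative names, not
the kernel API):

#include <stddef.h>
#include <stdio.h>

struct chan { int id; };		/* stand-in for struct dma_chan */
static struct chan hw_chan = { .id = 0 };

/* Mirrors pq_val_chan(): when the disable switch is defined at build
 * time, the channel lookup is compiled out and callers get NULL. */
static struct chan *val_chan(void)
{
#ifdef DISABLE_VAL_DMA
	return NULL;
#endif
	return &hw_chan;
}

int main(void)
{
	struct chan *c = val_chan();

	if (c)
		printf("offloading validate to channel %d\n", c->id);
	else
		printf("no channel: taking the synchronous path\n");
	return 0;
}

Building with -DDISABLE_VAL_DMA forces the second branch, which is exactly
what the new Kconfig bools do to the kernel helpers.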

--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -234,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 		memcmp(a, a + 4, len - 4) == 0);
 }
 
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+	     struct page **src_list, int src_cnt, size_t len)
+{
+#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	return NULL;
+#endif
+	return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+				     src_cnt, len);
+}
+
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
@@ -255,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	      int src_cnt, size_t len, enum sum_check_flags *result,
 	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
-						      &dest, 1, src_list,
-						      src_cnt, len);
+	struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;
 	dma_addr_t *dma_src = NULL;

--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
 	select DMA_ENGINE
 	select DCA
 	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
+	select ASYNC_TX_DISABLE_PQ_VAL_DMA
+	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
 	  Enable support for the Intel(R) I/OAT DMA engine present
 	  in recent Intel Xeon chipsets.

--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -632,16 +632,22 @@ static bool device_has_all_tx_types(struct dma_device *device)
 	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 		return false;
 	#endif
+	#endif
 
 	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 		return false;
+
+	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 		return false;
 	#endif
+	#endif
 
 	return true;
 }
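
The effect of the two #ifndef guards above: a device that advertises
DMA_XOR and DMA_PQ but not the validate operations (which is what ioat3
looks like after the probe change below) still passes
device_has_all_tx_types() and so remains eligible for operation under
ASYNC_TX_DISABLE_CHANNEL_SWITCH. A compilable sketch of that eligibility
test (illustrative bit names, not the kernel's dma_cap_mask_t API):

#include <stdbool.h>
#include <stdio.h>

enum { CAP_XOR = 1 << 0, CAP_XOR_VAL = 1 << 1 };

static bool has_all_tx_types(unsigned int caps)
{
	if (!(caps & CAP_XOR))
		return false;
#ifndef DISABLE_XOR_VAL
	/* Only demanded when validate offload hasn't been compiled out. */
	if (!(caps & CAP_XOR_VAL))
		return false;
#endif
	return true;
}

int main(void)
{
	/* XOR but no XOR_VAL: ineligible normally, eligible when built
	 * with -DDISABLE_XOR_VAL. */
	printf("eligible: %d\n", has_all_tx_types(CAP_XOR));
	return 0;
}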

--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1206,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 		device->timer_fn = ioat2_timer_event;
 	}
 
+	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+	dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+	dma->device_prep_dma_pq_val = NULL;
+	#endif
+
+	#ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+	dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+	dma->device_prep_dma_xor_val = NULL;
+	#endif
+
 	/* -= IOAT ver.3 workarounds =- */
 	/* Write CHANERRMSK_INT with 3E07h to mask out the errors
 	 * that can cause stability issues for IOAT ver.3