Commit 81e6eb85 authored by NeilBrown, committed by Luis Henriques

async_tx: use GFP_NOWAIT rather than GFP_IO

commit b02bab6b upstream.

These async_XX functions are called from md/raid5 in an atomic
section, between get_cpu() and put_cpu(), so they must not sleep.
So use GFP_NOWAIT rather than GFP_IO.

Dan Williams writes: Longer term async_tx needs to be merged into md
directly as we can allocate this unmap data statically per-stripe
rather than per request.

Fixes: 7476bd79 ("async_pq: convert to dmaengine_unmap_data")
Reported-and-tested-by: Stanislav Samsonov <slava@annapurnalabs.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
parent a366bc60
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
...
@@ -176,7 +176,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 	if (unmap &&
 	    (src_cnt <= dma_maxpq(device, 0) ||
@@ -294,7 +294,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
...
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 	if (unmap) {
 		dma_addr_t dma_dest[2];
...
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
...
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment