Commit 692ed5d4 authored by Meadhbh Fitzpatrick, committed by Herbert Xu

crypto: qat - fix spelling mistakes from 'bufer' to 'buffer'

Fix spelling mistakes from 'bufer' to 'buffer' in qat_common.
Also fix indentation issue caused by the spelling change.
Signed-off-by: Meadhbh Fitzpatrick <meadhbh.fitzpatrick@intel.com>
Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 00bef64a
@@ -37,7 +37,7 @@
 #define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
 #define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
 #define ADF_RING_SIZE_BYTES_MIN(SIZE) \
 	((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
 	ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
...
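As context for the hunk above: ADF_RING_SIZE_BYTES_MIN() clamps a requested ring allocation to the device's 4K minimum. A minimal user-space sketch of the arithmetic, assuming ADF_RING_SIZE_4K is the encoding 0x6 (the real value is defined alongside these macros in qat_common):

#include <stdio.h>

/* Mirrors of the macros in the hunk above; ADF_RING_SIZE_4K = 0x6 is
 * an assumption for illustration. */
#define ADF_RING_SIZE_4K 0x6
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << ((SIZE) - 1)) << 7)
#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
	(((SIZE) < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
	ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : (SIZE))

int main(void)
{
	/* 0x6 -> (1 << 5) << 7 = 4096 bytes, so smaller requests are
	 * rounded up to the 4K minimum and larger ones pass through. */
	printf("%d\n", ADF_RING_SIZE_BYTES_MIN(1024));  /* prints 4096 */
	printf("%d\n", ADF_RING_SIZE_BYTES_MIN(16384)); /* prints 16384 */
	return 0;
}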
@@ -26,8 +26,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

 	for (i = 0; i < bl->num_bufs; i++)
-		dma_unmap_single(dev, bl->bufers[i].addr,
-				 bl->bufers[i].len, bl_dma_dir);
+		dma_unmap_single(dev, bl->buffers[i].addr,
+				 bl->buffers[i].len, bl_dma_dir);

 	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
@@ -36,8 +36,8 @@ void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
 	if (blp != blpout) {
 		for (i = 0; i < blout->num_mapped_bufs; i++) {
-			dma_unmap_single(dev, blout->bufers[i].addr,
-					 blout->bufers[i].len,
+			dma_unmap_single(dev, blout->buffers[i].addr,
+					 blout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 		}
 		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
@@ -63,7 +63,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	dma_addr_t blp = DMA_MAPPING_ERROR;
 	dma_addr_t bloutp = DMA_MAPPING_ERROR;
 	struct scatterlist *sg;
-	size_t sz_out, sz = struct_size(bufl, bufers, n);
+	size_t sz_out, sz = struct_size(bufl, buffers, n);
 	int node = dev_to_node(&GET_DEV(accel_dev));
 	int bufl_dma_dir;
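struct_size() (from <linux/overflow.h>) names the flexible array member directly, which is why the rename must update these call sites in lockstep with the struct definition in qat_bl.h. A user-space sketch of the size computation it performs, using a hypothetical stand-in struct (the kernel macro additionally saturates to SIZE_MAX on overflow rather than wrapping):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct qat_alg_buf_list: a fixed header
 * followed by a flexible array member. */
struct buf_list {
	unsigned int num_bufs;
	struct { unsigned long long addr; unsigned int len; } buffers[];
};

int main(void)
{
	unsigned int n = 4;
	struct buf_list *bl;

	/* struct_size(bl, buffers, n) evaluates to the equivalent of
	 * sizeof(*bl) + n * sizeof(bl->buffers[0]); sizeof does not
	 * evaluate its operand, so bl may be used here unset. */
	size_t sz = sizeof(*bl) + n * sizeof(bl->buffers[0]);

	bl = calloc(1, sz);
	if (!bl)
		return 1;
	bl->num_bufs = n;
	printf("%zu bytes for a %u-entry list\n", sz, bl->num_bufs);
	free(bl);
	return 0;
}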
@@ -86,7 +86,7 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

 	for_each_sg(sgl, sg, n, i) {
 		int y = sg_nctr;
@@ -94,11 +94,11 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 		if (!sg->length)
 			continue;

-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      bufl_dma_dir);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       bufl_dma_dir);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_in;
 		sg_nctr++;
 	}
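The loop in this hunk follows the driver's map-then-unwind DMA pattern: every descriptor is pre-set to DMA_MAPPING_ERROR so the error path can tell mapped entries from untouched ones. A condensed kernel-style sketch of the pattern; map_all() and struct buf are hypothetical names, not driver API:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct buf { dma_addr_t addr; unsigned int len; };

/* Hypothetical helper: map n virtual buffers, unwinding on failure. */
static int map_all(struct device *dev, struct buf *b, void **va,
		   unsigned int *len, int n)
{
	int i;

	/* Pre-mark every slot so the unwind loop below can tell
	 * mapped entries from never-touched ones. */
	for (i = 0; i < n; i++)
		b[i].addr = DMA_MAPPING_ERROR;

	for (i = 0; i < n; i++) {
		b[i].addr = dma_map_single(dev, va[i], len[i], DMA_TO_DEVICE);
		b[i].len = len[i];
		if (dma_mapping_error(dev, b[i].addr))
			goto err;
	}
	return 0;

err:
	/* Unmap only what was successfully mapped; the failed and
	 * untouched slots still hold DMA_MAPPING_ERROR and are skipped. */
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, b[i].addr))
			dma_unmap_single(dev, b[i].addr, b[i].len,
					 DMA_TO_DEVICE);
	return -ENOMEM;
}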
@@ -111,12 +111,12 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	buf->sz = sz;
 	/* Handle out of place operation */
 	if (sgl != sglout) {
-		struct qat_alg_buf *bufers;
+		struct qat_alg_buf *buffers;
 		int extra_buff = extra_dst_buff ? 1 : 0;
 		int n_sglout = sg_nents(sglout);

 		n = n_sglout + extra_buff;
-		sz_out = struct_size(buflout, bufers, n);
+		sz_out = struct_size(buflout, buffers, n);
 		sg_nctr = 0;

 		if (n > QAT_MAX_BUFF_DESC) {
@@ -129,9 +129,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 			buf->sgl_dst_valid = true;
 		}
-		bufers = buflout->bufers;
+		buffers = buflout->buffers;
 		for (i = 0; i < n; i++)
-			bufers[i].addr = DMA_MAPPING_ERROR;
+			buffers[i].addr = DMA_MAPPING_ERROR;

 		for_each_sg(sglout, sg, n_sglout, i) {
 			int y = sg_nctr;
@@ -139,17 +139,17 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 			if (!sg->length)
 				continue;

-			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-							sg->length,
-							DMA_FROM_DEVICE);
-			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+			buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+							 sg->length,
+							 DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
 				goto err_out;
-			bufers[y].len = sg->length;
+			buffers[y].len = sg->length;
 			sg_nctr++;
 		}
 		if (extra_buff) {
-			bufers[sg_nctr].addr = extra_dst_buff;
-			bufers[sg_nctr].len = sz_extra_dst_buff;
+			buffers[sg_nctr].addr = extra_dst_buff;
+			buffers[sg_nctr].len = sz_extra_dst_buff;
 		}

 		buflout->num_bufs = sg_nctr;
@@ -174,11 +174,11 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	n = sg_nents(sglout);
 	for (i = 0; i < n; i++) {
-		if (buflout->bufers[i].addr == extra_dst_buff)
+		if (buflout->buffers[i].addr == extra_dst_buff)
 			break;
-		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
-			dma_unmap_single(dev, buflout->bufers[i].addr,
-					 buflout->bufers[i].len,
+		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
+			dma_unmap_single(dev, buflout->buffers[i].addr,
+					 buflout->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	}
@@ -191,9 +191,9 @@ static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
 	n = sg_nents(sgl);
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 bufl_dma_dir);

 	if (!buf->sgl_src_valid)
@@ -231,9 +231,9 @@ static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
 	int i;

 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bl->bufers[i].addr))
-			dma_unmap_single(dev, bl->bufers[i].addr,
-					 bl->bufers[i].len, DMA_FROM_DEVICE);
+		if (!dma_mapping_error(dev, bl->buffers[i].addr))
+			dma_unmap_single(dev, bl->buffers[i].addr,
+					 bl->buffers[i].len, DMA_FROM_DEVICE);
 }

 static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
@@ -248,13 +248,13 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 	size_t sz;

 	n = sg_nents(sgl);
-	sz = struct_size(bufl, bufers, n);
+	sz = struct_size(bufl, buffers, n);
 	bufl = kzalloc_node(sz, GFP_KERNEL, node);
 	if (unlikely(!bufl))
 		return -ENOMEM;

 	for (i = 0; i < n; i++)
-		bufl->bufers[i].addr = DMA_MAPPING_ERROR;
+		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

 	sg_nctr = 0;
 	for_each_sg(sgl, sg, n, i) {
@@ -263,11 +263,11 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 		if (!sg->length)
 			continue;

-		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
-						      sg->length,
-						      DMA_FROM_DEVICE);
-		bufl->bufers[y].len = sg->length;
-		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
+						       sg->length,
+						       DMA_FROM_DEVICE);
+		bufl->buffers[y].len = sg->length;
+		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
 			goto err_map;
 		sg_nctr++;
 	}
@@ -280,9 +280,9 @@ static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
 err_map:
 	for (i = 0; i < n; i++)
-		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
-			dma_unmap_single(dev, bufl->bufers[i].addr,
-					 bufl->bufers[i].len,
+		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
+			dma_unmap_single(dev, bufl->buffers[i].addr,
+					 bufl->buffers[i].len,
 					 DMA_FROM_DEVICE);
 	kfree(bufl);
 	*bl = NULL;
@@ -351,7 +351,7 @@ int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
 	if (ret)
 		return ret;

-	new_bl_size = struct_size(new_bl, bufers, new_bl->num_bufs);
+	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);

 	/* Map new firmware SGL descriptor */
 	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
...
@@ -18,7 +18,7 @@ struct qat_alg_buf_list {
 	u64 resrvd;
 	u32 num_bufs;
 	u32 num_mapped_bufs;
-	struct qat_alg_buf bufers[];
+	struct qat_alg_buf buffers[];
 } __packed;

 struct qat_alg_fixed_buf_list {
...
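The renamed member is a flexible array at the tail of a __packed, firmware-facing descriptor, so the change is purely lexical: the byte layout the device sees is untouched. A small user-space sketch of that layout; the qat_alg_buf fields (len, resrvd, addr) are an assumption based on qat_bl.h, with stdint types standing in for the kernel's u32/u64:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed layout of the per-buffer descriptor from qat_bl.h. */
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __attribute__((packed));

/* Copy of the list descriptor from the hunk above. */
struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf buffers[];
} __attribute__((packed));

int main(void)
{
	/* The flexible array begins immediately after the fixed
	 * header; renaming it cannot move a single byte. */
	printf("header  %zu bytes\n", sizeof(struct qat_alg_buf_list));
	printf("entry   %zu bytes\n", sizeof(struct qat_alg_buf));
	printf("buffers at offset %zu\n",
	       offsetof(struct qat_alg_buf_list, buffers));
	return 0;
}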