Commit 5109c133 authored by Gilad Ben-Yossef, committed by Greg Kroah-Hartman

crypto: ccree - remove special handling of chained sg

commit c4b22bf5 upstream.

We were needlessly handling chained scatter-gather lists with specialized
code; the regular sg APIs handle them just fine. The special-case code
also had an (unused) path with a use-before-init error, flagged by
Coverity.

Remove all special handling of chained sg and leave their handling
to the regular sg APIs.
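
For background, the generic scatterlist API already hides chaining:
sg_next() skips chain entries and follows the link into the next table,
so a plain walk visits only real data entries whether or not the list is
chained. A minimal sketch of such a walk (illustrative only, not part of
this patch; the helper name is made up):

    #include <linux/scatterlist.h>

    /* Count how many entries are needed to cover nbytes, the same way
     * the simplified cc_get_sgl_nents() below does. sg_next() follows
     * chain links transparently, so chained and flat lists are walked
     * identically and need no driver-side special casing.
     */
    static unsigned int example_sg_count(struct scatterlist *sg,
                                         unsigned int nbytes)
    {
            unsigned int nents = 0;

            while (nbytes && sg) {
                    nents++;
                    nbytes -= (sg->length > nbytes) ? nbytes : sg->length;
                    sg = sg_next(sg);
            }
            return nents;
    }

The in-tree helper sg_nents_for_len() offers similar counting, and on
architectures with sg chaining support dma_map_sg() accepts a chained
list directly, which is what the simplified cc_map_sg() relies on.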
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Cc: stable@vger.kernel.org # v4.19+
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0e643cb0
@@ -83,24 +83,17 @@ static void cc_copy_mac(struct device *dev, struct aead_request *req,
  */
 static unsigned int cc_get_sgl_nents(struct device *dev,
                                      struct scatterlist *sg_list,
-                                     unsigned int nbytes, u32 *lbytes,
-                                     bool *is_chained)
+                                     unsigned int nbytes, u32 *lbytes)
 {
         unsigned int nents = 0;
 
         while (nbytes && sg_list) {
-                if (sg_list->length) {
-                        nents++;
-                        /* get the number of bytes in the last entry */
-                        *lbytes = nbytes;
-                        nbytes -= (sg_list->length > nbytes) ?
-                                        nbytes : sg_list->length;
-                        sg_list = sg_next(sg_list);
-                } else {
-                        sg_list = (struct scatterlist *)sg_page(sg_list);
-                        if (is_chained)
-                                *is_chained = true;
-                }
+                nents++;
+                /* get the number of bytes in the last entry */
+                *lbytes = nbytes;
+                nbytes -= (sg_list->length > nbytes) ?
+                                nbytes : sg_list->length;
+                sg_list = sg_next(sg_list);
         }
         dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes);
         return nents;
@@ -142,7 +135,7 @@ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg,
 {
         u32 nents, lbytes;
 
-        nents = cc_get_sgl_nents(dev, sg, end, &lbytes, NULL);
+        nents = cc_get_sgl_nents(dev, sg, end, &lbytes);
         sg_copy_buffer(sg, nents, (void *)dest, (end - to_skip + 1), to_skip,
                        (direct == CC_SG_TO_BUF));
 }
@@ -314,40 +307,10 @@ static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data,
         sgl_data->num_of_buffers++;
 }
 
-static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
-                         enum dma_data_direction direction)
-{
-        u32 i, j;
-        struct scatterlist *l_sg = sg;
-
-        for (i = 0; i < nents; i++) {
-                if (!l_sg)
-                        break;
-                if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
-                        dev_err(dev, "dma_map_page() sg buffer failed\n");
-                        goto err;
-                }
-                l_sg = sg_next(l_sg);
-        }
-        return nents;
-
-err:
-        /* Restore mapped parts */
-        for (j = 0; j < i; j++) {
-                if (!sg)
-                        break;
-                dma_unmap_sg(dev, sg, 1, direction);
-                sg = sg_next(sg);
-        }
-        return 0;
-}
-
 static int cc_map_sg(struct device *dev, struct scatterlist *sg,
                      unsigned int nbytes, int direction, u32 *nents,
                      u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
 {
-        bool is_chained = false;
-
         if (sg_is_last(sg)) {
                 /* One entry only case -set to DLLI */
                 if (dma_map_sg(dev, sg, 1, direction) != 1) {
@@ -361,35 +324,21 @@ static int cc_map_sg(struct device *dev, struct scatterlist *sg,
                 *nents = 1;
                 *mapped_nents = 1;
         } else { /*sg_is_last*/
-                *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
-                                          &is_chained);
+                *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes);
                 if (*nents > max_sg_nents) {
                         *nents = 0;
                         dev_err(dev, "Too many fragments. current %d max %d\n",
                                 *nents, max_sg_nents);
                         return -ENOMEM;
                 }
-                if (!is_chained) {
-                        /* In case of mmu the number of mapped nents might
-                         * be changed from the original sgl nents
-                         */
-                        *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
-                        if (*mapped_nents == 0) {
-                                *nents = 0;
-                                dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                                return -ENOMEM;
-                        }
-                } else {
-                        /*In this case the driver maps entry by entry so it
-                         * must have the same nents before and after map
-                         */
-                        *mapped_nents = cc_dma_map_sg(dev, sg, *nents,
-                                                      direction);
-                        if (*mapped_nents != *nents) {
-                                *nents = *mapped_nents;
-                                dev_err(dev, "dma_map_sg() sg buffer failed\n");
-                                return -ENOMEM;
-                        }
+                /* In case of mmu the number of mapped nents might
+                 * be changed from the original sgl nents
+                 */
+                *mapped_nents = dma_map_sg(dev, sg, *nents, direction);
+                if (*mapped_nents == 0) {
+                        *nents = 0;
+                        dev_err(dev, "dma_map_sg() sg buffer failed\n");
+                        return -ENOMEM;
                 }
         }
@@ -571,7 +520,6 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
         struct cc_drvdata *drvdata = dev_get_drvdata(dev);
         u32 dummy;
-        bool chained;
         u32 size_to_unmap = 0;
 
         if (areq_ctx->mac_buf_dma_addr) {
@@ -636,15 +584,14 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
                 size_to_unmap += crypto_aead_ivsize(tfm);
 
         dma_unmap_sg(dev, req->src,
-                     cc_get_sgl_nents(dev, req->src, size_to_unmap,
-                                      &dummy, &chained),
+                     cc_get_sgl_nents(dev, req->src, size_to_unmap, &dummy),
                      DMA_BIDIRECTIONAL);
         if (req->src != req->dst) {
                 dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
                         sg_virt(req->dst));
                 dma_unmap_sg(dev, req->dst,
                              cc_get_sgl_nents(dev, req->dst, size_to_unmap,
-                                              &dummy, &chained),
+                                              &dummy),
                              DMA_BIDIRECTIONAL);
         }
         if (drvdata->coherent &&
@@ -1022,7 +969,6 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
         unsigned int size_for_map = req->assoclen + req->cryptlen;
         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
         u32 sg_index = 0;
-        bool chained = false;
         bool is_gcm4543 = areq_ctx->is_gcm4543;
         u32 size_to_skip = req->assoclen;
@@ -1043,7 +989,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
         size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                         authsize : 0;
         src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map,
-                                            &src_last_bytes, &chained);
+                                            &src_last_bytes);
         sg_index = areq_ctx->src_sgl->length;
         //check where the data starts
         while (sg_index <= size_to_skip) {
@@ -1083,7 +1029,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
         }
 
         dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map,
-                                            &dst_last_bytes, &chained);
+                                            &dst_last_bytes);
         sg_index = areq_ctx->dst_sgl->length;
         offset = size_to_skip;
@@ -1484,7 +1430,7 @@ int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx,
                 dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n",
                         curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]);
                 areq_ctx->in_nents =
-                        cc_get_sgl_nents(dev, src, nbytes, &dummy, NULL);
+                        cc_get_sgl_nents(dev, src, nbytes, &dummy);
                 sg_copy_to_buffer(src, areq_ctx->in_nents,
                                   &curr_buff[*curr_buff_cnt], nbytes);
                 *curr_buff_cnt += nbytes;