Commit 059d73ee authored by Horia Geantă, committed by Herbert Xu

crypto: caam - use len instead of nents for building HW S/G table

Currently, conversion of the SW S/G table into the HW S/G layout relies on
nents returned by sg_nents_for_len(sg, len).
However, this leaves the possibility of the HW S/G referencing more data
than needed: since the buffer length in HW S/G entries is filled in using
sg_dma_len(sg), the last entry in the HW S/G table might have a length
that is bigger than needed for the crypto request.

This way of S/G table conversion is fine, unless more entries have to be
appended to the HW S/G table after the conversion.
In that case, the crypto engine would access data from the S/G entry having
the incorrect (bigger) length, instead of advancing in the S/G table.
This situation doesn't exist yet, but the upcoming implementation of
IV update for skcipher algorithms needs to add an S/G entry after the
req->dst S/G (corresponding to the output IV).
Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 1fa6d053
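
To make the difference concrete, below is a minimal standalone sketch, not
part of the patch; sw_sg and hw_sg are hypothetical stand-ins for struct
scatterlist and a HW S/G entry. A walk bounded by nents copies sg_dma_len()
verbatim into the last entry, while a walk bounded by len trims the last
entry to the exact request length:

/* Illustrative sketch only; types and names are hypothetical. */
struct sw_sg {                          /* stand-in for struct scatterlist */
        unsigned long dma_addr;
        unsigned int dma_len;
        struct sw_sg *next;
};

struct hw_sg {                          /* stand-in for a HW S/G entry */
        unsigned long addr;
        unsigned int len;
};

/* Old scheme: walk "nents" entries, copying sg_dma_len() verbatim. */
static void sg_to_hw_by_nents(struct sw_sg *sg, int nents, struct hw_sg *out)
{
        while (nents-- && sg) {
                out->addr = sg->dma_addr;
                out->len = sg->dma_len; /* last entry may exceed the request */
                out++;
                sg = sg->next;
        }
}

/* New scheme: walk until "len" bytes are covered, trimming the last entry. */
static void sg_to_hw_by_len(struct sw_sg *sg, int len, struct hw_sg *out)
{
        while (len > 0 && sg) {
                unsigned int ent_len = sg->dma_len < (unsigned int)len ?
                                       sg->dma_len : (unsigned int)len;

                out->addr = sg->dma_addr;
                out->len = ent_len;     /* table covers exactly "len" bytes */
                out++;
                len -= ent_len;
                sg = sg->next;
        }
}

For example, with len = 100 and two mapped entries of 64 bytes each, the
nents-based walk emits 64 + 64 = 128 bytes, while the len-based walk emits
64 + 36 = 100, so an entry appended right after the table (such as the
output IV mentioned above) is actually reached by the engine.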
@@ -1284,37 +1284,36 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	int sec4_sg_index, sec4_sg_len, sec4_sg_bytes;
 	unsigned int authsize = ctx->authsize;
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			return ERR_PTR(dst_nents);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			return ERR_PTR(src_nents);
 		}
 	}
@@ -1386,12 +1385,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 	sec4_sg_index = 0;
 	if (mapped_src_nents > 1) {
-		sg_to_sec4_sg_last(req->src, mapped_src_nents,
+		sg_to_sec4_sg_last(req->src, src_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 		sec4_sg_index += mapped_src_nents;
 	}
 	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+		sg_to_sec4_sg_last(req->dst, dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
@@ -1756,11 +1755,11 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 		dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0);
 	}
 	if (dst_sg_idx)
-		sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg +
-				   !!ivsize, 0);
+		sg_to_sec4_sg_last(req->src, req->cryptlen, edesc->sec4_sg +
+				   !!ivsize, 0);
 	if (mapped_dst_nents > 1) {
-		sg_to_sec4_sg_last(req->dst, mapped_dst_nents,
+		sg_to_sec4_sg_last(req->dst, req->cryptlen,
 				   edesc->sec4_sg + dst_sg_idx, 0);
 	}
...
@@ -917,6 +917,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -938,13 +939,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (likely(req->src == req->dst)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -957,23 +958,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			return ERR_PTR(-ENOMEM);
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
@@ -1082,12 +1081,11 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(qidev, qm_sg_dma)) {
@@ -1340,10 +1338,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 	edesc->drv_req.drv_ctx = drv_ctx;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 dst_sg_idx, 0);
+		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
+				 dst_sg_idx, 0);
 
 	edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
...
@@ -371,6 +371,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 		       GFP_KERNEL : GFP_ATOMIC;
 	int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+	int src_len, dst_len = 0;
 	struct aead_edesc *edesc;
 	dma_addr_t qm_sg_dma, iv_dma = 0;
 	int ivsize = 0;
@@ -387,23 +388,21 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}
 
 	if (unlikely(req->dst != req->src)) {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen);
+		src_len = req->assoclen + req->cryptlen;
+		dst_len = src_len + (encrypt ? authsize : (-authsize));
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen);
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
 
-		dst_nents = sg_nents_for_len(req->dst, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize :
-							(-authsize)));
+		dst_nents = sg_nents_for_len(req->dst, dst_len);
 		if (unlikely(dst_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : (-authsize)));
+				dst_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(dst_nents);
 		}
@@ -434,13 +433,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 			mapped_dst_nents = 0;
 		}
 	} else {
-		src_nents = sg_nents_for_len(req->src, req->assoclen +
-					     req->cryptlen +
-					     (encrypt ? authsize : 0));
+		src_len = req->assoclen + req->cryptlen +
+			  (encrypt ? authsize : 0);
+
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (unlikely(src_nents < 0)) {
 			dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
-				req->assoclen + req->cryptlen +
-				(encrypt ? authsize : 0));
+				src_len);
 			qi_cache_free(edesc);
 			return ERR_PTR(src_nents);
 		}
@@ -536,12 +535,11 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 
 		dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
 		qm_sg_index++;
 	}
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+	sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
 	qm_sg_index += mapped_src_nents;
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 qm_sg_index, 0);
+		sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
 
 	qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, qm_sg_dma)) {
@@ -1159,10 +1157,10 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
 	edesc->qm_sg_bytes = qm_sg_bytes;
 
 	dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
-	sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->cryptlen, sg_table + 1, 0);
 
 	if (mapped_dst_nents > 1)
-		sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
-				 dst_sg_idx, 0);
+		sg_to_qm_sg_last(req->dst, req->cryptlen, sg_table +
+				 dst_sg_idx, 0);
 
 	edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
@@ -3422,9 +3420,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3465,7 +3463,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 			goto unmap_ctx;
 
 		if (mapped_nents) {
-			sg_to_qm_sg_last(req->src, mapped_nents,
+			sg_to_qm_sg_last(req->src, src_len,
 					 sg_table + qm_sg_src_index, 0);
 			if (*next_buflen)
 				scatterwalk_map_and_copy(next_buf, req->src,
@@ -3653,7 +3651,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap_ctx;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -3739,7 +3737,7 @@ static int ahash_digest(struct ahash_request *req)
 		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
 
 		qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+		sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
 		edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
 						  qm_sg_bytes, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
@@ -3882,9 +3880,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -3918,7 +3916,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
 
 		if (*next_buflen)
 			scatterwalk_map_and_copy(next_buf, req->src,
@@ -4037,7 +4035,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	if (ret)
 		goto unmap;
 
-	sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
 
 	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
 					  DMA_TO_DEVICE);
@@ -4107,9 +4105,9 @@ static int ahash_update_first(struct ahash_request *req)
 
 	if (to_hash) {
 		struct dpaa2_sg_entry *sg_table;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(ctx->dev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -4144,7 +4142,7 @@ static int ahash_update_first(struct ahash_request *req)
 		if (mapped_nents > 1) {
 			int qm_sg_bytes;
 
-			sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
 			qm_sg_bytes = pad_sg_nents(mapped_nents) *
 				      sizeof(*sg_table);
 			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
...
@@ -729,7 +729,7 @@ static int ahash_edesc_add_src(struct caam_hash_ctx *ctx,
 		unsigned int sgsize = sizeof(*sg) *
 				      pad_sg_nents(first_sg + nents);
 
-		sg_to_sec4_sg_last(req->src, nents, sg + first_sg, 0);
+		sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0);
 
 		src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE);
 		if (dma_mapping_error(ctx->jrdev, src_dma)) {
@@ -788,9 +788,9 @@ static int ahash_update_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - (*next_buflen));
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -835,7 +835,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 			goto unmap_ctx;
 
 		if (mapped_nents)
-			sg_to_sec4_sg_last(req->src, mapped_nents,
+			sg_to_sec4_sg_last(req->src, src_len,
 					   edesc->sec4_sg + sec4_sg_src_index,
 					   0);
 		else
@@ -1208,9 +1208,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 
 	if (to_hash) {
 		int pad_nents;
+		int src_len = req->nbytes - *next_buflen;
 
-		src_nents = sg_nents_for_len(req->src,
-					     req->nbytes - *next_buflen);
+		src_nents = sg_nents_for_len(req->src, src_len);
 		if (src_nents < 0) {
 			dev_err(jrdev, "Invalid number of src SG.\n");
 			return src_nents;
@@ -1250,8 +1250,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		if (ret)
 			goto unmap_ctx;
 
-		sg_to_sec4_sg_last(req->src, mapped_nents,
-				   edesc->sec4_sg + 1, 0);
+		sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0);
 
 		if (*next_buflen) {
 			scatterwalk_map_and_copy(next_buf, req->src,
...
@@ -306,11 +306,11 @@ static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req,
 			       0);
 
 	if (sec4_sg_index)
-		sg_to_sec4_sg_last(req_ctx->fixup_src, src_nents,
+		sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len,
 				   edesc->sec4_sg + !!diff_size, 0);
 
 	if (dst_nents > 1)
-		sg_to_sec4_sg_last(req->dst, dst_nents,
+		sg_to_sec4_sg_last(req->dst, req->dst_len,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 
 	/* Save nents for later use in Job Descriptor */
...
@@ -54,15 +54,19 @@ static inline void dma_to_qm_sg_one_last_ext(struct qm_sg_entry *qm_sg_ptr,
  * but does not have final bit; instead, returns last entry
  */
 static inline struct qm_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
 	    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	while (sg_count && sg) {
-		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-				 sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+				 offset);
 		qm_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return qm_sg_ptr - 1;
 }
@@ -71,10 +75,10 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
 /*
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
 				    struct qm_sg_entry *qm_sg_ptr, u16 offset)
 {
-	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
 	qm_sg_entry_set_f(qm_sg_ptr, qm_sg_entry_get_len(qm_sg_ptr));
 }
...
@@ -25,15 +25,19 @@ static inline void dma_to_qm_sg_one(struct dpaa2_sg_entry *qm_sg_ptr,
  * but does not have final bit; instead, returns last entry
  */
 static inline struct dpaa2_sg_entry *
-sg_to_qm_sg(struct scatterlist *sg, int sg_count,
+sg_to_qm_sg(struct scatterlist *sg, int len,
 	    struct dpaa2_sg_entry *qm_sg_ptr, u16 offset)
 {
-	while (sg_count && sg) {
-		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg),
-				 sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_qm_sg_one(qm_sg_ptr, sg_dma_address(sg), ent_len,
+				 offset);
 		qm_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return qm_sg_ptr - 1;
 }
@@ -42,11 +46,11 @@ sg_to_qm_sg(struct scatterlist *sg, int sg_count,
 /*
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
  */
-static inline void sg_to_qm_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_qm_sg_last(struct scatterlist *sg, int len,
 				    struct dpaa2_sg_entry *qm_sg_ptr,
 				    u16 offset)
 {
-	qm_sg_ptr = sg_to_qm_sg(sg, sg_count, qm_sg_ptr, offset);
+	qm_sg_ptr = sg_to_qm_sg(sg, len, qm_sg_ptr, offset);
 	dpaa2_sg_set_final(qm_sg_ptr, true);
 }
...
@@ -45,15 +45,19 @@ static inline void dma_to_sec4_sg_one(struct sec4_sg_entry *sec4_sg_ptr,
  * but does not have final bit; instead, returns last entry
  */
 static inline struct sec4_sg_entry *
-sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
+sg_to_sec4_sg(struct scatterlist *sg, int len,
 	      struct sec4_sg_entry *sec4_sg_ptr, u16 offset)
 {
-	while (sg_count) {
-		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
-				   sg_dma_len(sg), offset);
+	int ent_len;
+
+	while (len) {
+		ent_len = min_t(int, sg_dma_len(sg), len);
+
+		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg), ent_len,
+				   offset);
 		sec4_sg_ptr++;
 		sg = sg_next(sg);
-		sg_count--;
+		len -= ent_len;
 	}
 	return sec4_sg_ptr - 1;
 }
@@ -70,11 +74,11 @@ static inline void sg_to_sec4_set_last(struct sec4_sg_entry *sec4_sg_ptr)
 /*
  * convert scatterlist to h/w link table format
  * scatterlist must have been previously dma mapped
 */
-static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
+static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int len,
 				      struct sec4_sg_entry *sec4_sg_ptr,
 				      u16 offset)
 {
-	sec4_sg_ptr = sg_to_sec4_sg(sg, sg_count, sec4_sg_ptr, offset);
+	sec4_sg_ptr = sg_to_sec4_sg(sg, len, sec4_sg_ptr, offset);
 	sg_to_sec4_set_last(sec4_sg_ptr);
 }
...
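
For context, a hedged sketch of the intended follow-up usage, not part of
this patch. It only uses helpers visible in the hunks above; the wrapper
function and its placement are hypothetical, simplified from the skcipher
paths (error handling and DMA mapping omitted). Because the dst table now
covers exactly req->cryptlen bytes, the final bit can be moved onto an
appended output-IV entry and the engine will actually advance into it:

/* Hypothetical sketch: len-bounded dst table plus an appended output IV. */
static void build_dst_table_with_iv(struct skcipher_request *req,
				    struct sec4_sg_entry *sg_table,
				    dma_addr_t iv_dma, int ivsize)
{
	struct sec4_sg_entry *last;

	/* non-final variant: returns the last entry it wrote */
	last = sg_to_sec4_sg(req->dst, req->cryptlen, sg_table, 0);

	/*
	 * the engine reaches this entry only because the table above
	 * covers exactly req->cryptlen bytes, no more
	 */
	dma_to_sec4_sg_one(last + 1, iv_dma, ivsize, 0);
	sg_to_sec4_set_last(last + 1);
}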