Commit 2ba1e798 authored by Iuliana Prodan, committed by Herbert Xu

crypto: caam - refactor ahash_edesc_alloc

Change the parameters of the ahash_edesc_alloc function:
- remove flags, since they can be computed in
  ahash_edesc_alloc, the only place they are needed;
- use ahash_request instead of caam_hash_ctx, to be
  able to compute the gfp flags.
Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Reviewed-by: Horia Geanta <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent c3f7394e
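
Note: the heart of the change is that ahash_edesc_alloc now derives the GFP flags from the request itself instead of every caller computing and passing them. A minimal userspace sketch of that pattern is shown before the diff; the flag value, the trimmed-down structures, the simplified signature and the calloc stand-in are illustrative assumptions, not the driver code.

#include <stdlib.h>

#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200   /* placeholder value for this sketch */

typedef unsigned int gfp_t;
enum { GFP_ATOMIC, GFP_KERNEL };               /* placeholders for the kernel flags */

struct ahash_request { struct { unsigned int flags; } base; };
struct ahash_edesc   { int sg_num; /* hw descriptor + sg table follow in the real struct */ };

/*
 * After the refactor, the allocator takes the request and computes the
 * allocation flags internally, so callers no longer pass a gfp_t.
 * (The real function also takes sh_desc/sh_desc_dma; omitted here.)
 */
static struct ahash_edesc *ahash_edesc_alloc_sketch(struct ahash_request *req,
						    int sg_num)
{
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;

	/* The driver passes flags to kzalloc(); plain calloc stands in here. */
	struct ahash_edesc *edesc = calloc(1, sizeof(*edesc));

	if (edesc)
		edesc->sg_num = sg_num;
	(void)flags;
	return edesc;
}

/*
 * Callers shrink from ahash_edesc_alloc(ctx, n, sh_desc, sh_desc_dma, flags)
 * to ahash_edesc_alloc(req, n, sh_desc, sh_desc_dma), as the hunks below show.
 */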
@@ -661,11 +661,14 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
  * Allocate an enhanced descriptor, which contains the hardware descriptor
  * and space for hardware scatter table containing sg_num entries.
  */
-static struct ahash_edesc *ahash_edesc_alloc(struct caam_hash_ctx *ctx,
-					     int sg_num, u32 *sh_desc,
-					     dma_addr_t sh_desc_dma,
-					     gfp_t flags)
+static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
+					     int sg_num, u32 *sh_desc,
+					     dma_addr_t sh_desc_dma)
 {
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+		       GFP_KERNEL : GFP_ATOMIC;
 	struct ahash_edesc *edesc;
 	unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
@@ -724,8 +727,6 @@ static int ahash_update_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = state->buf;
 	int *buflen = &state->buflen;
 	int *next_buflen = &state->next_buflen;
@@ -779,8 +780,8 @@ static int ahash_update_ctx(struct ahash_request *req)
 	 * allocate space for base edesc and hw desc commands,
 	 * link tables
 	 */
-	edesc = ahash_edesc_alloc(ctx, pad_nents, ctx->sh_desc_update,
-				  ctx->sh_desc_update_dma, flags);
+	edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update,
+				  ctx->sh_desc_update_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -854,8 +855,6 @@ static int ahash_final_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	int buflen = state->buflen;
 	u32 *desc;
 	int sec4_sg_bytes;
@@ -867,8 +866,8 @@ static int ahash_final_ctx(struct ahash_request *req)
 			sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, 4, ctx->sh_desc_fin,
-				  ctx->sh_desc_fin_dma, flags);
+	edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin,
+				  ctx->sh_desc_fin_dma);
 	if (!edesc)
 		return -ENOMEM;
@@ -920,8 +919,6 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	int buflen = state->buflen;
 	u32 *desc;
 	int sec4_sg_src_index;
@@ -950,9 +947,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_fin, ctx->sh_desc_fin_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -1000,8 +996,6 @@ static int ahash_digest(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	u32 *desc;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	int src_nents, mapped_nents;
@@ -1028,9 +1022,8 @@ static int ahash_digest(struct ahash_request *req)
 	}
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ? mapped_nents : 0,
-				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -1077,8 +1070,6 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = state->buf;
 	int buflen = state->buflen;
 	u32 *desc;
@@ -1087,8 +1078,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
 	int ret;
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, 0, ctx->sh_desc_digest,
-				  ctx->sh_desc_digest_dma, flags);
+	edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest,
+				  ctx->sh_desc_digest_dma);
 	if (!edesc)
 		return -ENOMEM;
@@ -1136,8 +1127,6 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = state->buf;
 	int *buflen = &state->buflen;
 	int *next_buflen = &state->next_buflen;
@@ -1190,10 +1179,9 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	 * allocate space for base edesc and hw desc commands,
 	 * link tables
 	 */
-	edesc = ahash_edesc_alloc(ctx, pad_nents,
-				  ctx->sh_desc_update_first,
-				  ctx->sh_desc_update_first_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, pad_nents,
+				  ctx->sh_desc_update_first,
+				  ctx->sh_desc_update_first_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -1261,8 +1249,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	int buflen = state->buflen;
 	u32 *desc;
 	int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents;
@@ -1292,9 +1278,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 			sizeof(struct sec4_sg_entry);
 
 	/* allocate space for base edesc and hw desc commands, link tables */
-	edesc = ahash_edesc_alloc(ctx, sec4_sg_src_index + mapped_nents,
-				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents,
+				  ctx->sh_desc_digest, ctx->sh_desc_digest_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
@@ -1347,8 +1332,6 @@ static int ahash_update_first(struct ahash_request *req)
 	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
 	struct caam_hash_state *state = ahash_request_ctx(req);
 	struct device *jrdev = ctx->jrdev;
-	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
-		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = state->buf;
 	int *buflen = &state->buflen;
 	int *next_buflen = &state->next_buflen;
@@ -1396,11 +1379,10 @@ static int ahash_update_first(struct ahash_request *req)
 	 * allocate space for base edesc and hw desc commands,
 	 * link tables
 	 */
-	edesc = ahash_edesc_alloc(ctx, mapped_nents > 1 ?
-				  mapped_nents : 0,
-				  ctx->sh_desc_update_first,
-				  ctx->sh_desc_update_first_dma,
-				  flags);
+	edesc = ahash_edesc_alloc(req, mapped_nents > 1 ?
+				  mapped_nents : 0,
+				  ctx->sh_desc_update_first,
+				  ctx->sh_desc_update_first_dma);
 	if (!edesc) {
 		dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
 		return -ENOMEM;
...