Commit c018c7a9 authored by Corentin Labbe, committed by Herbert Xu

crypto: rockchip - use a rk_crypto_info variable instead of a lot of indirection

Instead of using a lot of ctx->dev->xx indirections, use an intermediate
variable for the rk_crypto_info pointer.
This will help later, when two different rk_crypto_info instances will be used.
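
For illustration, here is a minimal standalone sketch of the pattern (all
structure, field, and function names below are hypothetical stand-ins, not
the driver's actual types):

    #include <stdio.h>

    /* Simplified stand-ins for rk_crypto_info and the tfm context. */
    struct crypto_dev {
        const char *name;
        int status;
    };

    struct cipher_ctx {
        struct crypto_dev *dev;
    };

    /* Before: every access repeats the ctx->dev-> indirection. */
    static void run_indirect(struct cipher_ctx *ctx)
    {
        ctx->dev->status = 0;
        printf("running on %s\n", ctx->dev->name);
    }

    /* After: hoist the pointer into one local, as the patch does with
     * rkc. If a request can later run on one of several instances,
     * only this single assignment has to pick the device. */
    static void run_local(struct cipher_ctx *ctx)
    {
        struct crypto_dev *rkc = ctx->dev;

        rkc->status = 0;
        printf("running on %s\n", rkc->name);
    }

    int main(void)
    {
        struct crypto_dev dev = { .name = "rk-crypto", .status = -1 };
        struct cipher_ctx ctx = { .dev = &dev };

        run_indirect(&ctx);
        run_local(&ctx);
        return 0;
    }

Besides trimming the repeated pointer chasing, the local variable leaves a
single place to change once a request may be scheduled on more than one
crypto instance.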
Reviewed-by: John Keeping <john@metanate.com>
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 2e3b1495
--- a/drivers/crypto/rockchip/rk3288_crypto_ahash.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_ahash.c
@@ -226,9 +226,10 @@ static int rk_hash_prepare(struct crypto_engine *engine, void *breq)
     struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
     struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
     struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+    struct rk_crypto_info *rkc = tctx->dev;
     int ret;

-    ret = dma_map_sg(tctx->dev->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
+    ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE);
     if (ret <= 0)
         return -EINVAL;
@@ -243,8 +244,9 @@ static int rk_hash_unprepare(struct crypto_engine *engine, void *breq)
     struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
     struct rk_ahash_rctx *rctx = ahash_request_ctx(areq);
     struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm);
+    struct rk_crypto_info *rkc = tctx->dev;

-    dma_unmap_sg(tctx->dev->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
+    dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE);
     return 0;
 }
@@ -257,6 +259,7 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
     struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg);
     struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash);
     struct scatterlist *sg = areq->src;
+    struct rk_crypto_info *rkc = tctx->dev;
     int err = 0;
     int i;
     u32 v;
@@ -283,13 +286,13 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
     rk_ahash_reg_init(areq);

     while (sg) {
-        reinit_completion(&tctx->dev->complete);
-        tctx->dev->status = 0;
-        crypto_ahash_dma_start(tctx->dev, sg);
-        wait_for_completion_interruptible_timeout(&tctx->dev->complete,
+        reinit_completion(&rkc->complete);
+        rkc->status = 0;
+        crypto_ahash_dma_start(rkc, sg);
+        wait_for_completion_interruptible_timeout(&rkc->complete,
                                                   msecs_to_jiffies(2000));
-        if (!tctx->dev->status) {
-            dev_err(tctx->dev->dev, "DMA timeout\n");
+        if (!rkc->status) {
+            dev_err(rkc->dev, "DMA timeout\n");
             err = -EFAULT;
             goto theend;
         }
@@ -306,10 +309,10 @@ static int rk_hash_run(struct crypto_engine *engine, void *breq)
      * efficiency, and make it response quickly when dma
      * complete.
      */
-    readl_poll_timeout(tctx->dev->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);
+    readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000);

     for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) {
-        v = readl(tctx->dev->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
+        v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4);
         put_unaligned_le32(v, areq->result + i * 4);
     }
--- a/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
+++ b/drivers/crypto/rockchip/rk3288_crypto_skcipher.c
@@ -303,6 +303,7 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
     unsigned int todo;
     struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
     struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher);
+    struct rk_crypto_info *rkc = ctx->dev;

     algt->stat_req++;
@@ -330,49 +331,49 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
             scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0);
         }
         if (sgs == sgd) {
-            err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+            err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
             if (err <= 0) {
                 err = -EINVAL;
                 goto theend_iv;
             }
         } else {
-            err = dma_map_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
+            err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
             if (err <= 0) {
                 err = -EINVAL;
                 goto theend_iv;
             }
-            err = dma_map_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+            err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
             if (err <= 0) {
                 err = -EINVAL;
                 goto theend_sgs;
             }
         }
         err = 0;
-        rk_cipher_hw_init(ctx->dev, areq);
+        rk_cipher_hw_init(rkc, areq);
         if (ivsize) {
             if (ivsize == DES_BLOCK_SIZE)
-                memcpy_toio(ctx->dev->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
+                memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize);
             else
-                memcpy_toio(ctx->dev->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
+                memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize);
         }
-        reinit_completion(&ctx->dev->complete);
-        ctx->dev->status = 0;
+        reinit_completion(&rkc->complete);
+        rkc->status = 0;

         todo = min(sg_dma_len(sgs), len);
         len -= todo;
-        crypto_dma_start(ctx->dev, sgs, sgd, todo / 4);
-        wait_for_completion_interruptible_timeout(&ctx->dev->complete,
+        crypto_dma_start(rkc, sgs, sgd, todo / 4);
+        wait_for_completion_interruptible_timeout(&rkc->complete,
                                                   msecs_to_jiffies(2000));
-        if (!ctx->dev->status) {
-            dev_err(ctx->dev->dev, "DMA timeout\n");
+        if (!rkc->status) {
+            dev_err(rkc->dev, "DMA timeout\n");
             err = -EFAULT;
             goto theend;
         }
         if (sgs == sgd) {
-            dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+            dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
         } else {
-            dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
-            dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+            dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+            dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
         }
         if (rctx->mode & RK_CRYPTO_DEC) {
             memcpy(iv, biv, ivsize);
@@ -405,10 +406,10 @@ static int rk_cipher_run(struct crypto_engine *engine, void *async_req)
 theend_sgs:
     if (sgs == sgd) {
-        dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_BIDIRECTIONAL);
+        dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL);
     } else {
-        dma_unmap_sg(ctx->dev->dev, sgs, 1, DMA_TO_DEVICE);
-        dma_unmap_sg(ctx->dev->dev, sgd, 1, DMA_FROM_DEVICE);
+        dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE);
+        dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE);
     }
 theend_iv:
     return err;