Commit 4139fd58 authored by Thara Gopinath, committed by Herbert Xu

crypto: qce - Remove totallen and offset in qce_start

totallen is used to get the size of the data to be transformed.
This is also available via nbytes or cryptlen in the qce_sha_reqctx
and qce_cipher_ctx. Similarly, offset conveys nothing for the supported
encryption and authentication transformations and is always 0.
Remove these two redundant parameters in qce_start.
Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Signed-off-by: Thara Gopinath <thara.gopinath@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 62e48428
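In short, an abridged sketch drawn from the hunks below (not the complete functions): the length that callers used to pass as totallen is already carried in the per-request context, and the offset written to REG_ENCR_SEG_START is always 0 for the supported transformations, so both arguments can be dropped.

	/* Before: the length travels twice -- once in the request context,
	 * and again as the totallen argument of qce_start().
	 */
	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
	...
	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
	qce_write(qce, REG_SEG_SIZE, totallen);

	/* After: qce_setup_regs_skcipher() reads the same length from
	 * rctx->cryptlen, and the always-zero offset is written directly.
	 */
	ret = qce_start(async_req, tmpl->crypto_alg_type);
	...
	qce_write(qce, REG_ENCR_SEG_START, 0);
	qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);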
drivers/crypto/qce/common.c

@@ -140,8 +140,7 @@ static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
 	return cfg;
 }
 
-static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
-				u32 totallen, u32 offset)
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req)
 {
 	struct ahash_request *req = ahash_request_cast(async_req);
 	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
@@ -306,8 +305,7 @@ static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
 	qce_write(qce, REG_ENCR_XTS_DU_SIZE, cryptlen);
 }
 
-static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
-				   u32 totallen, u32 offset)
+static int qce_setup_regs_skcipher(struct crypto_async_request *async_req)
 {
 	struct skcipher_request *req = skcipher_request_cast(async_req);
 	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
@@ -367,7 +365,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
 	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
-	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+	qce_write(qce, REG_ENCR_SEG_START, 0);
 
 	if (IS_CTR(flags)) {
 		qce_write(qce, REG_CNTR_MASK, ~0);
@@ -376,7 +374,7 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 		qce_write(qce, REG_CNTR_MASK2, ~0);
 	}
 
-	qce_write(qce, REG_SEG_SIZE, totallen);
+	qce_write(qce, REG_SEG_SIZE, rctx->cryptlen);
 
 	/* get little endianness */
 	config = qce_config_reg(qce, 1);
@@ -388,17 +386,16 @@ static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
 }
 #endif
 
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
-	      u32 offset)
+int qce_start(struct crypto_async_request *async_req, u32 type)
 {
 	switch (type) {
 #ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
 	case CRYPTO_ALG_TYPE_SKCIPHER:
-		return qce_setup_regs_skcipher(async_req, totallen, offset);
+		return qce_setup_regs_skcipher(async_req);
 #endif
 #ifdef CONFIG_CRYPTO_DEV_QCE_SHA
 	case CRYPTO_ALG_TYPE_AHASH:
-		return qce_setup_regs_ahash(async_req, totallen, offset);
+		return qce_setup_regs_ahash(async_req);
 #endif
 	default:
 		return -EINVAL;
drivers/crypto/qce/common.h

@@ -94,7 +94,6 @@ struct qce_alg_template {
 void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
 int qce_check_status(struct qce_device *qce, u32 *status);
 void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
-int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
-	      u32 offset);
+int qce_start(struct crypto_async_request *async_req, u32 type);
 
 #endif /* _COMMON_H_ */
drivers/crypto/qce/sha.c

@@ -113,7 +113,7 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
 	qce_dma_issue_pending(&qce->dma);
 
-	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+	ret = qce_start(async_req, tmpl->crypto_alg_type);
 	if (ret)
 		goto error_terminate;
drivers/crypto/qce/skcipher.c

@@ -144,7 +144,7 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
 	qce_dma_issue_pending(&qce->dma);
 
-	ret = qce_start(async_req, tmpl->crypto_alg_type, req->cryptlen, 0);
+	ret = qce_start(async_req, tmpl->crypto_alg_type);
 	if (ret)
 		goto error_terminate;