Commit 3b9d9021 authored by Tianjia Zhang, committed by Herbert Xu

crypto: arm64/sm4-ccm - Rewrite skcipher walker loop

The fact that an error in the skcipher walker API is indicated
not only by a non-zero return value but also by walk->nbytes
being zero makes the layout of the skcipher walker loop noticeably
different from the usual one. That is not a problem in itself,
but it invites misreading and makes the code harder to maintain.

This patch rewrites the skcipher walker loop and moves the
encryption/decryption of the last chunk out of the loop, avoiding
incorrect calls into the skcipher walker API. Besides following
the usual convention of checking walk->nbytes, this also makes
the loop logic clearer and easier to understand.
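
For context (not part of the patch), a minimal sketch of the conventional skcipher walker loop the commit message refers to might look like the following. do_one_chunk() is a hypothetical placeholder for a driver's per-chunk cipher call; the error handling relies on the property described above: a failing skcipher_walk_virt() or skcipher_walk_done() returns a non-zero error and leaves walk.nbytes at zero, so the loop simply terminates and err is propagated.

#include <linux/kernel.h>
#include <crypto/internal/skcipher.h>

/* hypothetical per-chunk cipher call, stands in for the real helper */
static void do_one_chunk(u8 *dst, const u8 *src, unsigned int nbytes);

static int example_crypt(struct skcipher_request *req)
{
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/* only the final chunk may hold a partial block */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		do_one_chunk(walk.dst.virt.addr, walk.src.virt.addr, nbytes);

		/*
		 * On failure skcipher_walk_done() returns a non-zero
		 * error and zeroes walk.nbytes, so the loop exits and
		 * err is returned without an explicit check here.
		 */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}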
Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent d58fa987
@@ -166,7 +166,7 @@ static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 					      unsigned int nbytes, u8 *mac))
 {
 	u8 __aligned(8) ctr0[SM4_BLOCK_SIZE];
-	int err;
+	int err = 0;
 
 	/* preserve the initial ctr0 for the TAG */
 	memcpy(ctr0, walk->iv, SM4_BLOCK_SIZE);
@@ -177,33 +177,37 @@ static int ccm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 	if (req->assoclen)
 		ccm_calculate_auth_mac(req, mac);
 
-	do {
-		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
-		const u8 *src = walk->src.virt.addr;
-		u8 *dst = walk->dst.virt.addr;
-
-		if (walk->nbytes == walk->total)
-			tail = 0;
-
-		if (walk->nbytes - tail)
-			sm4_ce_ccm_crypt(rkey_enc, dst, src, walk->iv,
-					 walk->nbytes - tail, mac);
-
-		if (walk->nbytes == walk->total)
-			sm4_ce_ccm_final(rkey_enc, ctr0, mac);
-
-		kernel_neon_end();
-
-		if (walk->nbytes) {
-			err = skcipher_walk_done(walk, tail);
-			if (err)
-				return err;
-			if (walk->nbytes)
-				kernel_neon_begin();
-		}
-	} while (walk->nbytes > 0);
-
-	return 0;
+	while (walk->nbytes && walk->nbytes != walk->total) {
+		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
+
+		sm4_ce_ccm_crypt(rkey_enc, walk->dst.virt.addr,
+				 walk->src.virt.addr, walk->iv,
+				 walk->nbytes - tail, mac);
+
+		kernel_neon_end();
+
+		err = skcipher_walk_done(walk, tail);
+
+		kernel_neon_begin();
+	}
+
+	if (walk->nbytes) {
+		sm4_ce_ccm_crypt(rkey_enc, walk->dst.virt.addr,
+				 walk->src.virt.addr, walk->iv,
+				 walk->nbytes, mac);
+
+		sm4_ce_ccm_final(rkey_enc, ctr0, mac);
+
+		kernel_neon_end();
+
+		err = skcipher_walk_done(walk, 0);
+	} else {
+		sm4_ce_ccm_final(rkey_enc, ctr0, mac);
+
+		kernel_neon_end();
+	}
+
+	return err;
 }
 
 static int ccm_encrypt(struct aead_request *req)
...