Commit c77da486 authored by Ard Biesheuvel's avatar Ard Biesheuvel Committed by Herbert Xu

crypto: arm64/chacha - depend on generic chacha library instead of crypto driver

Depend on the generic ChaCha library routines instead of pulling in the
generic ChaCha skcipher driver, which is more than we need, and makes
managing the dependencies between the generic library, generic driver,
accelerated library and driver more complicated.

While at it, drop the logic to prefer the scalar code on short inputs.
Turning the NEON on and off is cheap these days, and one major use case
for ChaCha20 is ChaCha20-Poly1305, which is guaranteed to hit the scalar
path upon every invocation (when doing the Poly1305 nonce generation).
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 84e03fa3
...@@ -103,7 +103,7 @@ config CRYPTO_CHACHA20_NEON ...@@ -103,7 +103,7 @@ config CRYPTO_CHACHA20_NEON
tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions" tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions"
depends on KERNEL_MODE_NEON depends on KERNEL_MODE_NEON
select CRYPTO_SKCIPHER select CRYPTO_SKCIPHER
select CRYPTO_CHACHA20 select CRYPTO_LIB_CHACHA_GENERIC
config CRYPTO_NHPOLY1305_NEON config CRYPTO_NHPOLY1305_NEON
tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)" tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)"
......
...@@ -68,7 +68,7 @@ static int chacha_neon_stream_xor(struct skcipher_request *req, ...@@ -68,7 +68,7 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
err = skcipher_walk_virt(&walk, req, false); err = skcipher_walk_virt(&walk, req, false);
crypto_chacha_init(state, ctx, iv); chacha_init_generic(state, ctx->key, iv);
while (walk.nbytes > 0) { while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes; unsigned int nbytes = walk.nbytes;
...@@ -76,10 +76,16 @@ static int chacha_neon_stream_xor(struct skcipher_request *req, ...@@ -76,10 +76,16 @@ static int chacha_neon_stream_xor(struct skcipher_request *req,
if (nbytes < walk.total) if (nbytes < walk.total)
nbytes = rounddown(nbytes, walk.stride); nbytes = rounddown(nbytes, walk.stride);
kernel_neon_begin(); if (!crypto_simd_usable()) {
chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, chacha_crypt_generic(state, walk.dst.virt.addr,
nbytes, ctx->nrounds); walk.src.virt.addr, nbytes,
kernel_neon_end(); ctx->nrounds);
} else {
kernel_neon_begin();
chacha_doneon(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes, ctx->nrounds);
kernel_neon_end();
}
err = skcipher_walk_done(&walk, walk.nbytes - nbytes); err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
} }
...@@ -91,9 +97,6 @@ static int chacha_neon(struct skcipher_request *req) ...@@ -91,9 +97,6 @@ static int chacha_neon(struct skcipher_request *req)
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
return crypto_chacha_crypt(req);
return chacha_neon_stream_xor(req, ctx, req->iv); return chacha_neon_stream_xor(req, ctx, req->iv);
} }
...@@ -105,14 +108,15 @@ static int xchacha_neon(struct skcipher_request *req) ...@@ -105,14 +108,15 @@ static int xchacha_neon(struct skcipher_request *req)
u32 state[16]; u32 state[16];
u8 real_iv[16]; u8 real_iv[16];
if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) chacha_init_generic(state, ctx->key, req->iv);
return crypto_xchacha_crypt(req);
crypto_chacha_init(state, ctx, req->iv); if (crypto_simd_usable()) {
kernel_neon_begin();
kernel_neon_begin(); hchacha_block_neon(state, subctx.key, ctx->nrounds);
hchacha_block_neon(state, subctx.key, ctx->nrounds); kernel_neon_end();
kernel_neon_end(); } else {
hchacha_block_generic(state, subctx.key, ctx->nrounds);
}
subctx.nrounds = ctx->nrounds; subctx.nrounds = ctx->nrounds;
memcpy(&real_iv[0], req->iv + 24, 8); memcpy(&real_iv[0], req->iv + 24, 8);
...@@ -134,7 +138,7 @@ static struct skcipher_alg algs[] = { ...@@ -134,7 +138,7 @@ static struct skcipher_alg algs[] = {
.ivsize = CHACHA_IV_SIZE, .ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE, .chunksize = CHACHA_BLOCK_SIZE,
.walksize = 5 * CHACHA_BLOCK_SIZE, .walksize = 5 * CHACHA_BLOCK_SIZE,
.setkey = crypto_chacha20_setkey, .setkey = chacha20_setkey,
.encrypt = chacha_neon, .encrypt = chacha_neon,
.decrypt = chacha_neon, .decrypt = chacha_neon,
}, { }, {
...@@ -150,7 +154,7 @@ static struct skcipher_alg algs[] = { ...@@ -150,7 +154,7 @@ static struct skcipher_alg algs[] = {
.ivsize = XCHACHA_IV_SIZE, .ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE, .chunksize = CHACHA_BLOCK_SIZE,
.walksize = 5 * CHACHA_BLOCK_SIZE, .walksize = 5 * CHACHA_BLOCK_SIZE,
.setkey = crypto_chacha20_setkey, .setkey = chacha20_setkey,
.encrypt = xchacha_neon, .encrypt = xchacha_neon,
.decrypt = xchacha_neon, .decrypt = xchacha_neon,
}, { }, {
...@@ -166,7 +170,7 @@ static struct skcipher_alg algs[] = { ...@@ -166,7 +170,7 @@ static struct skcipher_alg algs[] = {
.ivsize = XCHACHA_IV_SIZE, .ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE, .chunksize = CHACHA_BLOCK_SIZE,
.walksize = 5 * CHACHA_BLOCK_SIZE, .walksize = 5 * CHACHA_BLOCK_SIZE,
.setkey = crypto_chacha12_setkey, .setkey = chacha12_setkey,
.encrypt = xchacha_neon, .encrypt = xchacha_neon,
.decrypt = xchacha_neon, .decrypt = xchacha_neon,
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment