Commit 99680c5e authored by Eric Biggers, committed by Herbert Xu

crypto: arm - convert to use crypto_simd_usable()

Replace all calls to may_use_simd() in the arm crypto code with
crypto_simd_usable(), in order to allow testing the no-SIMD code paths.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent f2abe0d7
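
Note (not part of the original commit message): crypto_simd_usable(), declared in <crypto/internal/simd.h>, is meant to behave like may_use_simd() except that the crypto self-tests can force it to return false, so the generic no-SIMD fallback paths below actually get exercised. The following is only a minimal sketch of that idea, assuming a per-CPU override flag named crypto_simd_disabled_for_test; the name and exact mechanism are an assumption here, not taken from this diff.

/* Sketch only -- assumes a per-CPU test-override flag; see note above. */
#include <asm/simd.h>      /* may_use_simd() */
#include <linux/percpu.h>

DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);

/*
 * SIMD is usable for real work only if the architecture says SIMD is
 * allowed in the current context AND the self-tests have not temporarily
 * disabled it on this CPU.
 */
#define crypto_simd_usable() \
        (may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))

With a definition along these lines, every call site converted below keeps its existing behaviour in production, but transparently falls back to the portable C implementation whenever the tests flip the flag.
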
@@ -21,6 +21,7 @@
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -93,7 +94,7 @@ static int chacha_neon(struct skcipher_request *req)
         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
         struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
                 return crypto_chacha_crypt(req);
 
         return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -107,7 +108,7 @@ static int xchacha_neon(struct skcipher_request *req)
         u32 state[16];
         u8 real_iv[16];
 
-        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+        if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
                 return crypto_xchacha_crypt(req);
 
         crypto_chacha_init(state, ctx, req->iv);
...
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/hwcap.h>
 #include <asm/neon.h>
@@ -113,7 +114,7 @@ static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
         u32 *crc = shash_desc_ctx(desc);
         unsigned int l;
 
-        if (may_use_simd()) {
+        if (crypto_simd_usable()) {
                 if ((u32)data % SCALE_F) {
                         l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
@@ -147,7 +148,7 @@ static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
         u32 *crc = shash_desc_ctx(desc);
         unsigned int l;
 
-        if (may_use_simd()) {
+        if (crypto_simd_usable()) {
                 if ((u32)data % SCALE_F) {
                         l = min_t(u32, length, SCALE_F - ((u32)data % SCALE_F));
...
@@ -15,6 +15,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -36,7 +37,7 @@ static int crct10dif_update(struct shash_desc *desc, const u8 *data,
 {
         u16 *crc = shash_desc_ctx(desc);
 
-        if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+        if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
                 kernel_neon_begin();
                 *crc = crc_t10dif_pmull(*crc, data, length);
                 kernel_neon_end();
...
@@ -14,6 +14,7 @@
 #include <asm/unaligned.h>
 #include <crypto/cryptd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/gf128mul.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -196,7 +197,7 @@ static int ghash_async_update(struct ahash_request *req)
         struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
         struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                 memcpy(cryptd_req, req, sizeof(*req));
                 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -214,7 +215,7 @@ static int ghash_async_final(struct ahash_request *req)
         struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
         struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                 memcpy(cryptd_req, req, sizeof(*req));
                 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -232,7 +233,7 @@ static int ghash_async_digest(struct ahash_request *req)
         struct ahash_request *cryptd_req = ahash_request_ctx(req);
         struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
                 memcpy(cryptd_req, req, sizeof(*req));
                 ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
...
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
                                   const u8 *src, unsigned int srclen)
 {
-        if (srclen < 64 || !may_use_simd())
+        if (srclen < 64 || !crypto_simd_usable())
                 return crypto_nhpoly1305_update(desc, src, srclen);
 
         do {
...
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
@@ -33,7 +34,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
         struct sha1_state *sctx = shash_desc_ctx(desc);
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
                 return sha1_update_arm(desc, data, len);
@@ -47,7 +48,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
                          unsigned int len, u8 *out)
 {
-        if (!may_use_simd())
+        if (!crypto_simd_usable())
                 return sha1_finup_arm(desc, data, len, out);
 
         kernel_neon_begin();
...
@@ -19,6 +19,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -39,7 +40,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
 {
         struct sha1_state *sctx = shash_desc_ctx(desc);
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
                 return sha1_update_arm(desc, data, len);
@@ -54,7 +55,7 @@ static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
 static int sha1_neon_finup(struct shash_desc *desc, const u8 *data,
                            unsigned int len, u8 *out)
 {
-        if (!may_use_simd())
+        if (!crypto_simd_usable())
                 return sha1_finup_arm(desc, data, len, out);
 
         kernel_neon_begin();
...
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
@@ -34,7 +35,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
 {
         struct sha256_state *sctx = shash_desc_ctx(desc);
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
                 return crypto_sha256_arm_update(desc, data, len);
@@ -49,7 +50,7 @@ static int sha2_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha2_ce_finup(struct shash_desc *desc, const u8 *data,
                          unsigned int len, u8 *out)
 {
-        if (!may_use_simd())
+        if (!crypto_simd_usable())
                 return crypto_sha256_arm_finup(desc, data, len, out);
 
         kernel_neon_begin();
...
@@ -15,6 +15,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/cryptohash.h>
 #include <linux/types.h>
 #include <linux/string.h>
@@ -34,7 +35,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 {
         struct sha256_state *sctx = shash_desc_ctx(desc);
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
                 return crypto_sha256_arm_update(desc, data, len);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 static int sha256_finup(struct shash_desc *desc, const u8 *data,
                         unsigned int len, u8 *out)
 {
-        if (!may_use_simd())
+        if (!crypto_simd_usable())
                 return crypto_sha256_arm_finup(desc, data, len, out);
 
         kernel_neon_begin();
...
@@ -9,6 +9,7 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/crypto.h>
@@ -30,7 +31,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
 {
         struct sha512_state *sctx = shash_desc_ctx(desc);
 
-        if (!may_use_simd() ||
+        if (!crypto_simd_usable() ||
             (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
                 return sha512_arm_update(desc, data, len);
@@ -45,7 +46,7 @@ static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
 static int sha512_neon_finup(struct shash_desc *desc, const u8 *data,
                              unsigned int len, u8 *out)
 {
-        if (!may_use_simd())
+        if (!crypto_simd_usable())
                 return sha512_arm_finup(desc, data, len, out);
 
         kernel_neon_begin();
...