Commit e52b7023 authored by Eric Biggers, committed by Herbert Xu

crypto: arm64 - convert to use crypto_simd_usable()

Replace all calls to may_use_simd() in the arm64 crypto code with
crypto_simd_usable(), in order to allow testing the no-SIMD code paths.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 99680c5e
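
For reference, crypto_simd_usable() is a thin wrapper around may_use_simd(). The sketch below shows the idea behind the helper in <crypto/internal/simd.h> (a hedged reconstruction, not quoted verbatim from this tree): when the extra crypto self-tests are built in, the test manager can set a per-CPU flag that makes crypto_simd_usable() report false, steering each driver into its scalar fallback.

/* Sketch of the helper in <crypto/internal/simd.h> (reconstruction). */
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
	(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
#else
#define crypto_simd_usable() may_use_simd()
#endif
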
@@ -14,6 +14,7 @@
 #include <crypto/aes.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/module.h>
@@ -109,7 +110,7 @@ static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen)
 static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
 			   u32 abytes, u32 *macp)
 {
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		ce_aes_ccm_auth_data(mac, in, abytes, macp, key->key_enc,
 				     num_rounds(key));
@@ -255,7 +256,7 @@ static int ccm_encrypt(struct aead_request *req)
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
@@ -313,7 +314,7 @@ static int ccm_decrypt(struct aead_request *req)
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		while (walk.nbytes) {
 			u32 tail = walk.nbytes % AES_BLOCK_SIZE;
...
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/aes.h>
+#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
@@ -52,7 +53,7 @@ static void aes_cipher_encrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_encrypt(ctx->key_enc, dst, src, num_rounds(ctx));
 		return;
 	}
@@ -66,7 +67,7 @@ static void aes_cipher_decrypt(struct crypto_tfm *tfm, u8 dst[], u8 const src[])
 {
 	struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		__aes_arm64_decrypt(ctx->key_dec, dst, src, num_rounds(ctx));
 		return;
 	}
...
@@ -405,7 +405,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(ctx, req);
 
 	return ctr_encrypt(req);
@@ -642,7 +642,7 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
 {
 	int rounds = 6 + ctx->key_length / 4;
 
-	if (may_use_simd()) {
+	if (crypto_simd_usable()) {
 		kernel_neon_begin();
 		aes_mac_update(in, ctx->key_enc, rounds, blocks, dg, enc_before,
 			       enc_after);
...
@@ -288,7 +288,7 @@ static int ctr_encrypt_sync(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct aesbs_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return aes_ctr_encrypt_fallback(&ctx->fallback, req);
 
 	return ctr_encrypt(req);
...
@@ -21,6 +21,7 @@
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -90,7 +91,7 @@ static int chacha_neon(struct skcipher_request *req)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 	struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);
 
 	return chacha_neon_stream_xor(req, ctx, req->iv);
@@ -104,7 +105,7 @@ static int xchacha_neon(struct skcipher_request *req)
 	u32 state[16];
 	u8 real_iv[16];
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);
 
 	crypto_chacha_init(state, ctx, req->iv);
...
@@ -16,6 +16,7 @@
 #include <linux/string.h>
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -38,7 +39,7 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p8(*crc, data, length);
 		kernel_neon_end();
@@ -54,7 +55,7 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
 {
 	u16 *crc = shash_desc_ctx(desc);
 
-	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
+	if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
 		kernel_neon_begin();
 		*crc = crc_t10dif_pmull_p64(*crc, data, length);
 		kernel_neon_end();
...
@@ -17,6 +17,7 @@
 #include <crypto/gf128mul.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/cpufeature.h>
@@ -89,7 +90,7 @@ static void ghash_do_update(int blocks, u64 dg[], const char *src,
 					      struct ghash_key const *k,
 					      const char *head))
 {
-	if (likely(may_use_simd())) {
+	if (likely(crypto_simd_usable())) {
 		kernel_neon_begin();
 		simd_update(blocks, dg, src, key, head);
 		kernel_neon_end();
@@ -441,7 +442,7 @@ static int gcm_encrypt(struct aead_request *req)
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
 
-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;
 
 		kernel_neon_begin();
@@ -565,7 +566,7 @@ static int gcm_decrypt(struct aead_request *req)
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
 
-	if (likely(may_use_simd() && walk.total >= 2 * AES_BLOCK_SIZE)) {
+	if (likely(crypto_simd_usable() && walk.total >= 2 * AES_BLOCK_SIZE)) {
 		u32 const *rk = NULL;
 
 		kernel_neon_begin();
...
@@ -9,6 +9,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
@@ -25,7 +26,7 @@ static void _nh_neon(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_neon_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !may_use_simd())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {
...
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
 #include <linux/cpufeature.h>
@@ -38,7 +39,7 @@ static int sha1_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_update(desc, data, len);
 
 	sctx->finalize = 0;
@@ -56,7 +57,7 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA1_BLOCK_SIZE);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, data, len, out);
 
 	/*
@@ -78,7 +79,7 @@ static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha1_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, NULL, 0, out);
 
 	sctx->finalize = 0;
...
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cpufeature.h>
@@ -42,7 +43,7 @@ static int sha256_ce_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
@@ -61,7 +62,7 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 	bool finalize = !sctx->sst.count && !(len % SHA256_BLOCK_SIZE);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
@@ -90,7 +91,7 @@ static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
 	struct sha256_ce_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha256_base_do_finalize(desc,
 				(sha256_block_fn *)sha256_block_data_order);
 		return sha256_base_finish(desc, out);
...
@@ -14,6 +14,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
 #include <linux/cryptohash.h>
@@ -89,7 +90,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
@@ -119,7 +120,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
 			     unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha256_base_do_update(desc, data, len,
 				(sha256_block_fn *)sha256_block_data_order);
...
@@ -14,6 +14,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha3.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -32,7 +33,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
 	struct sha3_state *sctx = shash_desc_ctx(desc);
 	unsigned int digest_size = crypto_shash_digestsize(desc->tfm);
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_update(desc, data, len);
 
 	if ((sctx->partial + len) >= sctx->rsiz) {
@@ -76,7 +77,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
 	__le64 *digest = (__le64 *)out;
 	int i;
 
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sha3_final(desc, out);
 
 	sctx->buf[sctx->partial++] = 0x06;
...
@@ -13,6 +13,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
 #include <linux/cpufeature.h>
@@ -31,7 +32,7 @@ asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
 static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 			    unsigned int len)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);
@@ -46,7 +47,7 @@ static int sha512_ce_update(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 			   unsigned int len, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		if (len)
 			sha512_base_do_update(desc, data, len,
 				(sha512_block_fn *)sha512_block_data_order);
@@ -65,7 +66,7 @@ static int sha512_ce_finup(struct shash_desc *desc, const u8 *data,
 static int sha512_ce_final(struct shash_desc *desc, u8 *out)
 {
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		sha512_base_do_finalize(desc,
 				(sha512_block_fn *)sha512_block_data_order);
 		return sha512_base_finish(desc, out);
...
@@ -12,6 +12,7 @@
 #include <asm/simd.h>
 #include <asm/unaligned.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/sm3.h>
 #include <crypto/sm3_base.h>
 #include <linux/cpufeature.h>
@@ -28,7 +29,7 @@ asmlinkage void sm3_ce_transform(struct sm3_state *sst, u8 const *src,
 static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
 			 unsigned int len)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_update(desc, data, len);
 
 	kernel_neon_begin();
@@ -40,7 +41,7 @@ static int sm3_ce_update(struct shash_desc *desc, const u8 *data,
 static int sm3_ce_final(struct shash_desc *desc, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_finup(desc, NULL, 0, out);
 
 	kernel_neon_begin();
@@ -53,7 +54,7 @@ static int sm3_ce_final(struct shash_desc *desc, u8 *out)
 static int sm3_ce_finup(struct shash_desc *desc, const u8 *data,
 			unsigned int len, u8 *out)
 {
-	if (!may_use_simd())
+	if (!crypto_simd_usable())
 		return crypto_sm3_finup(desc, data, len, out);
 
 	kernel_neon_begin();
...
@@ -3,6 +3,7 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 #include <crypto/sm4.h>
+#include <crypto/internal/simd.h>
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
@@ -20,7 +21,7 @@ static void sm4_ce_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		crypto_sm4_encrypt(tfm, out, in);
 	} else {
 		kernel_neon_begin();
@@ -33,7 +34,7 @@ static void sm4_ce_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 {
 	const struct crypto_sm4_ctx *ctx = crypto_tfm_ctx(tfm);
 
-	if (!may_use_simd()) {
+	if (!crypto_simd_usable()) {
 		crypto_sm4_decrypt(tfm, out, in);
 	} else {
 		kernel_neon_begin();
...
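
With every arm64 call site converted, the no-SIMD paths become reachable under test: with CONFIG_CRYPTO_MANAGER_EXTRA_TESTS enabled, the test manager can flip the per-CPU flag around an operation so these drivers run their scalar fallbacks. A minimal sketch of that mechanism, following the pattern used by the crypto test manager (the wrapper names here are illustrative, not taken from this commit):

/* Illustrative only: force crypto_simd_usable() to return false on this
 * CPU so the scalar fallback path runs; re-enable afterwards. Preemption
 * is held off so the flag applies to the CPU actually doing the work.
 */
static void crypto_disable_simd_for_test(void)
{
	preempt_disable();
	__this_cpu_write(crypto_simd_disabled_for_test, true);
}

static void crypto_reenable_simd_for_test(void)
{
	__this_cpu_write(crypto_simd_disabled_for_test, false);
	preempt_enable();
}
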