Commit f2abe0d7 authored by Eric Biggers, committed by Herbert Xu

crypto: x86 - convert to use crypto_simd_usable()

Replace all calls to irq_fpu_usable() in the x86 crypto code with
crypto_simd_usable(), in order to allow testing the no-SIMD code paths.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent b55e1a39
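
Editor's note for context: crypto_simd_usable() comes from <crypto/internal/simd.h>. In normal operation it behaves like may_use_simd(), which on x86 <asm/simd.h> defines as irq_fpu_usable() (and that header also pulls in <asm/fpu/api.h>, which is why the direct includes below can be dropped). Its value is that, when CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is enabled, the crypto self-tests can force it to return false and so exercise the generic no-SIMD fallbacks. A minimal sketch of the idea, paraphrased rather than quoted verbatim from the kernel header:

/*
 * Sketch only; the exact definition lives in include/crypto/internal/simd.h.
 */
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
/* Flipped on by the crypto self-tests to force the no-SIMD code paths. */
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);
#define crypto_simd_usable() \
	(may_use_simd() && !this_cpu_read(crypto_simd_disabled_for_test))
#else
#define crypto_simd_usable() may_use_simd()
#endif

Outside of testing configurations the macro collapses to may_use_simd(), so this conversion does not change runtime behavior.
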
arch/x86/crypto/aesni-intel_glue.c

@@ -30,8 +30,8 @@
 #include <crypto/gcm.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
 #include <asm/crypto/aes.h>
+#include <asm/simd.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/internal/aead.h>
 #include <crypto/internal/simd.h>
@@ -332,7 +332,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -353,7 +353,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -366,7 +366,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();

arch/x86/crypto/chacha_glue.c

@@ -12,10 +12,10 @@
 #include <crypto/algapi.h>
 #include <crypto/chacha.h>
+#include <crypto/internal/simd.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
 #include <asm/simd.h>
 
 #define CHACHA_STATE_ALIGN 16
@@ -170,7 +170,7 @@ static int chacha_simd(struct skcipher_request *req)
 	struct skcipher_walk walk;
 	int err;
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_chacha_crypt(req);
 
 	err = skcipher_walk_virt(&walk, req, true);
@@ -193,7 +193,7 @@ static int xchacha_simd(struct skcipher_request *req)
 	u8 real_iv[16];
 	int err;
 
-	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !irq_fpu_usable())
+	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable())
 		return crypto_xchacha_crypt(req);
 
 	err = skcipher_walk_virt(&walk, req, true);

arch/x86/crypto/crc32-pclmul_glue.c

@@ -32,10 +32,11 @@
 #include <linux/kernel.h>
 #include <linux/crc32.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 #define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
@@ -54,7 +55,7 @@ static u32 __attribute__((pure))
 	unsigned int iremainder;
 	unsigned int prealign;
 
-	if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
+	if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !crypto_simd_usable())
 		return crc32_le(crc, p, len);
 
 	if ((long)p & SCALE_F_MASK) {

arch/x86/crypto/crc32c-intel_glue.c

@@ -29,10 +29,11 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
-#include <asm/fpu/internal.h>
+#include <asm/simd.h>
 
 #define CHKSUM_BLOCK_SIZE	1
 #define CHKSUM_DIGEST_SIZE	4
@@ -177,7 +178,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
 	 * use faster PCL version if datasize is large enough to
	 * overcome kernel fpu state save/restore overhead
 	 */
-	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
 		kernel_fpu_begin();
 		*crcp = crc_pcl(data, len, *crcp);
 		kernel_fpu_end();
@@ -189,7 +190,7 @@ static int crc32c_pcl_intel_update(struct shash_desc *desc, const u8 *data,
 static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
 				u8 *out)
 {
-	if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
+	if (len >= CRC32C_PCL_BREAKEVEN && crypto_simd_usable()) {
 		kernel_fpu_begin();
 		*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
 		kernel_fpu_end();

arch/x86/crypto/crct10dif-pclmul_glue.c

@@ -26,12 +26,13 @@
 #include <linux/module.h>
 #include <linux/crc-t10dif.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
-#include <asm/fpu/api.h>
 #include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
+#include <asm/simd.h>
 
 asmlinkage u16 crc_t10dif_pcl(u16 init_crc, const u8 *buf, size_t len);
@@ -53,7 +54,7 @@ static int chksum_update(struct shash_desc *desc, const u8 *data,
 {
 	struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
-	if (length >= 16 && irq_fpu_usable()) {
+	if (length >= 16 && crypto_simd_usable()) {
 		kernel_fpu_begin();
 		ctx->crc = crc_t10dif_pcl(ctx->crc, data, length);
 		kernel_fpu_end();
@@ -73,7 +74,7 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 static int __chksum_finup(__u16 *crcp, const u8 *data, unsigned int len,
 			u8 *out)
 {
-	if (len >= 16 && irq_fpu_usable()) {
+	if (len >= 16 && crypto_simd_usable()) {
 		kernel_fpu_begin();
 		*(__u16 *)out = crc_t10dif_pcl(*crcp, data, len);
 		kernel_fpu_end();

arch/x86/crypto/ghash-clmulni-intel_glue.c

@@ -19,8 +19,9 @@
 #include <crypto/cryptd.h>
 #include <crypto/gf128mul.h>
 #include <crypto/internal/hash.h>
-#include <asm/fpu/api.h>
+#include <crypto/internal/simd.h>
 #include <asm/cpu_device_id.h>
+#include <asm/simd.h>
 
 #define GHASH_BLOCK_SIZE	16
 #define GHASH_DIGEST_SIZE	16
@@ -182,7 +183,7 @@ static int ghash_async_update(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -200,7 +201,7 @@ static int ghash_async_final(struct ahash_request *req)
 	struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
@@ -241,7 +242,7 @@ static int ghash_async_digest(struct ahash_request *req)
 	struct ahash_request *cryptd_req = ahash_request_ctx(req);
 	struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
 		memcpy(cryptd_req, req, sizeof(*req));
 		ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);

arch/x86/crypto/nhpoly1305-avx2-glue.c

@@ -7,9 +7,10 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 asmlinkage void nh_avx2(const u32 *key, const u8 *message, size_t message_len,
 			u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_avx2(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_avx2_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !irq_fpu_usable())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {

arch/x86/crypto/nhpoly1305-sse2-glue.c

@@ -7,9 +7,10 @@
  */
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/nhpoly1305.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 asmlinkage void nh_sse2(const u32 *key, const u8 *message, size_t message_len,
 			u8 hash[NH_HASH_BYTES]);
@@ -24,7 +25,7 @@ static void _nh_sse2(const u32 *key, const u8 *message, size_t message_len,
 static int nhpoly1305_sse2_update(struct shash_desc *desc,
 				  const u8 *src, unsigned int srclen)
 {
-	if (srclen < 64 || !irq_fpu_usable())
+	if (srclen < 64 || !crypto_simd_usable())
 		return crypto_nhpoly1305_update(desc, src, srclen);
 
 	do {

arch/x86/crypto/poly1305_glue.c

@@ -11,11 +11,11 @@
 #include <crypto/algapi.h>
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <crypto/poly1305.h>
 #include <linux/crypto.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <asm/fpu/api.h>
 #include <asm/simd.h>
 
 struct poly1305_simd_desc_ctx {
@@ -126,7 +126,7 @@ static int poly1305_simd_update(struct shash_desc *desc,
 	unsigned int bytes;
 
 	/* kernel_fpu_begin/end is costly, use fallback for small updates */
-	if (srclen <= 288 || !may_use_simd())
+	if (srclen <= 288 || !crypto_simd_usable())
 		return crypto_poly1305_update(desc, src, srclen);
 
 	kernel_fpu_begin();

arch/x86/crypto/sha1_ssse3_glue.c

@@ -22,6 +22,7 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -29,7 +30,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
-#include <asm/fpu/api.h>
+#include <asm/simd.h>
 
 typedef void (sha1_transform_fn)(u32 *digest, const char *data,
 				unsigned int rounds);
@@ -39,7 +40,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha1_state *sctx = shash_desc_ctx(desc);
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
 		return crypto_sha1_update(desc, data, len);
@@ -57,7 +58,7 @@ static int sha1_update(struct shash_desc *desc, const u8 *data,
 static int sha1_finup(struct shash_desc *desc, const u8 *data,
 		      unsigned int len, u8 *out, sha1_transform_fn *sha1_xform)
 {
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		return crypto_sha1_finup(desc, data, len, out);
 
 	kernel_fpu_begin();

arch/x86/crypto/sha256_ssse3_glue.c

@@ -30,6 +30,7 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
@@ -37,8 +38,8 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
-#include <asm/fpu/api.h>
 #include <linux/string.h>
+#include <asm/simd.h>
 
 asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
 				       u64 rounds);
@@ -49,7 +50,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count % SHA256_BLOCK_SIZE) + len < SHA256_BLOCK_SIZE)
 		return crypto_sha256_update(desc, data, len);
@@ -67,7 +68,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
 static int sha256_finup(struct shash_desc *desc, const u8 *data,
 		unsigned int len, u8 *out, sha256_transform_fn *sha256_xform)
 {
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		return crypto_sha256_finup(desc, data, len, out);
 
 	kernel_fpu_begin();

arch/x86/crypto/sha512_ssse3_glue.c

@@ -28,16 +28,16 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <crypto/internal/hash.h>
+#include <crypto/internal/simd.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/cryptohash.h>
+#include <linux/string.h>
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
-#include <asm/fpu/api.h>
-
-#include <linux/string.h>
+#include <asm/simd.h>
 
 asmlinkage void sha512_transform_ssse3(u64 *digest, const char *data,
 				       u64 rounds);
@@ -49,7 +49,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 
-	if (!irq_fpu_usable() ||
+	if (!crypto_simd_usable() ||
 	    (sctx->count[0] % SHA512_BLOCK_SIZE) + len < SHA512_BLOCK_SIZE)
 		return crypto_sha512_update(desc, data, len);
@@ -67,7 +67,7 @@ static int sha512_update(struct shash_desc *desc, const u8 *data,
 static int sha512_finup(struct shash_desc *desc, const u8 *data,
 		unsigned int len, u8 *out, sha512_transform_fn *sha512_xform)
 {
-	if (!irq_fpu_usable())
+	if (!crypto_simd_usable())
 		return crypto_sha512_finup(desc, data, len, out);
 
 	kernel_fpu_begin();
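
Editor's note: every file above is converted the same way — test crypto_simd_usable() instead of irq_fpu_usable(), run the accelerated routine between kernel_fpu_begin()/kernel_fpu_end() when it returns true, and fall back to the generic C implementation otherwise. A condensed sketch of that recurring glue pattern; do_transform, simd_transform, and generic_transform are illustrative names, not functions from the diff:

#include <linux/types.h>
#include <crypto/internal/simd.h>	/* crypto_simd_usable() */
#include <asm/simd.h>			/* pulls in kernel_fpu_begin/end() on x86 */

/* Hypothetical helpers standing in for a real generic/SIMD pair. */
void generic_transform(void *ctx, u8 *dst, const u8 *src);
void simd_transform(void *ctx, u8 *dst, const u8 *src);

static void do_transform(void *ctx, u8 *dst, const u8 *src)
{
	if (!crypto_simd_usable()) {
		/*
		 * FPU not usable in this context, or the self-tests are
		 * forcing the fallback: take the generic C path.
		 */
		generic_transform(ctx, dst, src);
	} else {
		kernel_fpu_begin();
		simd_transform(ctx, dst, src);
		kernel_fpu_end();
	}
}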