Commit ae4b688d authored by Huang Ying, committed by H. Peter Anvin

x86: Move kernel_fpu_using to irq_fpu_usable in asm/i387.h

This function tests whether the FPU/SSE state can be touched in
interrupt context. If the interrupted code is in user space or has no
valid FPU/SSE context (CR0.TS == 1), the FPU/SSE state can be used in
IRQ or softirq context as well.

This is used by the AES-NI accelerated AES implementation and the
PCLMULQDQ accelerated GHASH implementation; the intended call pattern
is sketched below.
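
For reference, the predicate is meant to gate FPU use at each call
site: when irq_fpu_usable() returns true the FPU/SSE state may be
clobbered, so kernel_fpu_begin()/kernel_fpu_end() bracket the SIMD
path, and a plain integer implementation serves as the fallback. The
sketch below illustrates this; aes_encrypt_one() is a hypothetical
wrapper, with aesni_enc() and crypto_aes_encrypt_x86() standing in for
a caller's SIMD and scalar routines.

	#include <asm/i387.h>	/* irq_fpu_usable(), kernel_fpu_begin/end() */
	#include <crypto/aes.h>	/* struct crypto_aes_ctx */

	static void aes_encrypt_one(struct crypto_aes_ctx *ctx, u8 *dst,
				    const u8 *src)
	{
		if (irq_fpu_usable()) {
			/* FPU/SSE state may be touched, even from IRQ or
			 * softirq context: take the AES-NI path. */
			kernel_fpu_begin();
			aesni_enc(ctx, dst, src);
			kernel_fpu_end();
		} else {
			/* The interrupted kernel code owns the FPU; fall
			 * back to the table-based x86 implementation. */
			crypto_aes_encrypt_x86(ctx, dst, src);
		}
	}

Checking at each call site keeps the synchronous SIMD path available
whenever it is safe, while callers that hit the unsafe window still
make progress through the scalar (or cryptd-deferred) path.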

v3:
 - Renamed to irq_fpu_usable to reflect the purpose of the function.

v2:
 - Renamed to irq_is_fpu_using to reflect what the function actually checks.
Signed-off-by: Huang Ying <ying.huang@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent f6909f39
arch/x86/crypto/aesni-intel_glue.c

@@ -59,13 +59,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
 			      const u8 *in, unsigned int len, u8 *iv);
 
-static inline int kernel_fpu_using(void)
-{
-	if (in_interrupt() && !(read_cr0() & X86_CR0_TS))
-		return 1;
-	return 0;
-}
-
 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
 {
 	unsigned long addr = (unsigned long)raw_ctx;
@@ -89,7 +82,7 @@ static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
 		return -EINVAL;
 	}
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		err = crypto_aes_expand_key(ctx, in_key, key_len);
 	else {
 		kernel_fpu_begin();
@@ -110,7 +103,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		crypto_aes_encrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -123,7 +116,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
 	struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
 
-	if (kernel_fpu_using())
+	if (irq_fpu_usable())
 		crypto_aes_decrypt_x86(ctx, dst, src);
 	else {
 		kernel_fpu_begin();
@@ -349,7 +342,7 @@ static int ablk_encrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
@@ -370,7 +363,7 @@ static int ablk_decrypt(struct ablkcipher_request *req)
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct async_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 
-	if (kernel_fpu_using()) {
+	if (irq_fpu_usable()) {
 		struct ablkcipher_request *cryptd_req =
 			ablkcipher_request_ctx(req);
 		memcpy(cryptd_req, req, sizeof(*req));
arch/x86/include/asm/i387.h

@@ -301,6 +301,14 @@ static inline void kernel_fpu_end(void)
 	preempt_enable();
 }
 
+static inline bool irq_fpu_usable(void)
+{
+	struct pt_regs *regs;
+
+	return !in_interrupt() || !(regs = get_irq_regs()) || \
+		user_mode(regs) || (read_cr0() & X86_CR0_TS);
+}
+
 /*
  * Some instructions like VIA's padlock instructions generate a spurious
  * DNA fault but don't modify SSE registers. And these instructions