Commit e7bb5a8d authored by Anton Blanchard, committed by Tim Gardner

crypto: vmx: Only call enable_kernel_vsx()

BugLink: http://bugs.launchpad.net/bugs/1613295

With the recent change to enable_kernel_vsx(), we no longer need
to call enable_kernel_fp() and enable_kernel_altivec().
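
For reference, the pattern every call site reduces to after this patch looks
roughly like the sketch below (not a verbatim excerpt; the function names are
taken from the hunks that follow, and the comment reflects the powerpc
register layout, where the VSX register file overlaps both the FP and the
Altivec register files):

	/* Post-patch call-site pattern (sketch).
	 * enable_kernel_vsx() now suffices on its own: the VSX registers
	 * overlap the FP and Altivec register files, so enabling VSX also
	 * makes FP and Altivec state usable in kernel context.
	 */
	preempt_disable();    /* no preemption while vector state is live */
	pagefault_disable();  /* no sleeping between enable and the P8 routine */
	enable_kernel_vsx();  /* replaces enable_kernel_fp() + enable_kernel_altivec() */
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	pagefault_enable();
	preempt_enable();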
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
(cherry picked from commit 1552cd70)
Signed-off-by: Tim Gardner <tim.gardner@canonical.com>
Acked-by: Marcelo Henrique Cerri <marcelo.cerri@canonical.com>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
parent 6cbd6589
--- a/drivers/crypto/vmx/aes.c
+++ b/drivers/crypto/vmx/aes.c
@@ -83,7 +83,6 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
@@ -103,7 +102,6 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		aes_p8_encrypt(src, dst, &ctx->enc_key);
 		pagefault_enable();
@@ -120,7 +118,6 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		aes_p8_decrypt(src, dst, &ctx->dec_key);
 		pagefault_enable();
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -84,7 +84,6 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
@@ -115,7 +114,6 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		blkcipher_walk_init(&walk, dst, src, nbytes);
@@ -156,7 +154,6 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
 	} else {
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		blkcipher_walk_init(&walk, dst, src, nbytes);
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -81,7 +81,6 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	pagefault_enable();
@@ -100,7 +99,6 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
 	unsigned int nbytes = walk->nbytes;
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
 	pagefault_enable();
@@ -133,7 +131,6 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 	ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
 		aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
 					    walk.dst.virt.addr,
--- a/drivers/crypto/vmx/ghash.c
+++ b/drivers/crypto/vmx/ghash.c
@@ -118,9 +118,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
 	preempt_disable();
 	pagefault_disable();
-	enable_kernel_altivec();
 	enable_kernel_vsx();
-	enable_kernel_fp();
 	gcm_init_p8(ctx->htable, (const u64 *) key);
 	pagefault_enable();
 	preempt_enable();
@@ -149,9 +147,7 @@ static int p8_ghash_update(struct shash_desc *desc,
 			       GHASH_DIGEST_SIZE - dctx->bytes);
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable,
 				     dctx->buffer, GHASH_DIGEST_SIZE);
 			pagefault_enable();
@@ -164,9 +160,7 @@ static int p8_ghash_update(struct shash_desc *desc,
 		if (len) {
 			preempt_disable();
 			pagefault_disable();
-			enable_kernel_altivec();
 			enable_kernel_vsx();
-			enable_kernel_fp();
 			gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
 			pagefault_enable();
 			preempt_enable();
@@ -195,9 +189,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
 			dctx->buffer[i] = 0;
 		preempt_disable();
 		pagefault_disable();
-		enable_kernel_altivec();
 		enable_kernel_vsx();
-		enable_kernel_fp();
 		gcm_ghash_p8(dctx->shash, ctx->htable,
 			     dctx->buffer, GHASH_DIGEST_SIZE);
 		pagefault_enable();