Commit 41f70e88 authored by Li Zhong, committed by Thadeu Lima de Souza Cascardo

crypto: vmx - disable preemption to enable vsx in aes_ctr.c

BugLink: http://bugs.launchpad.net/bugs/1732698

[ Upstream commit 7dede913 ]

Some preemptible check warnings were reported from enable_kernel_vsx(). This
patch disables preemption in aes_ctr.c before enabling vsx, and they are now
consistent with other files in the same directory.
Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
parent d505e61a
@@ -80,11 +80,13 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
 	int ret;
 	struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_vsx();
 	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	disable_kernel_vsx();
 	pagefault_enable();
+	preempt_enable();
 
 	ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
 	return ret;
@@ -99,11 +101,13 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx,
 	u8 *dst = walk->dst.virt.addr;
 	unsigned int nbytes = walk->nbytes;
 
+	preempt_disable();
 	pagefault_disable();
 	enable_kernel_vsx();
 	aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key);
 	disable_kernel_vsx();
 	pagefault_enable();
+	preempt_enable();
 
 	crypto_xor(keystream, src, nbytes);
 	memcpy(dst, keystream, nbytes);
@@ -132,6 +136,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 		blkcipher_walk_init(&walk, dst, src, nbytes);
 		ret = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 		while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+			preempt_disable();
 			pagefault_disable();
 			enable_kernel_vsx();
 			aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr,
@@ -143,6 +148,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
 					    walk.iv);
 			disable_kernel_vsx();
 			pagefault_enable();
+			preempt_enable();
 
 			/* We need to update IV mostly for last bytes/round */
 			inc = (nbytes & AES_BLOCK_MASK) / AES_BLOCK_SIZE;
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment