Commit 6f3196b7 authored by Harald Freudenberger, committed by Vasily Gorbik

s390/crypto: Rework on paes implementation

There have been some findings during Eric Biggers' rework of the
paes implementation which this patch addresses:

A very minor finding within paes ctr: when the cpacf instruction
returns with only part of the data en/decrypted, walk_done() was
mistakenly called with the counter for all the data. Please note this
can only happen when kmctr returns because the protected key became
invalid in the middle of the operation, and that in turn only happens
with suspend and resume on a system with a different effective
wrapping key.
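
For illustration, a minimal user-space sketch of the accounting
(hypothetical byte counts, not the kernel code): kmctr may process only
k of the n requested bytes, so the walk must be completed with
nbytes - k, not nbytes - n.

#include <assert.h>

int main(void)
{
	unsigned int nbytes = 96;	/* bytes left in this walk step */
	unsigned int n = 96;		/* bytes handed to kmctr */
	unsigned int k = 32;		/* bytes kmctr actually processed */

	assert(nbytes - n == 0);	/* old code: walk wrongly "done" */
	assert(nbytes - k == 64);	/* fixed: 64 bytes remain to redo */
	return 0;
}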

Eric Biggers pointed out that the context struct within the tfm struct
may be shared among multiple kernel threads. So this rework uses a
spinlock per context to protect reads and writes of the protected key
blob value. The en/decrypt functions copy the protected key(s) into a
param struct at the start and no longer work with the protected key
within the context. If the protected key in the param struct becomes
invalid, the key material is converted to protected key(s) again and
the context is updated under the spinlock. Race conditions are still
possible and may result in the very same protected key value being
written more than once, so the spinlock only needs to make sure the
protected key(s) within the context are updated consistently.
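
A user-space pthreads model of this pattern (types and names are
illustrative, not the kernel's): each request snapshots the protected
key under the per-context lock, works only on its private copy, and
republishes a freshly converted key under the same lock if the copy
turns out to be invalid.

#include <pthread.h>
#include <string.h>

#define PROTKEYSIZE 64			/* placeholder for MAXPROTKEYSIZE */

struct ctx_model {
	unsigned char protkey[PROTKEYSIZE];	/* shared, lock-protected */
	pthread_spinlock_t pk_lock;
};

/* stand-in for __paes_keyblob2pkey(): reconvert the key material */
static int reconvert(unsigned char out[PROTKEYSIZE])
{
	memset(out, 0x5a, PROTKEYSIZE);
	return 0;
}

static int crypt_request(struct ctx_model *ctx)
{
	unsigned char param[PROTKEYSIZE];	/* private per-request copy */
	unsigned char fresh[PROTKEYSIZE];

	pthread_spin_lock(&ctx->pk_lock);
	memcpy(param, ctx->protkey, PROTKEYSIZE);
	pthread_spin_unlock(&ctx->pk_lock);

	/* ... cipher works on param; on "key invalid" it would do: ... */
	if (reconvert(fresh))
		return -1;
	pthread_spin_lock(&ctx->pk_lock);
	/* racing writers all store an equally valid key: last one wins */
	memcpy(ctx->protkey, fresh, PROTKEYSIZE);
	pthread_spin_unlock(&ctx->pk_lock);
	memcpy(param, fresh, PROTKEYSIZE);	/* retry with the new copy */
	return 0;
}

int main(void)
{
	struct ctx_model ctx;

	pthread_spin_init(&ctx.pk_lock, PTHREAD_PROCESS_PRIVATE);
	memset(ctx.protkey, 0, PROTKEYSIZE);
	return crypt_request(&ctx);
}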

The ctr page is now locked by a mutex instead of a spinlock. A similar
patch went into the aes_s390 code as a result of the complaint
"sleeping function called from invalid context at ...algapi.h". See
commit 1c2c7029 ("s390/crypto: fix possible sleep during spinlock
aquired") for more.

During testing with instrumented code another issue with the xts
en/decrypt function was revealed: the retry cleared the running iv
value and thus led to wrongly en/decrypted data.
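
A toy model of that bug (illustrative chain function, not the real
tweak math): the tweak is carried forward from block to block, so a
mid-stream key refresh must leave it alone; recomputing it from the
original iv, which is what the removed "goto retry" effectively did,
breaks every block that follows.

#include <assert.h>

static unsigned int next_tweak(unsigned int t)
{
	return t * 2 + 1;		/* stands in for the real tweak step */
}

int main(void)
{
	unsigned int iv = 3;
	unsigned int tweak = next_tweak(iv);	/* start of request */

	tweak = next_tweak(tweak);		/* some blocks processed */

	/* key becomes invalid here; the fix refreshes only the key ... */
	unsigned int kept = tweak;
	/* ... while the buggy path restarted the chain from the iv: */
	unsigned int restarted = next_tweak(iv);

	assert(kept != restarted);		/* running state was lost */
	return 0;
}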

Tested and verified with additional test cases via the AF_ALG interface
and with additional selftests within the kernel (which will be made
available as soon as possible).
Reported-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent 657480d9
@@ -5,7 +5,7 @@
  * s390 implementation of the AES Cipher Algorithm with protected keys.
  *
  * s390 Version:
- *   Copyright IBM Corp. 2017,2019
+ *   Copyright IBM Corp. 2017,2020
  *   Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  *		Harald Freudenberger <freude@de.ibm.com>
  */
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 #include <linux/spinlock.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
@@ -36,7 +37,7 @@
 #define PAES_MAX_KEYSIZE 256
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
@@ -82,16 +83,18 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
 struct s390_paes_ctx {
 	struct key_blob kb;
 	struct pkey_protkey pk;
+	spinlock_t pk_lock;
 	unsigned long fc;
 };
 
 struct s390_pxts_ctx {
 	struct key_blob kb[2];
 	struct pkey_protkey pk[2];
+	spinlock_t pk_lock;
 	unsigned long fc;
 };
 
-static inline int __paes_convert_key(struct key_blob *kb,
+static inline int __paes_keyblob2pkey(struct key_blob *kb,
 				      struct pkey_protkey *pk)
 {
 	int i, ret;
@@ -106,22 +109,18 @@ static inline int __paes_convert_key(struct key_blob *kb,
 	return ret;
 }
 
-static int __paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
 {
-	unsigned long fc;
+	struct pkey_protkey pkey;
 
-	if (__paes_convert_key(&ctx->kb, &ctx->pk))
+	if (__paes_keyblob2pkey(&ctx->kb, &pkey))
 		return -EINVAL;
 
-	/* Pick the correct function code based on the protected key type */
-	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
-		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
-		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
-
-	/* Check if the function code is available */
-	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
-
-	return ctx->fc ? 0 : -EINVAL;
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(&ctx->pk, &pkey, sizeof(pkey));
+	spin_unlock_bh(&ctx->pk_lock);
+
+	return 0;
 }
 
 static int ecb_paes_init(struct crypto_skcipher *tfm)
@@ -129,6 +128,7 @@ static int ecb_paes_init(struct crypto_skcipher *tfm)
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	ctx->kb.key = NULL;
+	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
 }
@@ -140,6 +140,24 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm)
 	_free_kb_keybuf(&ctx->kb);
 }
 
+static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
+{
+	unsigned long fc;
+
+	if (__paes_convert_key(ctx))
+		return -EINVAL;
+
+	/* Pick the correct function code based on the protected key type */
+	fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
+		(ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
+		(ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
+
+	/* Check if the function code is available */
+	ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+
+	return ctx->fc ? 0 : -EINVAL;
+}
+
 static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 			    unsigned int key_len)
 {
@@ -151,7 +169,7 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
 	if (rc)
 		return rc;
 
-	return __paes_set_key(ctx);
+	return __ecb_paes_set_key(ctx);
 }
 
 static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -161,18 +179,31 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 	struct skcipher_walk walk;
 	unsigned int nbytes, n, k;
 	int ret;
+	struct {
+		u8 key[MAXPROTKEYSIZE];
+	} param;
 
 	ret = skcipher_walk_virt(&walk, req, false);
+	if (ret)
+		return ret;
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	spin_unlock_bh(&ctx->pk_lock);
+
 	while ((nbytes = walk.nbytes) != 0) {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
-		k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
+		k = cpacf_km(ctx->fc | modifier, &param,
 			     walk.dst.virt.addr, walk.src.virt.addr, n);
 		if (k)
 			ret = skcipher_walk_done(&walk, nbytes - k);
 		if (k < n) {
-			if (__paes_set_key(ctx) != 0)
+			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
 	return ret;
@@ -210,6 +241,7 @@ static int cbc_paes_init(struct crypto_skcipher *tfm)
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	ctx->kb.key = NULL;
+	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
 }
@@ -221,11 +253,11 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm)
 	_free_kb_keybuf(&ctx->kb);
 }
 
-static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
 {
 	unsigned long fc;
 
-	if (__paes_convert_key(&ctx->kb, &ctx->pk))
+	if (__paes_convert_key(ctx))
 		return -EINVAL;
 
 	/* Pick the correct function code based on the protected key type */
@@ -268,8 +300,12 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 	ret = skcipher_walk_virt(&walk, req, false);
 	if (ret)
 		return ret;
+
 	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
+	spin_lock_bh(&ctx->pk_lock);
 	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	spin_unlock_bh(&ctx->pk_lock);
+
 	while ((nbytes = walk.nbytes) != 0) {
 		/* only use complete blocks */
 		n = nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -280,9 +316,11 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 			ret = skcipher_walk_done(&walk, nbytes - k);
 		}
 		if (k < n) {
-			if (__cbc_paes_set_key(ctx) != 0)
+			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
+			spin_lock_bh(&ctx->pk_lock);
 			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
 	return ret;
@@ -322,6 +360,7 @@ static int xts_paes_init(struct crypto_skcipher *tfm)
 	ctx->kb[0].key = NULL;
 	ctx->kb[1].key = NULL;
+	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
 }
@@ -334,12 +373,27 @@ static void xts_paes_exit(struct crypto_skcipher *tfm)
 	_free_kb_keybuf(&ctx->kb[1]);
 }
 
-static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
+{
+	struct pkey_protkey pkey0, pkey1;
+
+	if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
+	    __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+		return -EINVAL;
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
+	memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
+	spin_unlock_bh(&ctx->pk_lock);
+
+	return 0;
+}
+
+static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
 {
 	unsigned long fc;
 
-	if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
-	    __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
+	if (__xts_paes_convert_key(ctx))
 		return -EINVAL;
 
 	if (ctx->pk[0].type != ctx->pk[1].type)
@@ -416,15 +470,17 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 	ret = skcipher_walk_virt(&walk, req, false);
 	if (ret)
 		return ret;
+
 	keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
 	offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
-retry:
+
 	memset(&pcc_param, 0, sizeof(pcc_param));
 	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
+	spin_lock_bh(&ctx->pk_lock);
 	memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
-	cpacf_pcc(ctx->fc, pcc_param.key + offset);
-
 	memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
+	spin_unlock_bh(&ctx->pk_lock);
+	cpacf_pcc(ctx->fc, pcc_param.key + offset);
 	memcpy(xts_param.init, pcc_param.xts, 16);
 
 	while ((nbytes = walk.nbytes) != 0) {
@@ -435,11 +491,15 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
 		if (k)
 			ret = skcipher_walk_done(&walk, nbytes - k);
 		if (k < n) {
-			if (__xts_paes_set_key(ctx) != 0)
+			if (__xts_paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
-			goto retry;
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(xts_param.key + offset,
+			       ctx->pk[0].protkey, keylen);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
 
 	return ret;
 }
@@ -476,6 +536,7 @@ static int ctr_paes_init(struct crypto_skcipher *tfm)
 	struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
 
 	ctx->kb.key = NULL;
+	spin_lock_init(&ctx->pk_lock);
 
 	return 0;
 }
@@ -487,11 +548,11 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm)
 	_free_kb_keybuf(&ctx->kb);
 }
 
-static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
 {
 	unsigned long fc;
 
-	if (__paes_convert_key(&ctx->kb, &ctx->pk))
+	if (__paes_convert_key(ctx))
 		return -EINVAL;
 
 	/* Pick the correct function code based on the protected key type */
@@ -543,49 +604,65 @@ static int ctr_paes_crypt(struct skcipher_request *req)
 	struct skcipher_walk walk;
 	unsigned int nbytes, n, k;
 	int ret, locked;
-
-	locked = spin_trylock(&ctrblk_lock);
+	struct {
+		u8 key[MAXPROTKEYSIZE];
+	} param;
 
 	ret = skcipher_walk_virt(&walk, req, false);
+	if (ret)
+		return ret;
+
+	spin_lock_bh(&ctx->pk_lock);
+	memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+	spin_unlock_bh(&ctx->pk_lock);
+
+	locked = mutex_trylock(&ctrblk_lock);
+
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
 		n = AES_BLOCK_SIZE;
 		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
 			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
 		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
-		k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
+		k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
 				walk.src.virt.addr, n, ctrptr);
 		if (k) {
 			if (ctrptr == ctrblk)
 				memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
 				       AES_BLOCK_SIZE);
 			crypto_inc(walk.iv, AES_BLOCK_SIZE);
-			ret = skcipher_walk_done(&walk, nbytes - n);
+			ret = skcipher_walk_done(&walk, nbytes - k);
 		}
 		if (k < n) {
-			if (__ctr_paes_set_key(ctx) != 0) {
+			if (__paes_convert_key(ctx)) {
 				if (locked)
-					spin_unlock(&ctrblk_lock);
+					mutex_unlock(&ctrblk_lock);
 				return skcipher_walk_done(&walk, -EIO);
 			}
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 	}
 	if (locked)
-		spin_unlock(&ctrblk_lock);
+		mutex_unlock(&ctrblk_lock);
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */
 	if (nbytes) {
 		while (1) {
-			if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
+			if (cpacf_kmctr(ctx->fc, &param, buf,
 					walk.src.virt.addr, AES_BLOCK_SIZE,
 					walk.iv) == AES_BLOCK_SIZE)
 				break;
-			if (__ctr_paes_set_key(ctx) != 0)
+			if (__paes_convert_key(ctx))
 				return skcipher_walk_done(&walk, -EIO);
+			spin_lock_bh(&ctx->pk_lock);
+			memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+			spin_unlock_bh(&ctx->pk_lock);
 		}
 		memcpy(walk.dst.virt.addr, buf, nbytes);
 		crypto_inc(walk.iv, AES_BLOCK_SIZE);
-		ret = skcipher_walk_done(&walk, 0);
+		ret = skcipher_walk_done(&walk, nbytes);
 	}
 
 	return ret;