Commit 7f725f41 authored by Eric Biggers, committed by Herbert Xu

crypto: powerpc - convert SPE AES algorithms to skcipher API

Convert the glue code for the PowerPC SPE implementations of AES-ECB,
AES-CBC, AES-CTR, and AES-XTS from the deprecated "blkcipher" API to the
"skcipher" API.  This is needed in order for the blkcipher API to be
removed.
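
For context, a minimal sketch of how an in-kernel caller would drive one of the
converted algorithms through the skcipher API is shown below. This is not part
of the patch; the helper name and the trimmed error handling are illustrative
only, and the request is run synchronously via crypto_wait_req():

	#include <crypto/aes.h>
	#include <crypto/skcipher.h>
	#include <linux/crypto.h>
	#include <linux/scatterlist.h>

	/* Encrypt one AES block in place with the "ecb(aes)" skcipher. */
	static int example_ecb_encrypt_block(const u8 *key, unsigned int keylen,
					     u8 *buf)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		sg_init_one(&sg, buf, AES_BLOCK_SIZE);
		skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
		/* ECB takes no IV, so the iv argument is NULL. */
		skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, NULL);
		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}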

Tested with:

	export ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu-
	make mpc85xx_defconfig
	cat >> .config << EOF
	# CONFIG_MODULES is not set
	# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
	CONFIG_DEBUG_KERNEL=y
	CONFIG_CRYPTO_MANAGER_EXTRA_TESTS=y
	CONFIG_CRYPTO_AES=y
	CONFIG_CRYPTO_CBC=y
	CONFIG_CRYPTO_CTR=y
	CONFIG_CRYPTO_ECB=y
	CONFIG_CRYPTO_XTS=y
	CONFIG_CRYPTO_AES_PPC_SPE=y
	EOF
	make olddefconfig
	make -j32
	qemu-system-ppc -M mpc8544ds -cpu e500 -nographic \
		-kernel arch/powerpc/boot/zImage \
		-append cryptomgr.fuzz_iterations=1000

Note that xts-ppc-spe still fails the comparison tests due to the lack
of ciphertext stealing support.  This is not addressed by this patch.

This patch also cleans up the code by making ->encrypt() and ->decrypt()
call a common function for each of ECB, CBC, and XTS, and by using a
clearer way to compute the length to process at each step.
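
Concretely, each pass of the new walk loops clamps the work to the driver's
per-call limit and to whole AES blocks, then hands the remainder back to the
walk (excerpted from the pattern used in the diff below; the CTR loop
additionally permits a partial final block when it is the tail of the request):

	nbytes = min_t(unsigned int, walk.nbytes, MAX_BYTES);
	nbytes = round_down(nbytes, AES_BLOCK_SIZE);
	/* ... process nbytes bytes between spe_begin()/spe_end() ... */
	err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
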
Signed-off-by: Eric Biggers <ebiggers@google.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 8255e65d
--- a/arch/powerpc/crypto/aes-spe-glue.c
+++ b/arch/powerpc/crypto/aes-spe-glue.c
@@ -17,6 +17,7 @@
 #include <asm/byteorder.h>
 #include <asm/switch_to.h>
 #include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
 #include <crypto/xts.h>
 
 /*
@@ -118,13 +119,19 @@ static int ppc_aes_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	return 0;
 }
 
-static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
+static int ppc_aes_setkey_skcipher(struct crypto_skcipher *tfm,
+				   const u8 *in_key, unsigned int key_len)
+{
+	return ppc_aes_setkey(crypto_skcipher_tfm(tfm), in_key, key_len);
+}
+
+static int ppc_xts_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
 			  unsigned int key_len)
 {
-	struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 	int err;
 
-	err = xts_check_key(tfm, in_key, key_len);
+	err = xts_verify_key(tfm, in_key, key_len);
 	if (err)
 		return err;
@@ -133,7 +140,7 @@ static int ppc_xts_setkey(struct crypto_tfm *tfm, const u8 *in_key,
 	if (key_len != AES_KEYSIZE_128 &&
 	    key_len != AES_KEYSIZE_192 &&
 	    key_len != AES_KEYSIZE_256) {
-		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
 		return -EINVAL;
 	}
@@ -178,201 +185,154 @@ static void ppc_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 	spe_end();
 }
 
-static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes);
+		if (enc)
+			ppc_encrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes);
+		else
+			ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_ecb(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
-
-	return err;
+	return ppc_ecb_crypt(req, true);
 }
 
-static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_ecb_decrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	return ppc_ecb_crypt(req, false);
+}
+
+static int ppc_cbc_crypt(struct skcipher_request *req, bool enc)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv);
+		if (enc)
+			ppc_encrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv);
+		else
+			ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv);
 		spe_end();
 
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_encrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_cbc(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv);
-		spe_end();
-
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
-
-	return err;
+	return ppc_cbc_crypt(req, true);
 }
 
-static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			 struct scatterlist *src, unsigned int nbytes)
+static int ppc_cbc_decrypt(struct skcipher_request *req)
 {
-	struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int pbytes, ubytes;
+	return ppc_cbc_crypt(req, false);
+}
+
+static int ppc_ctr_crypt(struct skcipher_request *req)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
+	err = skcipher_walk_virt(&walk, req, false);
 
-	while ((pbytes = walk.nbytes)) {
-		pbytes = pbytes > MAX_BYTES ? MAX_BYTES : pbytes;
-		pbytes = pbytes == nbytes ?
-			 nbytes : pbytes & ~(AES_BLOCK_SIZE - 1);
-		ubytes = walk.nbytes - pbytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
 		ppc_crypt_ctr(walk.dst.virt.addr, walk.src.virt.addr,
-			      ctx->key_enc, ctx->rounds, pbytes , walk.iv);
+			      ctx->key_enc, ctx->rounds, nbytes, walk.iv);
 		spe_end();
 
-		nbytes -= pbytes;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_crypt(struct skcipher_request *req, bool enc)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	struct ppc_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
+	struct skcipher_walk walk;
+	unsigned int nbytes;
 	int err;
 	u32 *twk;
 
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
+	err = skcipher_walk_virt(&walk, req, false);
 	twk = ctx->key_twk;
 
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
+	while ((nbytes = walk.nbytes) != 0) {
+		nbytes = min_t(unsigned int, nbytes, MAX_BYTES);
+		nbytes = round_down(nbytes, AES_BLOCK_SIZE);
 
 		spe_begin();
-		ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk);
+		if (enc)
+			ppc_encrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_enc, ctx->rounds, nbytes,
+					walk.iv, twk);
+		else
+			ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
+					ctx->key_dec, ctx->rounds, nbytes,
+					walk.iv, twk);
 		spe_end();
 
 		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
+		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
 	return err;
 }
 
-static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			   struct scatterlist *src, unsigned int nbytes)
+static int ppc_xts_encrypt(struct skcipher_request *req)
 {
-	struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
-	struct blkcipher_walk walk;
-	unsigned int ubytes;
-	int err;
-	u32 *twk;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	err = blkcipher_walk_virt(desc, &walk);
-	twk = ctx->key_twk;
-
-	while ((nbytes = walk.nbytes)) {
-		ubytes = nbytes > MAX_BYTES ?
-			 nbytes - MAX_BYTES : nbytes & (AES_BLOCK_SIZE - 1);
-		nbytes -= ubytes;
-
-		spe_begin();
-		ppc_decrypt_xts(walk.dst.virt.addr, walk.src.virt.addr,
-				ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk);
-		spe_end();
-
-		twk = NULL;
-		err = blkcipher_walk_done(desc, &walk, ubytes);
-	}
-
-	return err;
+	return ppc_xts_crypt(req, true);
+}
+
+static int ppc_xts_decrypt(struct skcipher_request *req)
+{
+	return ppc_xts_crypt(req, false);
 }
 
 /*
@@ -381,9 +341,9 @@ static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
  * This improves IPsec thoughput by another few percent. Additionally we assume
  * that AES context is always aligned to at least 8 bytes because it is created
  * with kmalloc() in the crypto infrastructure
- *
  */
-static struct crypto_alg aes_algs[] = { {
+
+static struct crypto_alg aes_cipher_alg = {
 	.cra_name = "aes",
 	.cra_driver_name = "aes-ppc-spe",
 	.cra_priority = 300,
@@ -401,77 +361,55 @@ static struct crypto_alg aes_algs[] = { {
 			.cia_decrypt = ppc_aes_decrypt
 		}
 	}
-}, {
-	.cra_name = "ecb(aes)",
-	.cra_driver_name = "ecb-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_ecb_encrypt,
-			.decrypt = ppc_ecb_decrypt,
-		}
-	}
-}, {
-	.cra_name = "cbc(aes)",
-	.cra_driver_name = "cbc-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_cbc_encrypt,
-			.decrypt = ppc_cbc_decrypt,
-		}
-	}
-}, {
-	.cra_name = "ctr(aes)",
-	.cra_driver_name = "ctr-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = 1,
-	.cra_ctxsize = sizeof(struct ppc_aes_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
-			.min_keysize = AES_MIN_KEY_SIZE,
-			.max_keysize = AES_MAX_KEY_SIZE,
-			.ivsize = AES_BLOCK_SIZE,
-			.setkey = ppc_aes_setkey,
-			.encrypt = ppc_ctr_crypt,
-			.decrypt = ppc_ctr_crypt,
-		}
-	}
-}, {
-	.cra_name = "xts(aes)",
-	.cra_driver_name = "xts-ppc-spe",
-	.cra_priority = 300,
-	.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
-	.cra_blocksize = AES_BLOCK_SIZE,
-	.cra_ctxsize = sizeof(struct ppc_xts_ctx),
-	.cra_alignmask = 0,
-	.cra_type = &crypto_blkcipher_type,
-	.cra_module = THIS_MODULE,
-	.cra_u = {
-		.blkcipher = {
+};
+
+static struct skcipher_alg aes_skcipher_algs[] = {
+	{
+		.base.cra_name = "ecb(aes)",
+		.base.cra_driver_name = "ecb-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_ecb_encrypt,
+		.decrypt = ppc_ecb_decrypt,
+	}, {
+		.base.cra_name = "cbc(aes)",
+		.base.cra_driver_name = "cbc-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_cbc_encrypt,
+		.decrypt = ppc_cbc_decrypt,
+	}, {
+		.base.cra_name = "ctr(aes)",
+		.base.cra_driver_name = "ctr-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = 1,
+		.base.cra_ctxsize = sizeof(struct ppc_aes_ctx),
+		.base.cra_module = THIS_MODULE,
+		.min_keysize = AES_MIN_KEY_SIZE,
+		.max_keysize = AES_MAX_KEY_SIZE,
+		.ivsize = AES_BLOCK_SIZE,
+		.setkey = ppc_aes_setkey_skcipher,
+		.encrypt = ppc_ctr_crypt,
+		.decrypt = ppc_ctr_crypt,
+		.chunksize = AES_BLOCK_SIZE,
+	}, {
+		.base.cra_name = "xts(aes)",
+		.base.cra_driver_name = "xts-ppc-spe",
+		.base.cra_priority = 300,
+		.base.cra_blocksize = AES_BLOCK_SIZE,
+		.base.cra_ctxsize = sizeof(struct ppc_xts_ctx),
+		.base.cra_module = THIS_MODULE,
 		.min_keysize = AES_MIN_KEY_SIZE * 2,
 		.max_keysize = AES_MAX_KEY_SIZE * 2,
 		.ivsize = AES_BLOCK_SIZE,
@@ -479,17 +417,28 @@ static struct crypto_alg aes_algs[] = { {
 		.encrypt = ppc_xts_encrypt,
 		.decrypt = ppc_xts_decrypt,
 	}
-	}
-} };
+};
 
 static int __init ppc_aes_mod_init(void)
 {
-	return crypto_register_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	int err;
+
+	err = crypto_register_alg(&aes_cipher_alg);
+	if (err)
+		return err;
+
+	err = crypto_register_skciphers(aes_skcipher_algs,
+					ARRAY_SIZE(aes_skcipher_algs));
+	if (err)
+		crypto_unregister_alg(&aes_cipher_alg);
+	return err;
 }
 
 static void __exit ppc_aes_mod_fini(void)
 {
-	crypto_unregister_algs(aes_algs, ARRAY_SIZE(aes_algs));
+	crypto_unregister_alg(&aes_cipher_alg);
+	crypto_unregister_skciphers(aes_skcipher_algs,
+				    ARRAY_SIZE(aes_skcipher_algs));
 }
 
 module_init(ppc_aes_mod_init);
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1125,6 +1125,7 @@ config CRYPTO_AES_SPARC64
 config CRYPTO_AES_PPC_SPE
 	tristate "AES cipher algorithms (PPC SPE)"
 	depends on PPC && SPE
+	select CRYPTO_BLKCIPHER
 	help
 	  AES cipher algorithms (FIPS-197). Additionally the acceleration
 	  for popular block cipher modes ECB, CBC, CTR and XTS is supported.