Commit c821f6ab authored by Ard Biesheuvel, committed by Herbert Xu

crypto: skcipher - introduce walksize attribute for SIMD algos

In some cases, SIMD algorithms can only perform optimally when
allowed to operate on multiple input blocks in parallel. This is
especially true for bit slicing algorithms, which typically take
the same amount of time to process a single block as to process 8
blocks in parallel. However, other SIMD algorithms may benefit from
larger strides as well.

So add a walksize attribute to the skcipher algorithm definition, and
wire it up to the skcipher walk API. To avoid confusion between the
skcipher and AEAD attributes, rename the skcipher_walk chunksize
attribute to 'stride', and set it from the walksize (in the skcipher
case) or from the chunksize (in the AEAD case).
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent d79b5d0b
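
For context, a bit-slicing driver would opt in by keeping chunksize at the correctness granularity (the block size) and raising walksize to its efficient SIMD width. The following is a minimal sketch, not part of this patch: the driver name, struct aesbs_ctx and all aesbs_* callbacks are invented; only the .walksize field (and its defaulting, see skcipher_prepare_alg() below) comes from this change.

#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>

/* hypothetical expanded-key context */
struct aesbs_ctx {
	u32 rk[AES_MAX_KEYLENGTH_U32];
	int rounds;
};

/* hypothetical SIMD callbacks, defined elsewhere in the driver */
static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *key,
			unsigned int len);
static int aesbs_encrypt(struct skcipher_request *req);
static int aesbs_decrypt(struct skcipher_request *req);

static struct skcipher_alg aes_bs_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-bitsliced",
	.base.cra_priority	= 300,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aesbs_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	/* bit slicing: 8 blocks cost the same as 1, so ask the walk
	 * layer for 8-block strides wherever possible */
	.walksize		= 8 * AES_BLOCK_SIZE,

	.setkey			= aesbs_setkey,
	.encrypt		= aesbs_encrypt,
	.decrypt		= aesbs_decrypt,
};

Registration is unchanged (crypto_register_skcipher()); as skcipher_prepare_alg() in the diff below shows, algorithms that leave walksize at zero keep the old behaviour, since walksize then defaults to chunksize.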
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -185,12 +185,12 @@ void skcipher_walk_complete(struct skcipher_walk *walk, int err)
 		data = p->data;
 		if (!data) {
 			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
-			data = skcipher_get_spot(data, walk->chunksize);
+			data = skcipher_get_spot(data, walk->stride);
 		}

 		scatterwalk_copychunks(data, &p->dst, p->len, 1);

-		if (offset_in_page(p->data) + p->len + walk->chunksize >
+		if (offset_in_page(p->data) + p->len + walk->stride >
 		    PAGE_SIZE)
 			free_page((unsigned long)p->data);
@@ -299,7 +299,7 @@ static int skcipher_next_copy(struct skcipher_walk *walk)
 	p->len = walk->nbytes;
 	skcipher_queue_write(walk, p);

-	if (offset_in_page(walk->page) + walk->nbytes + walk->chunksize >
+	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
 	    PAGE_SIZE)
 		walk->page = NULL;
 	else
@@ -344,7 +344,7 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
 			 SKCIPHER_WALK_DIFF);

 	n = walk->total;
-	bsize = min(walk->chunksize, max(n, walk->blocksize));
+	bsize = min(walk->stride, max(n, walk->blocksize));
 	n = scatterwalk_clamp(&walk->in, n);
 	n = scatterwalk_clamp(&walk->out, n);
@@ -393,7 +393,7 @@ static int skcipher_copy_iv(struct skcipher_walk *walk)
 	unsigned a = crypto_tfm_ctx_alignment() - 1;
 	unsigned alignmask = walk->alignmask;
 	unsigned ivsize = walk->ivsize;
-	unsigned bs = walk->chunksize;
+	unsigned bs = walk->stride;
 	unsigned aligned_bs;
 	unsigned size;
 	u8 *iv;
@@ -463,7 +463,7 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk,
 		       SKCIPHER_WALK_SLEEP : 0;

 	walk->blocksize = crypto_skcipher_blocksize(tfm);
-	walk->chunksize = crypto_skcipher_chunksize(tfm);
+	walk->stride = crypto_skcipher_walksize(tfm);
 	walk->ivsize = crypto_skcipher_ivsize(tfm);
 	walk->alignmask = crypto_skcipher_alignmask(tfm);
@@ -525,7 +525,7 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
 		walk->flags &= ~SKCIPHER_WALK_SLEEP;

 	walk->blocksize = crypto_aead_blocksize(tfm);
-	walk->chunksize = crypto_aead_chunksize(tfm);
+	walk->stride = crypto_aead_chunksize(tfm);
 	walk->ivsize = crypto_aead_ivsize(tfm);
 	walk->alignmask = crypto_aead_alignmask(tfm);
@@ -821,6 +821,7 @@ static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
 	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
 	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
 	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
+	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
 }

 #ifdef CONFIG_NET
@@ -893,11 +894,14 @@ static int skcipher_prepare_alg(struct skcipher_alg *alg)
 {
 	struct crypto_alg *base = &alg->base;

-	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8)
+	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
+	    alg->walksize > PAGE_SIZE / 8)
 		return -EINVAL;

 	if (!alg->chunksize)
 		alg->chunksize = base->cra_blocksize;
+	if (!alg->walksize)
+		alg->walksize = alg->chunksize;

 	base->cra_type = &crypto_skcipher_type2;
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -66,7 +66,7 @@ struct skcipher_walk {
 	int flags;

 	unsigned int blocksize;
-	unsigned int chunksize;
+	unsigned int stride;
 	unsigned int alignmask;
 };
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -115,6 +115,9 @@ struct crypto_skcipher {
  *	    IV of exactly that size to perform the encrypt or decrypt operation.
  * @chunksize: Equal to the block size except for stream ciphers such as
  *	       CTR where it is set to the underlying block size.
+ * @walksize: Equal to the chunk size except in cases where the algorithm is
+ *	      considerably more efficient if it can operate on multiple chunks
+ *	      in parallel. Should be a multiple of chunksize.
  * @base: Definition of a generic crypto algorithm.
  *
  * All fields except @ivsize are mandatory and must be filled.
@@ -131,6 +134,7 @@
 	unsigned int max_keysize;
 	unsigned int ivsize;
 	unsigned int chunksize;
+	unsigned int walksize;

 	struct crypto_alg base;
 };
@@ -289,6 +293,19 @@ static inline unsigned int crypto_skcipher_alg_chunksize(
 	return alg->chunksize;
 }

+static inline unsigned int crypto_skcipher_alg_walksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->walksize;
+}
+
 /**
  * crypto_skcipher_chunksize() - obtain chunk size
  * @tfm: cipher handle
@@ -306,6 +323,23 @@ static inline unsigned int crypto_skcipher_chunksize(
 	return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
 }

+/**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal if the concern does not
+ * apply)
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
 /**
  * crypto_skcipher_blocksize() - obtain block size of cipher
  * @tfm: cipher handle
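
To round off the earlier sketch: this is roughly how the hypothetical driver's encrypt path would consume the walk. skcipher_walk_virt(), skcipher_walk_done() and the walk stride field are the real API touched by this patch; aesbs_ecb_encrypt_8x() is an invented SIMD helper.

static int aesbs_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/*
		 * With walksize == 8 * AES_BLOCK_SIZE, nbytes is an
		 * 8-block multiple on every iteration but the last,
		 * so the SIMD helper almost always runs at full width.
		 */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		aesbs_ecb_encrypt_8x(walk.dst.virt.addr, walk.src.virt.addr,
				     ctx, nbytes / AES_BLOCK_SIZE);

		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
	}

	return err;
}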