Commit 0f533e67 authored by Eric Biggers, committed by Herbert Xu

crypto: aegis - fix handling chunked inputs

The generic AEGIS implementations all fail the improved AEAD tests
because they produce the wrong result with some data layouts.  The issue
is that they assume that if the skcipher_walk API gives 'nbytes' not
aligned to the walksize (a.k.a. walk.stride), then it is the end of the
data.  In fact, this can happen before the end.  Fix them.

Fixes: f606a88e ("crypto: aegis - Add generic AEGIS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 42e95d1f
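
For context, the corrected walk loop follows the pattern sketched below. This is a minimal illustration, not the patch itself or buildable kernel code: crypt_chunk() and struct aegis_state are placeholders for the algorithm-specific pieces, while walk.nbytes, walk.total, walk.stride, the virt.addr pointers, round_down(), and skcipher_walk_done() are the actual skcipher_walk API used in the diffs below. The key point is that a step smaller than the stride can occur in the middle of the data, so only the final step may process a partial block, and any unprocessed remainder must be handed back to skcipher_walk_done().

/*
 * Sketch of the fixed loop (illustration only, not the patch itself).
 * crypt_chunk() and struct aegis_state are placeholders; the walk is
 * assumed to have been initialized already.
 */
static void process_crypt(struct aegis_state *state,
			  struct skcipher_walk *walk)
{
	while (walk->nbytes) {
		unsigned int nbytes = walk->nbytes;

		/*
		 * A step shorter than the stride is not necessarily the
		 * last one, so process only whole blocks unless this is
		 * the final chunk of the request.
		 */
		if (nbytes < walk->total)
			nbytes = round_down(nbytes, walk->stride);

		crypt_chunk(state, walk->dst.virt.addr, walk->src.virt.addr,
			    nbytes);

		/* Report the unprocessed tail so the walk carries it over. */
		skcipher_walk_done(walk, walk->nbytes - nbytes);
	}
}
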
@@ -286,19 +286,19 @@ static void crypto_aegis128_process_crypt(struct aegis_state *state,
 					    const struct aegis128_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
@@ -349,19 +349,19 @@ static void crypto_aegis128l_process_crypt(struct aegis_state *state,
 					     const struct aegis128l_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
@@ -299,19 +299,19 @@ static void crypto_aegis256_process_crypt(struct aegis_state *state,
 					    const struct aegis256_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *src, *dst;
-	unsigned int chunksize;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
-		chunksize = walk.nbytes;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, chunksize);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }