Commit 0ba3c026 authored by Herbert Xu

crypto: skcipher - Unmap pages after an external error

skcipher_walk_done may be called with an error by internal or
external callers.  For internal callers we shouldn't unmap
pages, but for external callers we must unmap any pages that are
still in use.

This patch distinguishes between the two cases by checking whether
walk->nbytes is zero or not.  For internal callers, we now set
walk->nbytes to zero prior to the call.  For external callers,
walk->nbytes has always been non-zero (as zero is used to indicate
the termination of a walk).
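The convention described above is only stated in prose here, so the following is a minimal, self-contained user-space sketch of it — not the kernel implementation, and all toy_* names are invented for illustration.  It models the rule the patch relies on: a zero walk->nbytes means the error came from an internal path where nothing is mapped, while a non-zero walk->nbytes means an external caller aborted mid-step and the mapped pages must be released.

/*
 * Minimal sketch (assumed names, not the kernel code) of the
 * "nbytes == 0 means internal caller" convention.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_walk {
	unsigned int nbytes;	/* bytes mapped for the current step */
	bool mapped;		/* are src/dst pages currently mapped? */
};

static void toy_unmap(struct toy_walk *w)
{
	w->mapped = false;
	printf("unmapped pages\n");
}

/* Mirrors the shape of the new early exit in the done() path. */
static int toy_walk_done(struct toy_walk *w, int err)
{
	if (!w->nbytes)		/* internal caller: nothing is mapped */
		goto finish;

	toy_unmap(w);		/* external caller: pages must be unmapped */
	w->nbytes = 0;
finish:
	return err < 0 ? err : 0;
}

int main(void)
{
	struct toy_walk w = { .nbytes = 16, .mapped = true };

	/* External caller reporting an error: pages get unmapped. */
	toy_walk_done(&w, -22 /* -EINVAL */);

	/* Internal error path: nbytes was zeroed first, so no unmap. */
	w.nbytes = 0;
	toy_walk_done(&w, -12 /* -ENOMEM */);
	return 0;
}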
Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Fixes: 5cde0af2 ("[CRYPTO] cipher: Added block cipher type")
Cc: <stable@vger.kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 7b865ec1
@@ -90,7 +90,7 @@ static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
 	return max(start, end_page);
 }
 
-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
 	u8 *addr;
@@ -98,19 +98,21 @@ static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 	addr = skcipher_get_spot(addr, bsize);
 	scatterwalk_copychunks(addr, &walk->out, bsize,
 			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
+	return 0;
 }
 
 int skcipher_walk_done(struct skcipher_walk *walk, int err)
 {
-	unsigned int n; /* bytes processed */
-	bool more;
-
-	if (unlikely(err < 0))
-		goto finish;
-
-	n = walk->nbytes - err;
-	walk->total -= n;
-	more = (walk->total != 0);
+	unsigned int n = walk->nbytes;
+	unsigned int nbytes = 0;
+
+	if (!n)
+		goto finish;
+
+	if (likely(err >= 0)) {
+		n -= err;
+		nbytes = walk->total - n;
+	}
 
 	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
 				    SKCIPHER_WALK_SLOW |
@@ -126,7 +128,7 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 		memcpy(walk->dst.virt.addr, walk->page, n);
 		skcipher_unmap_dst(walk);
 	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
-		if (err) {
+		if (err > 0) {
 			/*
 			 * Didn't process all bytes. Either the algorithm is
 			 * broken, or this was the last step and it turned out
@@ -134,27 +136,29 @@ int skcipher_walk_done(struct skcipher_walk *walk, int err)
 			 * the algorithm requires it.
 			 */
 			err = -EINVAL;
-			goto finish;
-		}
-		skcipher_done_slow(walk, n);
-		goto already_advanced;
+			nbytes = 0;
+		} else
+			n = skcipher_done_slow(walk, n);
 	}
 
+	if (err > 0)
+		err = 0;
+
+	walk->total = nbytes;
+	walk->nbytes = 0;
+
 	scatterwalk_advance(&walk->in, n);
 	scatterwalk_advance(&walk->out, n);
-already_advanced:
-	scatterwalk_done(&walk->in, 0, more);
-	scatterwalk_done(&walk->out, 1, more);
+	scatterwalk_done(&walk->in, 0, nbytes);
+	scatterwalk_done(&walk->out, 1, nbytes);
 
-	if (more) {
+	if (nbytes) {
 		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
 			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
 		return skcipher_walk_next(walk);
 	}
-	err = 0;
-finish:
-	walk->nbytes = 0;
 
+finish:
 	/* Short-circuit for the common/fast path. */
 	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
 		goto out;