Commit 6a8487a1 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: scompress - defer allocation of scratch buffer to first use

The scompress code allocates 2 x 128 KB of scratch buffers for each CPU,
so that clients of the async API can use synchronous implementations
even from atomic context. However, on systems such as Cavium Thunderx
(which has 96 cores), this adds up to a non-negligible 24 MB. Also,
32-bit systems may prefer to use their precious vmalloc space for other
things, especially since there don't appear to be any clients for the
async compression API yet.

So let's defer allocation of the scratch buffers until the first time
we allocate an acompress cipher based on an scompress implementation.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent cc4d110e
...@@ -65,11 +65,6 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg) ...@@ -65,11 +65,6 @@ static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
seq_puts(m, "type : scomp\n"); seq_puts(m, "type : scomp\n");
} }
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
return 0;
}
static void crypto_scomp_free_scratches(void * __percpu *scratches) static void crypto_scomp_free_scratches(void * __percpu *scratches)
{ {
int i; int i;
...@@ -134,6 +129,17 @@ static int crypto_scomp_alloc_all_scratches(void) ...@@ -134,6 +129,17 @@ static int crypto_scomp_alloc_all_scratches(void)
return 0; return 0;
} }
static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
int ret;
mutex_lock(&scomp_lock);
ret = crypto_scomp_alloc_all_scratches();
mutex_unlock(&scomp_lock);
return ret;
}
static void crypto_scomp_sg_free(struct scatterlist *sgl) static void crypto_scomp_sg_free(struct scatterlist *sgl)
{ {
int i, n; int i, n;
...@@ -241,6 +247,10 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm) ...@@ -241,6 +247,10 @@ static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
struct crypto_scomp **ctx = crypto_tfm_ctx(tfm); struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
crypto_free_scomp(*ctx); crypto_free_scomp(*ctx);
mutex_lock(&scomp_lock);
crypto_scomp_free_all_scratches();
mutex_unlock(&scomp_lock);
} }
int crypto_init_scomp_ops_async(struct crypto_tfm *tfm) int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
...@@ -317,40 +327,18 @@ static const struct crypto_type crypto_scomp_type = { ...@@ -317,40 +327,18 @@ static const struct crypto_type crypto_scomp_type = {
int crypto_register_scomp(struct scomp_alg *alg) int crypto_register_scomp(struct scomp_alg *alg)
{ {
struct crypto_alg *base = &alg->base; struct crypto_alg *base = &alg->base;
int ret = -ENOMEM;
mutex_lock(&scomp_lock);
if (crypto_scomp_alloc_all_scratches())
goto error;
base->cra_type = &crypto_scomp_type; base->cra_type = &crypto_scomp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK; base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS; base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;
ret = crypto_register_alg(base); return crypto_register_alg(base);
if (ret)
goto error;
mutex_unlock(&scomp_lock);
return ret;
error:
crypto_scomp_free_all_scratches();
mutex_unlock(&scomp_lock);
return ret;
} }
EXPORT_SYMBOL_GPL(crypto_register_scomp); EXPORT_SYMBOL_GPL(crypto_register_scomp);
int crypto_unregister_scomp(struct scomp_alg *alg) int crypto_unregister_scomp(struct scomp_alg *alg)
{ {
int ret; return crypto_unregister_alg(&alg->base);
mutex_lock(&scomp_lock);
ret = crypto_unregister_alg(&alg->base);
crypto_scomp_free_all_scratches();
mutex_unlock(&scomp_lock);
return ret;
} }
EXPORT_SYMBOL_GPL(crypto_unregister_scomp); EXPORT_SYMBOL_GPL(crypto_unregister_scomp);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment