Commit d8c6d188 authored by Ard Biesheuvel, committed by Herbert Xu

crypto: chelsio - permit asynchronous skcipher as fallback

Even though the chelsio driver implements asynchronous versions of
cbc(aes) and xts(aes), the fallbacks it allocates are required to be
synchronous. Given that SIMD based software implementations are usually
asynchronous as well, even though they rarely complete asynchronously
(typically only when the request was made from softirq context while
SIMD was already in use in the task context it interrupted), these
implementations are disregarded, and either the generic C version or
another table based version implemented in assembler is selected
instead.
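
(A minimal illustrative sketch, not code from this patch: the sync-only
restriction comes from the allocation mask. crypto_alloc_sync_skcipher()
ORs CRYPTO_ALG_ASYNC into the mask, so only implementations with the
ASYNC bit clear can match; allocating a plain skcipher leaves async
implementations, SIMD included, eligible. The helper names below are
hypothetical.)

	#include <crypto/skcipher.h>

	static struct crypto_sync_skcipher *alloc_sync_fallback(const char *name)
	{
		/* The crypto core ORs CRYPTO_ALG_ASYNC into the mask here,
		 * so only synchronous algorithms (generic C, table based
		 * assembler) can be selected. */
		return crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	}

	static struct crypto_skcipher *alloc_async_capable_fallback(const char *name)
	{
		/* Plain skcipher allocation: asynchronous implementations,
		 * e.g. SIMD based ones, are eligible too. */
		return crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
	}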

Since falling back to synchronous AES is not only a performance issue, but
potentially a security issue as well (table based AES is not time
invariant), let's fix this by allocating an ordinary skcipher as the
fallback, and invoking it with the completion routine that was given to
the outer request.
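
(Distilled into a standalone sketch with illustrative names, not the
driver's own: the per-request context embeds the fallback's
skcipher_request as its last member, the tfm reqsize is enlarged to make
room for the fallback's own request context, and the outer request's
completion callback and data are forwarded so an asynchronous fallback
can complete the original request directly.)

	#include <crypto/internal/skcipher.h>	/* skcipher_request_ctx(), crypto_skcipher_set_reqsize() */

	struct my_req_ctx {				/* hypothetical request context */
		/* driver-private per-request state ... */
		struct skcipher_request fallback_req;	/* must remain the last member */
	};

	/* At tfm init time, reserve room behind my_req_ctx for the
	 * fallback's own request context. */
	static void my_init_reqsize(struct crypto_skcipher *tfm,
				    struct crypto_skcipher *fallback)
	{
		crypto_skcipher_set_reqsize(tfm, sizeof(struct my_req_ctx) +
						 crypto_skcipher_reqsize(fallback));
	}

	static int my_fallback_crypt(struct crypto_skcipher *fallback,
				     struct skcipher_request *req, bool decrypt)
	{
		struct my_req_ctx *rctx = skcipher_request_ctx(req);

		skcipher_request_set_tfm(&rctx->fallback_req, fallback);
		/* Forward the outer request's flags and completion routine,
		 * so an asynchronous fallback completes the original request. */
		skcipher_request_set_callback(&rctx->fallback_req, req->base.flags,
					      req->base.complete, req->base.data);
		skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst,
					   req->cryptlen, req->iv);

		return decrypt ? crypto_skcipher_decrypt(&rctx->fallback_req) :
				 crypto_skcipher_encrypt(&rctx->fallback_req);
	}

Because the fallback request lives in the request context rather than on
the stack, the call may return -EINPROGRESS and finish later, which is
exactly what the synchronous on-stack variant could not allow.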
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 413b61ce
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -690,26 +690,22 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
 	return min(srclen, dstlen);
 }
 
-static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
-				u32 flags,
-				struct scatterlist *src,
-				struct scatterlist *dst,
-				unsigned int nbytes,
+static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+				struct skcipher_request *req,
 				u8 *iv,
 				unsigned short op_type)
 {
+	struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
 	int err;
 
-	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
-
-	skcipher_request_set_sync_tfm(subreq, cipher);
-	skcipher_request_set_callback(subreq, flags, NULL, NULL);
-	skcipher_request_set_crypt(subreq, src, dst,
-				   nbytes, iv);
+	skcipher_request_set_tfm(&reqctx->fallback_req, cipher);
+	skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags,
+				      req->base.complete, req->base.data);
+	skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst,
+				   req->cryptlen, iv);
 
-	err = op_type ? crypto_skcipher_decrypt(subreq) :
-		crypto_skcipher_encrypt(subreq);
-	skcipher_request_zero(subreq);
+	err = op_type ? crypto_skcipher_decrypt(&reqctx->fallback_req) :
+		crypto_skcipher_encrypt(&reqctx->fallback_req);
 
 	return err;
 
@@ -924,11 +920,11 @@ static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher,
 {
 	struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
 
-	crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+	crypto_skcipher_clear_flags(ablkctx->sw_cipher,
 				CRYPTO_TFM_REQ_MASK);
-	crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+	crypto_skcipher_set_flags(ablkctx->sw_cipher,
 				cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-	return crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+	return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
 }
 
 static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher,
@@ -1206,12 +1202,7 @@ static int chcr_handle_cipher_resp(struct skcipher_request *req,
 				      req);
 		memcpy(req->iv, reqctx->init_iv, IV);
 		atomic_inc(&adap->chcr_stats.fallback);
-		err = chcr_cipher_fallback(ablkctx->sw_cipher,
-					   req->base.flags,
-					   req->src,
-					   req->dst,
-					   req->cryptlen,
-					   req->iv,
+		err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv,
 					   reqctx->op);
 		goto complete;
 	}
@@ -1341,11 +1332,7 @@ static int process_cipher(struct skcipher_request *req,
 		chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
 				      req);
 fallback:       atomic_inc(&adap->chcr_stats.fallback);
-	err = chcr_cipher_fallback(ablkctx->sw_cipher,
-				   req->base.flags,
-				   req->src,
-				   req->dst,
-				   req->cryptlen,
+	err = chcr_cipher_fallback(ablkctx->sw_cipher, req,
 				   subtype ==
 				   CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ?
 				   reqctx->iv : req->iv,
@@ -1486,14 +1473,15 @@ static int chcr_init_tfm(struct crypto_skcipher *tfm)
 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
-				CRYPTO_ALG_NEED_FALLBACK);
+	ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0,
+				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
 	init_completion(&ctx->cbc_aes_aio_done);
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+					 crypto_skcipher_reqsize(ablkctx->sw_cipher));
 
 	return chcr_device_init(ctx);
 }
@@ -1507,13 +1495,14 @@ static int chcr_rfc3686_init(struct crypto_skcipher *tfm)
 	/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
 	 * cannot be used as fallback in chcr_handle_cipher_response
 	 */
-	ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
-				CRYPTO_ALG_NEED_FALLBACK);
+	ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
+				CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(ablkctx->sw_cipher)) {
 		pr_err("failed to allocate fallback for %s\n", alg->base.cra_name);
 		return PTR_ERR(ablkctx->sw_cipher);
 	}
-	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx));
+	crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) +
+				    crypto_skcipher_reqsize(ablkctx->sw_cipher));
 	return chcr_device_init(ctx);
 }
 
@@ -1523,7 +1512,7 @@ static void chcr_exit_tfm(struct crypto_skcipher *tfm)
 	struct chcr_context *ctx = crypto_skcipher_ctx(tfm);
 	struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
 
-	crypto_free_sync_skcipher(ablkctx->sw_cipher);
+	crypto_free_skcipher(ablkctx->sw_cipher);
 }
 
 static int get_alg_config(struct algo_param *params,
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -171,7 +171,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
 }
 
 struct ablk_ctx {
-	struct crypto_sync_skcipher *sw_cipher;
+	struct crypto_skcipher *sw_cipher;
 	__be32 key_ctx_hdr;
 	unsigned int enckey_len;
 	unsigned char ciph_mode;
@@ -305,6 +305,7 @@ struct chcr_skcipher_req_ctx {
 	u8 init_iv[CHCR_MAX_CRYPTO_IV_LEN];
 	u16 txqidx;
 	u16 rxqidx;
+	struct skcipher_request fallback_req;	// keep at the end
 };
 
 struct chcr_alg_template {