Commit 359e893e authored by Corentin Labbe, committed by Herbert Xu

crypto: sun8i-ss - rework handling of IV

sun8i-ss fails to handle IVs when decrypting multiple SGs in-place.
It must back up the last block of each source SG so that it can be
used later as the IV of the following one.
At the same time, remove the per-request allocations used for storing
all the IVs.

Fixes: f08fcced ("crypto: allwinner - Add sun8i-ss cryptographic offloader")
Signed-off-by: Corentin Labbe <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 22d03a0a
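For context on the bug being fixed: in a chained mode such as CBC, the IV for each portion of a request is the last ciphertext block of the portion before it. When decrypting in place, that block has already been overwritten with plaintext by the time the next portion is processed, so the driver has to save the trailing block of each source SG up front. Below is a minimal, self-contained sketch of the pitfall; it uses a toy XOR "block cipher" and hypothetical names (hw_cbc_decrypt() merely models the offloader, it is not driver API):

#include <assert.h>
#include <string.h>

#define BS 16	/* block/IV size */

/* Toy one-block "cipher" (XOR with a constant): invertible and
 * self-contained, so the CBC bookkeeping is all the example shows. */
static void toy_block(unsigned char *blk)
{
	for (int k = 0; k < BS; k++)
		blk[k] ^= 0x5a;
}

static void cbc_encrypt(unsigned char *buf, size_t len, unsigned char *iv)
{
	for (size_t off = 0; off < len; off += BS) {
		for (int k = 0; k < BS; k++)
			buf[off + k] ^= iv[k];	/* XOR with chain value */
		toy_block(buf + off);		/* "encrypt" the block */
		memcpy(iv, buf + off, BS);	/* ciphertext chains onward */
	}
}

/* Models the offloader: decrypts whole blocks in place from a
 * caller-supplied IV and does NOT hand the final chaining value back. */
static void hw_cbc_decrypt(unsigned char *buf, size_t len,
			   const unsigned char *iv)
{
	unsigned char chain[BS], c[BS];

	memcpy(chain, iv, BS);
	for (size_t off = 0; off < len; off += BS) {
		memcpy(c, buf + off, BS);	/* keep the ciphertext */
		toy_block(buf + off);
		for (int k = 0; k < BS; k++)
			buf[off + k] ^= chain[k];
		memcpy(chain, c, BS);
	}
}

int main(void)
{
	unsigned char pt[64], buf[64], iv[BS], next_iv[BS];

	memset(pt, 7, sizeof(pt));
	memcpy(buf, pt, sizeof(buf));
	memset(iv, 1, sizeof(iv));
	cbc_encrypt(buf, sizeof(buf), iv);

	/* decrypt in place in two 32-byte portions, as with two SGs */
	memset(iv, 1, sizeof(iv));
	for (size_t off = 0; off < sizeof(buf); off += 32) {
		/* save the portion's last ciphertext block BEFORE it is
		 * overwritten with plaintext: it is the next portion's IV */
		memcpy(next_iv, buf + off + 32 - BS, BS);
		hw_cbc_decrypt(buf + off, 32, iv);
		memcpy(iv, next_iv, BS);
	}
	assert(memcmp(buf, pt, sizeof(pt)) == 0);
	return 0;
}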
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
@@ -93,6 +93,68 @@ static int sun8i_ss_cipher_fallback(struct skcipher_request *areq)
 	return err;
 }
+
+static int sun8i_ss_setup_ivs(struct skcipher_request *areq)
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+	struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm);
+	struct sun8i_ss_dev *ss = op->ss;
+	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
+	struct scatterlist *sg = areq->src;
+	unsigned int todo, offset;
+	unsigned int len = areq->cryptlen;
+	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
+	int i = 0;
+	u32 a;
+	int err;
+
+	rctx->ivlen = ivsize;
+	if (rctx->op_dir & SS_DECRYPTION) {
+		offset = areq->cryptlen - ivsize;
+		scatterwalk_map_and_copy(sf->biv, areq->src, offset,
+					 ivsize, 0);
+	}
+
+	/* we need to copy all IVs from the source in case DMA is bidirectional */
+	while (sg && len) {
+		if (sg_dma_len(sg) == 0) {
+			sg = sg_next(sg);
+			continue;
+		}
+		if (i == 0)
+			memcpy(sf->iv[0], areq->iv, ivsize);
+		a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE);
+		if (dma_mapping_error(ss->dev, a)) {
+			memzero_explicit(sf->iv[i], ivsize);
+			dev_err(ss->dev, "Cannot DMA MAP IV\n");
+			err = -EFAULT;
+			goto dma_iv_error;
+		}
+		rctx->p_iv[i] = a;
+		/* we only need to set up the other IVs when decrypting */
+		if (rctx->op_dir & SS_ENCRYPTION)
+			return 0;
+		todo = min(len, sg_dma_len(sg));
+		len -= todo;
+		i++;
+		if (i < MAX_SG) {
+			offset = sg->length - ivsize;
+			scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0);
+		}
+		rctx->niv = i;
+		sg = sg_next(sg);
+	}
+
+	return 0;
+dma_iv_error:
+	i--;
+	while (i >= 0) {
+		/* unmap and wipe each IV that was mapped before the failure */
+		dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+		memzero_explicit(sf->iv[i], ivsize);
+		i--;
+	}
+	return err;
+}
+
 static int sun8i_ss_cipher(struct skcipher_request *areq)
 {
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
@@ -101,9 +163,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
 	struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq);
 	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
 	struct sun8i_ss_alg_template *algt;
+	struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];
 	struct scatterlist *sg;
 	unsigned int todo, len, offset, ivsize;
-	void *backup_iv = NULL;
 	int nr_sgs = 0;
 	int nr_sgd = 0;
 	int err = 0;
@@ -134,30 +196,9 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
 	ivsize = crypto_skcipher_ivsize(tfm);
 	if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) {
-		rctx->ivlen = ivsize;
-		rctx->biv = kzalloc(ivsize, GFP_KERNEL | GFP_DMA);
-		if (!rctx->biv) {
-			err = -ENOMEM;
+		err = sun8i_ss_setup_ivs(areq);
+		if (err)
 			goto theend_key;
-		}
-		if (rctx->op_dir & SS_DECRYPTION) {
-			backup_iv = kzalloc(ivsize, GFP_KERNEL);
-			if (!backup_iv) {
-				err = -ENOMEM;
-				goto theend_key;
-			}
-			offset = areq->cryptlen - ivsize;
-			scatterwalk_map_and_copy(backup_iv, areq->src, offset,
-						 ivsize, 0);
-		}
-		memcpy(rctx->biv, areq->iv, ivsize);
-		rctx->p_iv = dma_map_single(ss->dev, rctx->biv, rctx->ivlen,
-					    DMA_TO_DEVICE);
-		if (dma_mapping_error(ss->dev, rctx->p_iv)) {
-			dev_err(ss->dev, "Cannot DMA MAP IV\n");
-			err = -ENOMEM;
-			goto theend_iv;
-		}
 	}
 	if (areq->src == areq->dst) {
 		nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src),
@@ -243,21 +284,19 @@ static int sun8i_ss_cipher(struct skcipher_request *areq)
 	}

 theend_iv:
-	if (rctx->p_iv)
-		dma_unmap_single(ss->dev, rctx->p_iv, rctx->ivlen,
-				 DMA_TO_DEVICE);
 	if (areq->iv && ivsize > 0) {
-		if (rctx->biv) {
-			offset = areq->cryptlen - ivsize;
-			if (rctx->op_dir & SS_DECRYPTION) {
-				memcpy(areq->iv, backup_iv, ivsize);
-				kfree_sensitive(backup_iv);
-			} else {
-				scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
-							 ivsize, 0);
-			}
-			kfree(rctx->biv);
+		for (i = 0; i < rctx->niv; i++) {
+			dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE);
+			memzero_explicit(sf->iv[i], ivsize);
+		}
+
+		offset = areq->cryptlen - ivsize;
+		if (rctx->op_dir & SS_DECRYPTION) {
+			memcpy(areq->iv, sf->biv, ivsize);
+			memzero_explicit(sf->biv, ivsize);
+		} else {
+			scatterwalk_map_and_copy(areq->iv, areq->dst, offset,
+						 ivsize, 0);
 		}
 	}
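The dma_iv_error path in sun8i_ss_setup_ivs() above is the usual partial-unwind idiom: on failure at index i, release, in reverse order, exactly the i mappings that already succeeded, and make sure the index moves on every pass so the loop terminates. A generic sketch of the idiom, with hypothetical map_one()/unmap_one() helpers standing in for dma_map_single()/dma_unmap_single():

#include <stdio.h>

/* map_one()/unmap_one() are hypothetical stand-ins for
 * dma_map_single()/dma_unmap_single() on sf->iv[i]. */
static int map_one(int i)
{
	return i == 3 ? -1 : 0;		/* simulate failure at index 3 */
}

static void unmap_one(int i)
{
	printf("unmapped %d\n", i);	/* prints 2, 1, 0 */
}

static int map_all(int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (map_one(i))
			goto err;
	}
	return 0;
err:
	/* unwind i-1 down to 0; the decrement must happen on every
	 * iteration or the loop never exits */
	while (--i >= 0)
		unmap_one(i);
	return -1;
}

int main(void)
{
	return map_all(8) ? 0 : 1;	/* expect the failure path here */
}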
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
@@ -66,6 +66,7 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx,
 		      const char *name)
 {
 	int flow = rctx->flow;
+	unsigned int ivlen = rctx->ivlen;
 	u32 v = SS_START;
 	int i;
@@ -104,15 +105,14 @@ int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx,
 	mutex_lock(&ss->mlock);
 	writel(rctx->p_key, ss->base + SS_KEY_ADR_REG);

-	if (i == 0) {
-		if (rctx->p_iv)
-			writel(rctx->p_iv, ss->base + SS_IV_ADR_REG);
-	} else {
-		if (rctx->biv) {
-			if (rctx->op_dir == SS_ENCRYPTION)
-				writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+	if (ivlen) {
+		if (rctx->op_dir == SS_ENCRYPTION) {
+			if (i == 0)
+				writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG);
 			else
-				writel(rctx->t_src[i - 1].addr + rctx->t_src[i - 1].len * 4 - rctx->ivlen, ss->base + SS_IV_ADR_REG);
+				writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG);
+		} else {
+			writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG);
 		}
 	}
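A detail in the encryption branch above that is easy to miss: the t_dst[] lengths are stored in 32-bit words, so the previous destination segment ends at addr + len * 4, and the chained IV (the last ciphertext block the hardware just wrote) starts ivlen bytes before that end. A standalone sketch of the arithmetic, with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t addr = 0x40001000;	/* DMA address of previous dst segment */
	uint32_t len_words = 64;	/* segment length, in 32-bit words */
	uint32_t ivlen = 16;		/* AES block / IV size in bytes */

	uint32_t seg_end = addr + len_words * 4;	/* 256 bytes past addr */
	uint32_t iv_addr = seg_end - ivlen;		/* its last 16 bytes */

	printf("chained IV at 0x%08x\n", iv_addr);	/* 0x400010f0 */
	return 0;
}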
@@ -464,7 +464,7 @@ static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i)
  */
 static int allocate_flows(struct sun8i_ss_dev *ss)
 {
-	int i, err;
+	int i, j, err;

 	ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),
 				 GFP_KERNEL);
@@ -474,6 +474,18 @@ static int allocate_flows(struct sun8i_ss_dev *ss)
 	for (i = 0; i < MAXFLOW; i++) {
 		init_completion(&ss->flows[i].complete);

+		ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+						GFP_KERNEL | GFP_DMA);
+		if (!ss->flows[i].biv) {
+			err = -ENOMEM;
+			goto error_engine;
+		}
+
+		for (j = 0; j < MAX_SG; j++) {
+			ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE,
+							  GFP_KERNEL | GFP_DMA);
+			if (!ss->flows[i].iv[j]) {
+				err = -ENOMEM;
+				goto error_engine;
+			}
+		}
+
 		ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true);
 		if (!ss->flows[i].engine) {
 			dev_err(ss->dev, "Cannot allocate engine\n");
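These preallocations are the other half of the commit: the IV buffers that used to be kzalloc()ed on every request are now owned by the flow, obtained once at probe time through devm_kmalloc() so they are released automatically with the device, which leaves the request path allocation-free. A reduced userspace sketch of the pattern, with hypothetical struct and function names (malloc() stands in for devm_kmalloc()):

#include <stdlib.h>

#define MAX_SG	8
#define IVSIZE	16	/* AES_BLOCK_SIZE */

/* per-flow buffers, allocated once and reused by every request */
struct flow_bufs {
	unsigned char *iv[MAX_SG];
	unsigned char *biv;
};

/* probe-time setup: the only place that allocates */
static int flow_bufs_init(struct flow_bufs *f)
{
	int j;

	f->biv = malloc(IVSIZE);
	if (!f->biv)
		return -1;
	for (j = 0; j < MAX_SG; j++) {
		f->iv[j] = malloc(IVSIZE);
		if (!f->iv[j])
			return -1;	/* devm would unwind for us */
	}
	return 0;
}

int main(void)
{
	struct flow_bufs f;

	return flow_bufs_init(&f) ? 1 : 0;
}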
--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
+++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss.h
@@ -121,11 +121,15 @@ struct sginfo {
  * @complete: completion for the current task on this flow
  * @status: set to 1 by interrupt if task is done
  * @stat_req: number of requests done by this flow
+ * @iv: list of IVs to use for each step
+ * @biv: buffer which contains the backed-up IV
  */
 struct sun8i_ss_flow {
 	struct crypto_engine *engine;
 	struct completion complete;
 	int status;
+	u8 *iv[MAX_SG];
+	u8 *biv;
 #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG
 	unsigned long stat_req;
 #endif
@@ -164,28 +168,28 @@ struct sun8i_ss_dev {
  * @t_src: list of mapped SGs with their size
  * @t_dst: list of mapped SGs with their size
  * @p_key: DMA address of the key
- * @p_iv: DMA address of the IV
+ * @p_iv: DMA addresses of the IVs
+ * @niv: number of IVs DMA-mapped
  * @method: current algorithm for this request
  * @op_mode: op_mode for this request
  * @op_dir: direction (encrypt vs decrypt) for this request
  * @flow: the flow to use for this request
- * @ivlen: size of biv
+ * @ivlen: size of the IVs
  * @keylen: keylen for this request
- * @biv: buffer which contains the IV
  * @fallback_req: request struct for invoking the fallback skcipher TFM
  */
 struct sun8i_cipher_req_ctx {
 	struct sginfo t_src[MAX_SG];
 	struct sginfo t_dst[MAX_SG];
 	u32 p_key;
-	u32 p_iv;
+	u32 p_iv[MAX_SG];
+	int niv;
 	u32 method;
 	u32 op_mode;
 	u32 op_dir;
 	int flow;
 	unsigned int ivlen;
 	unsigned int keylen;
-	void *biv;
 	struct skcipher_request fallback_req; // keep at the end
 };