Commit a260436c authored by Gilad Ben-Yossef, committed by Herbert Xu

crypto: ccree - use fine grained DMA mapping dir

Use a fine-grained specification of DMA mapping directions
in certain cases, allowing a more optimized operation
as well as silencing a harmless, though pesky,
dma-debug warning.
Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
Reported-by: Corentin Labbe <clabbe.montjoie@gmail.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 476c9ab7
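For readers less familiar with the DMA API, the sketch below illustrates the general pattern the patch adopts: for an out-of-place request the device only reads the source buffer and only writes the destination, so the two scatterlists can be mapped DMA_TO_DEVICE and DMA_FROM_DEVICE respectively, while an in-place request (src == dst) still needs DMA_BIDIRECTIONAL. This is a hypothetical, simplified example; the function and parameter names are illustrative and are not taken from the ccree driver.

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/*
	 * Hypothetical helper, not part of the ccree driver: map the src/dst
	 * scatterlists of a request with the narrowest DMA direction that is
	 * still correct.
	 */
	static int example_map_request(struct device *dev,
				       struct scatterlist *src, int src_nents,
				       struct scatterlist *dst, int dst_nents)
	{
		/* Out-of-place: device only reads src; in-place: reads and writes it. */
		enum dma_data_direction src_dir =
			(src != dst) ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

		if (!dma_map_sg(dev, src, src_nents, src_dir))
			return -ENOMEM;

		/* Out-of-place: the device only ever writes the destination. */
		if (src != dst && !dma_map_sg(dev, dst, dst_nents, DMA_FROM_DEVICE)) {
			dma_unmap_sg(dev, src, src_nents, src_dir);
			return -ENOMEM;
		}

		return 0;
	}

Mapping with the tightest direction lets non-coherent platforms skip an unnecessary cache writeback or invalidate on one side of the transfer, and it matches what dma-debug expects for buffers the device never writes (or never reads).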
@@ -356,12 +356,14 @@ void cc_unmap_cipher_request(struct device *dev, void *ctx,
 			      req_ctx->mlli_params.mlli_dma_addr);
 	}
 
-	dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
-	dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
-
 	if (src != dst) {
-		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE);
+		dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE);
 		dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst));
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
+	} else {
+		dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_BIDIRECTIONAL);
+		dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src));
 	}
 }
 
@@ -377,6 +379,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	u32 dummy = 0;
 	int rc = 0;
 	u32 mapped_nents = 0;
+	int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	req_ctx->dma_buf_type = CC_DMA_BUF_DLLI;
 	mlli_params->curr_pool = NULL;
@@ -399,7 +402,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 	}
 
 	/* Map the src SGL */
-	rc = cc_map_sg(dev, src, nbytes, DMA_BIDIRECTIONAL, &req_ctx->in_nents,
+	rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents);
 	if (rc)
 		goto cipher_exit;
@@ -416,7 +419,7 @@ int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx,
 		}
 	} else {
 		/* Map the dst sg */
-		rc = cc_map_sg(dev, dst, nbytes, DMA_BIDIRECTIONAL,
+		rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE,
 			       &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES,
 			       &dummy, &mapped_nents);
 		if (rc)
@@ -456,6 +459,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
 	unsigned int hw_iv_size = areq_ctx->hw_iv_size;
 	struct cc_drvdata *drvdata = dev_get_drvdata(dev);
+	int src_direction = (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
 
 	if (areq_ctx->mac_buf_dma_addr) {
 		dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -514,13 +518,11 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
 		sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
 		areq_ctx->assoclen, req->cryptlen);
 
-	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents,
-		     DMA_BIDIRECTIONAL);
+	dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction);
 	if (req->src != req->dst) {
 		dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
 			sg_virt(req->dst));
-		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents,
-			     DMA_BIDIRECTIONAL);
+		dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE);
 	}
 	if (drvdata->coherent &&
 	    areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT &&
@@ -843,7 +845,7 @@ static int cc_aead_chain_data(struct cc_drvdata *drvdata,
 	else
 		size_for_map -= authsize;
 
-	rc = cc_map_sg(dev, req->dst, size_for_map, DMA_BIDIRECTIONAL,
+	rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE,
 		       &areq_ctx->dst.mapped_nents,
 		       LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes,
 		       &dst_mapped_nents);
@@ -1056,7 +1058,8 @@ int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req)
 		size_to_map += authsize;
 	}
 
-	rc = cc_map_sg(dev, req->src, size_to_map, DMA_BIDIRECTIONAL,
+	rc = cc_map_sg(dev, req->src, size_to_map,
+		       (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL),
 		       &areq_ctx->src.mapped_nents,
 		       (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES +
 			LLI_MAX_NUM_OF_DATA_ENTRIES),