Commit 289b2623 authored by Leilei Zhao's avatar Leilei Zhao Committed by Herbert Xu

crypto: atmel-aes - sync the buf used in DMA or CPU

The input buffer and output buffer are mapped for DMA transfer
in the Atmel AES driver. But they are also used by the CPU when
the requested crypt length is not bigger than the threshold
value 16. The buffers will be cached in cache lines when the CPU
accesses them. When DMA uses the buffers again, the memory
can happen to be flushed by the cache while DMA starts the transfer.

So use the APIs dma_sync_single_for_device and dma_sync_single_for_cpu
around the DMA transfers to ensure DMA coherence, so that the CPU
always accesses the correct value. This fixes the issue that the
encrypted result periodically goes wrong when doing performance
tests with OpenSSH.
Signed-off-by: Leilei Zhao <leilei.zhao@atmel.com>
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 8a10eb8d
@@ -315,10 +315,10 @@ static int atmel_aes_crypt_dma(struct atmel_aes_dev *dd,
 	dd->dma_size = length;

-	if (!(dd->flags & AES_FLAGS_FAST)) {
-		dma_sync_single_for_device(dd->dev, dma_addr_in, length,
-					   DMA_TO_DEVICE);
-	}
+	dma_sync_single_for_device(dd->dev, dma_addr_in, length,
+				   DMA_TO_DEVICE);
+	dma_sync_single_for_device(dd->dev, dma_addr_out, length,
+				   DMA_FROM_DEVICE);

 	if (dd->flags & AES_FLAGS_CFB8) {
 		dd->dma_lch_in.dma_conf.dst_addr_width =
@@ -391,6 +391,11 @@ static int atmel_aes_crypt_cpu_start(struct atmel_aes_dev *dd)
 {
 	dd->flags &= ~AES_FLAGS_DMA;

+	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
+				dd->dma_size, DMA_TO_DEVICE);
+	dma_sync_single_for_cpu(dd->dev, dd->dma_addr_out,
+				dd->dma_size, DMA_FROM_DEVICE);
+
 	/* use cache buffers */
 	dd->nb_in_sg = atmel_aes_sg_length(dd->req, dd->in_sg);
 	if (!dd->nb_in_sg)
...@@ -459,6 +464,9 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd) ...@@ -459,6 +464,9 @@ static int atmel_aes_crypt_dma_start(struct atmel_aes_dev *dd)
dd->flags |= AES_FLAGS_FAST; dd->flags |= AES_FLAGS_FAST;
} else { } else {
dma_sync_single_for_cpu(dd->dev, dd->dma_addr_in,
dd->dma_size, DMA_TO_DEVICE);
/* use cache buffers */ /* use cache buffers */
count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset, count = atmel_aes_sg_copy(&dd->in_sg, &dd->in_offset,
dd->buf_in, dd->buflen, dd->total, 0); dd->buf_in, dd->buflen, dd->total, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment