Commit cff9a175 authored by Antoine Tenart, committed by Herbert Xu

crypto: inside-secure - move cache result dma mapping to request

Under heavy traffic, the cache DMA mapping gets overwritten by concurrent
requests, as the DMA address is stored in a context shared by all requests
made on the same tfm. This patch moves that information into the per-hash
request context, where no other request can overwrite it.

Fixes: 1b44c5a6 ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver")
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent b8592027
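
The race is easiest to see in miniature. The sketch below is illustrative
only, not driver code (tfm_ctx and hash_req are hypothetical names): with
the DMA handle in a context shared by every request on a tfm, a second
in-flight request's dma_map_single() result silently replaces the first's,
so the first request later unmaps an address it does not own. Keeping the
handle in the request itself, as this patch does, removes the shared slot
entirely.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Before: one DMA slot shared by every request on the tfm. While request
 * A is in flight, request B's mapping lands in these same fields, so A's
 * completion unmaps B's buffer and B's own handle is lost. */
struct tfm_ctx {
	dma_addr_t cache_dma;
	unsigned int cache_sz;
};

/* After: each request owns both the buffer and its mapping, so concurrent
 * requests on one tfm cannot interfere with each other. */
struct hash_req {
	u8 cache[64];		/* stand-in for the driver's 64-byte cache[] */
	dma_addr_t cache_dma;
	unsigned int cache_sz;
};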
--- a/drivers/crypto/inside-secure/safexcel.c
+++ b/drivers/crypto/inside-secure/safexcel.c
@@ -537,20 +537,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 		       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
 }
 
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req)
-{
-	struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
-
-	if (ctx->cache) {
-		dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
-				 DMA_TO_DEVICE);
-		kfree(ctx->cache);
-		ctx->cache = NULL;
-		ctx->cache_sz = 0;
-	}
-}
-
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct safexcel_command_desc *cdesc;
--- a/drivers/crypto/inside-secure/safexcel.h
+++ b/drivers/crypto/inside-secure/safexcel.h
@@ -578,11 +578,6 @@ struct safexcel_context {
 	int ring;
 	bool needs_inv;
 	bool exit_inv;
-
-	/* Used for ahash requests */
-	void *cache;
-	dma_addr_t cache_dma;
-	unsigned int cache_sz;
 };
 
 /*
@@ -606,8 +601,6 @@ struct safexcel_inv_result {
 
 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
-void safexcel_free_context(struct safexcel_crypto_priv *priv,
-			   struct crypto_async_request *req);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
 			      dma_addr_t ctxr_dma, int ring,
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -43,6 +43,9 @@ struct safexcel_ahash_req {
 	u64 processed;
 
 	u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+	dma_addr_t cache_dma;
+	unsigned int cache_sz;
+
 	u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
 };
 
@@ -165,7 +168,11 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring
 		sreq->result_dma = 0;
 	}
 
-	safexcel_free_context(priv, async);
+	if (sreq->cache_dma) {
+		dma_unmap_single(priv->dev, sreq->cache_dma, sreq->cache_sz,
+				 DMA_TO_DEVICE);
+		sreq->cache_dma = 0;
+	}
 
 	cache_len = sreq->len - sreq->processed;
 	if (cache_len)
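
The completion path now unmaps against per-request state. Zeroing cache_dma
right after the unmap is what keeps the cleanup safe to reach more than
once: any later pass sees 0 and skips it. A condensed sketch of this half
of the lifecycle, reusing the hypothetical hash_req from above:

/* Completion side: release this request's mapping exactly once, then
 * clear the handle so no other cleanup path unmaps it again. */
static void req_unmap_cache(struct device *dev, struct hash_req *req)
{
	if (req->cache_dma) {
		dma_unmap_single(dev, req->cache_dma, req->cache_sz,
				 DMA_TO_DEVICE);
		req->cache_dma = 0;
	}
}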
@@ -227,24 +234,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 
 	/* Add a command descriptor for the cached data, if any */
 	if (cache_len) {
-		ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
-		if (!ctx->base.cache) {
-			ret = -ENOMEM;
-			goto unlock;
-		}
-		memcpy(ctx->base.cache, req->cache, cache_len);
-		ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
-						     cache_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
-			ret = -EINVAL;
-			goto free_cache;
-		}
+		req->cache_dma = dma_map_single(priv->dev, req->cache,
+						cache_len, DMA_TO_DEVICE);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
+			return -EINVAL;
 
-		ctx->base.cache_sz = cache_len;
+		req->cache_sz = cache_len;
 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
 						 (cache_len == len),
-						 ctx->base.cache_dma,
-						 cache_len, len,
+						 req->cache_dma, cache_len, len,
 						 ctx->base.ctxr_dma);
 		if (IS_ERR(first_cdesc)) {
 			ret = PTR_ERR(first_cdesc);
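
Because req->cache already holds the cached bytes in request-owned memory,
the send path can map that buffer in place: the kzalloc() bounce buffer,
the memcpy() into it, and the -ENOMEM failure path all become unnecessary.
Roughly, with the same hypothetical names as above (plus <linux/errno.h>):

/* Send side: map the request-owned cache buffer directly for the device
 * to read; no intermediate allocation or copy is required. */
static int req_map_cache(struct device *dev, struct hash_req *req,
			 unsigned int len)
{
	req->cache_dma = dma_map_single(dev, req->cache, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, req->cache_dma))
		return -EINVAL;
	req->cache_sz = len;
	return 0;
}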
@@ -328,16 +326,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 
 unmap_cache:
-	if (ctx->base.cache_dma) {
-		dma_unmap_single(priv->dev, ctx->base.cache_dma,
-				 ctx->base.cache_sz, DMA_TO_DEVICE);
-		ctx->base.cache_sz = 0;
+	if (req->cache_dma) {
+		dma_unmap_single(priv->dev, req->cache_dma, req->cache_sz,
+				 DMA_TO_DEVICE);
+		req->cache_sz = 0;
 	}
-free_cache:
-	kfree(ctx->base.cache);
-	ctx->base.cache = NULL;
-unlock:
 	spin_unlock_bh(&priv->ring[ring].egress_lock);
 
 	return ret;