Commit 9744fec9 authored by Ofer Heifetz, committed by Herbert Xu

crypto: inside-secure - remove request list to improve performance

The main goal of this patch is to improve driver performance by moving the
crypto request from a list to an RDR ring shadow.

This is possible since there is a single producer and a single consumer for
this RDR request shadow, and one ring descriptor is left unused.
This change eliminates the spinlock taken when accessing the descriptor
ring and the need to dynamically allocate memory for each crypto request.

The crypto request is placed in the first RDR shadow descriptor only
if there are enough descriptors; when the result handler is invoked,
it fetches the first result descriptor from the RDR shadow.
Signed-off-by: Ofer Heifetz <oferh@marvell.com>
Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 62469879
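For context, the sketch below illustrates the single-producer/single-consumer shadow idea this patch introduces: the request pointer is stored in an array at the index derived from its first result descriptor, and read back at the index of the descriptor currently being consumed, so no lock and no per-request allocation are needed. This is an illustrative sketch only; the demo_* names are hypothetical and not part of the driver.

/*
 * Illustrative sketch of the RDR "shadow" array (demo_* names are
 * hypothetical, not driver code). The producer records the crypto
 * request at the index of the first result descriptor it queued; the
 * consumer later reads it back at the index of the descriptor it is
 * processing. The index is derived from the ring pointers themselves.
 */
#include <stddef.h>

#define DEMO_RING_SIZE 512               /* number of descriptor slots */

struct demo_ring {
	char *base;                      /* first descriptor in the ring */
	size_t offset;                   /* size of one descriptor slot */
	void *shadow[DEMO_RING_SIZE];    /* request shadow, one per slot */
};

/* Producer: remember the request at the slot of its first result descriptor. */
void demo_req_set(struct demo_ring *r, const char *rdesc, void *req)
{
	size_t i = (size_t)(rdesc - r->base) / r->offset;

	r->shadow[i] = req;
}

/* Consumer: fetch the request for the slot currently being read. */
void *demo_req_get(const struct demo_ring *r, const char *read_ptr)
{
	size_t i = (size_t)(read_ptr - r->base) / r->offset;

	return r->shadow[i];
}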
@@ -509,7 +509,6 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct crypto_async_request *req, *backlog;
 	struct safexcel_context *ctx;
-	struct safexcel_request *request;
 	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

 	/* If a request wasn't properly dequeued because of a lack of resources,
@@ -533,16 +532,10 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 		}

handle_req:
-		request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
-		if (!request)
-			goto request_failed;
-
 		ctx = crypto_tfm_ctx(req->tfm);
-		ret = ctx->send(req, ring, request, &commands, &results);
-		if (ret) {
-			kfree(request);
+		ret = ctx->send(req, ring, &commands, &results);
+		if (ret)
 			goto request_failed;
-		}

 		if (backlog)
 			backlog->complete(backlog, -EINPROGRESS);
@@ -551,14 +544,8 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 		 * to the engine because the input data was cached, continue to
 		 * dequeue other requests as this is valid and not an error.
 		 */
-		if (!commands && !results) {
-			kfree(request);
+		if (!commands && !results)
 			continue;
-		}
-
-		spin_lock_bh(&priv->ring[ring].egress_lock);
-		list_add_tail(&request->list, &priv->ring[ring].list);
-		spin_unlock_bh(&priv->ring[ring].egress_lock);

 		cdesc += commands;
 		rdesc += results;
@@ -576,7 +563,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 	if (!nreq)
 		return;

-	spin_lock_bh(&priv->ring[ring].egress_lock);
+	spin_lock_bh(&priv->ring[ring].lock);

 	priv->ring[ring].requests += nreq;
@@ -585,7 +572,7 @@ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
 		priv->ring[ring].busy = true;
 	}

-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	spin_unlock_bh(&priv->ring[ring].lock);

 	/* let the RDR know we have pending descriptors */
 	writel((rdesc * priv->config.rd_offset) << 2,
@@ -617,6 +604,24 @@ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
 	return -EINVAL;
 }

+inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+				 int ring,
+				 struct safexcel_result_desc *rdesc,
+				 struct crypto_async_request *req)
+{
+	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);
+
+	priv->ring[ring].rdr_req[i] = req;
+}
+
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
+{
+	int i = safexcel_ring_first_rdr_index(priv, ring);
+
+	return priv->ring[ring].rdr_req[i];
+}
+
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
 {
 	struct safexcel_command_desc *cdesc;
@@ -645,21 +650,16 @@ void safexcel_inv_complete(struct crypto_async_request *req, int error)

 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
-			      dma_addr_t ctxr_dma, int ring,
-			      struct safexcel_request *request)
+			      dma_addr_t ctxr_dma, int ring)
 {
 	struct safexcel_command_desc *cdesc;
 	struct safexcel_result_desc *rdesc;
 	int ret = 0;

-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* Prepare command descriptor */
 	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
-	if (IS_ERR(cdesc)) {
-		ret = PTR_ERR(cdesc);
-		goto unlock;
-	}
+	if (IS_ERR(cdesc))
+		return PTR_ERR(cdesc);

 	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
 	cdesc->control_data.options = 0;
@@ -674,21 +674,20 @@ int safexcel_invalidate_cache(struct crypto_async_request *async,
 		goto cdesc_rollback;
 	}

-	request->req = async;
-	goto unlock;
+	safexcel_rdr_req_set(priv, ring, rdesc, async);
+
+	return ret;

cdesc_rollback:
 	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

-unlock:
-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }

 static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
 						     int ring)
 {
-	struct safexcel_request *sreq;
+	struct crypto_async_request *req;
 	struct safexcel_context *ctx;
 	int ret, i, nreq, ndesc, tot_descs, handled = 0;
 	bool should_complete;
@@ -703,28 +702,22 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
 		goto requests_left;

 	for (i = 0; i < nreq; i++) {
-		spin_lock_bh(&priv->ring[ring].egress_lock);
-		sreq = list_first_entry(&priv->ring[ring].list,
-					struct safexcel_request, list);
-		list_del(&sreq->list);
-		spin_unlock_bh(&priv->ring[ring].egress_lock);
-
-		ctx = crypto_tfm_ctx(sreq->req->tfm);
-		ndesc = ctx->handle_result(priv, ring, sreq->req,
+		req = safexcel_rdr_req_get(priv, ring);
+
+		ctx = crypto_tfm_ctx(req->tfm);
+		ndesc = ctx->handle_result(priv, ring, req,
 					   &should_complete, &ret);
 		if (ndesc < 0) {
-			kfree(sreq);
 			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
 			goto acknowledge;
 		}

 		if (should_complete) {
 			local_bh_disable();
-			sreq->req->complete(sreq->req, ret);
+			req->complete(req, ret);
 			local_bh_enable();
 		}

-		kfree(sreq);
-
 		tot_descs += ndesc;
 		handled++;
 	}
@@ -743,7 +736,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
 		goto handle_results;

requests_left:
-	spin_lock_bh(&priv->ring[ring].egress_lock);
+	spin_lock_bh(&priv->ring[ring].lock);

 	priv->ring[ring].requests -= handled;
 	safexcel_try_push_requests(priv, ring);
@@ -751,7 +744,7 @@ static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
 	if (!priv->ring[ring].requests)
 		priv->ring[ring].busy = false;

-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	spin_unlock_bh(&priv->ring[ring].lock);
 }

 static void safexcel_dequeue_work(struct work_struct *work)
@@ -1073,6 +1066,14 @@ static int safexcel_probe(struct platform_device *pdev)
 		if (ret)
 			goto err_reg_clk;

+		priv->ring[i].rdr_req = devm_kzalloc(dev,
+			sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
+			GFP_KERNEL);
+		if (!priv->ring[i].rdr_req) {
+			ret = -ENOMEM;
+			goto err_reg_clk;
+		}
+
 		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
 		if (!ring_irq) {
 			ret = -ENOMEM;
@@ -1108,9 +1109,7 @@ static int safexcel_probe(struct platform_device *pdev)
 		crypto_init_queue(&priv->ring[i].queue,
 				  EIP197_DEFAULT_RING_SIZE);

-		INIT_LIST_HEAD(&priv->ring[i].list);
 		spin_lock_init(&priv->ring[i].lock);
-		spin_lock_init(&priv->ring[i].egress_lock);
 		spin_lock_init(&priv->ring[i].queue_lock);
 	}
......
@@ -512,8 +512,7 @@ struct safexcel_desc_ring {
 	void *write;
 	void *read;

-	/* number of elements used in the ring */
-	unsigned nr;
+	/* descriptor element offset */
 	unsigned offset;
 };
@@ -523,11 +522,6 @@ enum safexcel_alg_type {
 	SAFEXCEL_ALG_TYPE_AHASH,
 };

-struct safexcel_request {
-	struct list_head list;
-	struct crypto_async_request *req;
-};
-
 struct safexcel_config {
 	u32 pes;
 	u32 rings;
@@ -547,9 +541,7 @@ struct safexcel_work_data {

 struct safexcel_ring {
 	spinlock_t lock;
-	spinlock_t egress_lock;
-
-	struct list_head list;
 	struct workqueue_struct *workqueue;
 	struct safexcel_work_data work_data;
@@ -557,6 +549,9 @@ struct safexcel_ring {
 	struct safexcel_desc_ring cdr;
 	struct safexcel_desc_ring rdr;

+	/* result ring crypto API request */
+	struct crypto_async_request **rdr_req;
+
 	/* queue */
 	struct crypto_queue queue;
 	spinlock_t queue_lock;
@@ -618,8 +613,7 @@ struct safexcel_crypto_priv {
 struct safexcel_context {
 	int (*send)(struct crypto_async_request *req, int ring,
-		    struct safexcel_request *request, int *commands,
-		    int *results);
+		    int *commands, int *results);
 	int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
 			     struct crypto_async_request *req, bool *complete,
 			     int *ret);
@@ -668,14 +662,14 @@ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
 int safexcel_invalidate_cache(struct crypto_async_request *async,
 			      struct safexcel_crypto_priv *priv,
-			      dma_addr_t ctxr_dma, int ring,
-			      struct safexcel_request *request);
+			      dma_addr_t ctxr_dma, int ring);
 int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
 				   struct safexcel_desc_ring *cdr,
 				   struct safexcel_desc_ring *rdr);
 int safexcel_select_ring(struct safexcel_crypto_priv *priv);
 void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
 			      struct safexcel_desc_ring *ring);
+void *safexcel_ring_first_rptr(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
 				 struct safexcel_desc_ring *ring);
 struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
@@ -688,6 +682,17 @@ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
 						 int ring_id,
 						 bool first, bool last,
 						 dma_addr_t data, u32 len);
+int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+				  int ring);
+int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+				  int ring,
+				  struct safexcel_result_desc *rdesc);
+void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
+			  int ring,
+			  struct safexcel_result_desc *rdesc,
+			  struct crypto_async_request *req);
+inline struct crypto_async_request *
+safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring);
 void safexcel_inv_complete(struct crypto_async_request *req, int error);
 int safexcel_hmac_setkey(const char *alg, const u8 *key, unsigned int keylen,
 			 void *istate, void *ostate);
......
@@ -336,7 +336,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 	*ret = 0;

-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	do {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
@@ -353,7 +352,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 	} while (!rdesc->last_seg);

 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);

 	if (src == dst) {
 		dma_unmap_sg(priv->dev, src,
@@ -374,7 +372,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 }

 static int safexcel_send_req(struct crypto_async_request *base, int ring,
-			     struct safexcel_request *request,
 			     struct safexcel_cipher_req *sreq,
 			     struct scatterlist *src, struct scatterlist *dst,
 			     unsigned int cryptlen, unsigned int assoclen,
@@ -384,7 +381,7 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	struct safexcel_command_desc *cdesc;
-	struct safexcel_result_desc *rdesc;
+	struct safexcel_result_desc *rdesc, *first_rdesc;
 	struct scatterlist *sg;
 	unsigned int totlen = cryptlen + assoclen;
 	int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = totlen;
@@ -424,8 +421,6 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			      ctx->opad, ctx->state_sz);
 	}

-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* command descriptors */
 	for_each_sg(src, sg, nr_src, i) {
 		int len = sg_dma_len(sg);
@@ -472,12 +467,12 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 			ret = PTR_ERR(rdesc);
 			goto rdesc_rollback;
 		}
+		if (first)
+			first_rdesc = rdesc;
 		n_rdesc++;
 	}

-	spin_unlock_bh(&priv->ring[ring].egress_lock);
-
-	request->req = base;
+	safexcel_rdr_req_set(priv, ring, first_rdesc, base);

 	*commands = n_cdesc;
 	*results = n_rdesc;
@@ -490,8 +485,6 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
 	for (i = 0; i < n_cdesc; i++)
 		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

-	spin_unlock_bh(&priv->ring[ring].egress_lock);
-
 	if (src == dst) {
 		dma_unmap_sg(priv->dev, src,
 			     sg_nents_for_len(src, totlen),
@@ -519,7 +512,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	*ret = 0;

-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	do {
 		rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 		if (IS_ERR(rdesc)) {
@@ -536,7 +528,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	} while (!rdesc->last_seg);

 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);

 	if (ctx->base.exit_inv) {
 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -612,15 +603,13 @@ static int safexcel_aead_handle_result(struct safexcel_crypto_priv *priv,
 }

 static int safexcel_cipher_send_inv(struct crypto_async_request *base,
-				    int ring, struct safexcel_request *request,
-				    int *commands, int *results)
+				    int ring, int *commands, int *results)
 {
 	struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(base->tfm);
 	struct safexcel_crypto_priv *priv = ctx->priv;
 	int ret;

-	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring,
-					request);
+	ret = safexcel_invalidate_cache(base, priv, ctx->base.ctxr_dma, ring);
 	if (unlikely(ret))
 		return ret;
@@ -631,7 +620,6 @@ static int safexcel_cipher_send_inv(struct crypto_async_request *base,
 }

 static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
-				  struct safexcel_request *request,
 				  int *commands, int *results)
 {
 	struct skcipher_request *req = skcipher_request_cast(async);
@@ -643,18 +631,16 @@ static int safexcel_skcipher_send(struct crypto_async_request *async, int ring,
 	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);

 	if (sreq->needs_inv)
-		ret = safexcel_cipher_send_inv(async, ring, request, commands,
-					       results);
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_send_req(async, ring, request, sreq, req->src,
+		ret = safexcel_send_req(async, ring, sreq, req->src,
 					req->dst, req->cryptlen, 0, 0, req->iv,
 					commands, results);

 	return ret;
 }

 static int safexcel_aead_send(struct crypto_async_request *async, int ring,
-			      struct safexcel_request *request, int *commands,
-			      int *results)
+			      int *commands, int *results)
 {
 	struct aead_request *req = aead_request_cast(async);
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
@@ -666,11 +652,10 @@ static int safexcel_aead_send(struct crypto_async_request *async, int ring,
 	BUG_ON(!(priv->flags & EIP197_TRC_CACHE) && sreq->needs_inv);

 	if (sreq->needs_inv)
-		ret = safexcel_cipher_send_inv(async, ring, request, commands,
-					       results);
+		ret = safexcel_cipher_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_send_req(async, ring, request, sreq, req->src,
-					req->dst, req->cryptlen, req->assoclen,
+		ret = safexcel_send_req(async, ring, sreq, req->src, req->dst,
+					req->cryptlen, req->assoclen,
 					crypto_aead_authsize(tfm), req->iv,
 					commands, results);

 	return ret;
......
@@ -160,7 +160,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 	*ret = 0;

-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 	if (IS_ERR(rdesc)) {
 		dev_err(priv->dev,
@@ -171,7 +170,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 	}

 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);

 	if (sreq->nents) {
 		dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
@@ -204,7 +202,6 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
 }

 static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
-				   struct safexcel_request *request,
 				   int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
@@ -251,16 +248,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 		}
 	}

-	spin_lock_bh(&priv->ring[ring].egress_lock);
-
 	/* Add a command descriptor for the cached data, if any */
 	if (cache_len) {
 		req->cache_dma = dma_map_single(priv->dev, req->cache,
 						cache_len, DMA_TO_DEVICE);
-		if (dma_mapping_error(priv->dev, req->cache_dma)) {
-			spin_unlock_bh(&priv->ring[ring].egress_lock);
+		if (dma_mapping_error(priv->dev, req->cache_dma))
 			return -EINVAL;
-		}

 		req->cache_sz = cache_len;
 		first_cdesc = safexcel_add_cdesc(priv, ring, 1,
@@ -333,14 +326,12 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 		goto unmap_result;
 	}

-	spin_unlock_bh(&priv->ring[ring].egress_lock);
+	safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);

 	req->processed[0] += len;
 	if (req->processed[0] < len)
 		req->processed[1]++;

-	request->req = &areq->base;
 	*commands = n_cdesc;
 	*results = 1;
 	return 0;
@@ -360,7 +351,6 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
 		req->cache_sz = 0;
 	}

-	spin_unlock_bh(&priv->ring[ring].egress_lock);
 	return ret;
 }

@@ -398,7 +388,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	*ret = 0;

-	spin_lock_bh(&priv->ring[ring].egress_lock);
 	rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
 	if (IS_ERR(rdesc)) {
 		dev_err(priv->dev,
@@ -409,7 +398,6 @@ static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
 	}

 	safexcel_complete(priv, ring);
-	spin_unlock_bh(&priv->ring[ring].egress_lock);

 	if (ctx->base.exit_inv) {
 		dma_pool_free(priv->context_pool, ctx->base.ctxr,
@@ -460,15 +448,14 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
 }

 static int safexcel_ahash_send_inv(struct crypto_async_request *async,
-				   int ring, struct safexcel_request *request,
-				   int *commands, int *results)
+				   int ring, int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
 	int ret;

 	ret = safexcel_invalidate_cache(async, ctx->priv,
-					ctx->base.ctxr_dma, ring, request);
+					ctx->base.ctxr_dma, ring);
 	if (unlikely(ret))
 		return ret;
@@ -479,19 +466,17 @@ static int safexcel_ahash_send_inv(struct crypto_async_request *async,
 }

 static int safexcel_ahash_send(struct crypto_async_request *async,
-			       int ring, struct safexcel_request *request,
-			       int *commands, int *results)
+			       int ring, int *commands, int *results)
 {
 	struct ahash_request *areq = ahash_request_cast(async);
 	struct safexcel_ahash_req *req = ahash_request_ctx(areq);
 	int ret;

 	if (req->needs_inv)
-		ret = safexcel_ahash_send_inv(async, ring, request,
-					      commands, results);
+		ret = safexcel_ahash_send_inv(async, ring, commands, results);
 	else
-		ret = safexcel_ahash_send_req(async, ring, request,
-					      commands, results);
+		ret = safexcel_ahash_send_req(async, ring, commands, results);

 	return ret;
 }
......
@@ -24,7 +24,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
 	if (!cdr->base)
 		return -ENOMEM;
 	cdr->write = cdr->base;
-	cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
+	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
 	cdr->read = cdr->base;

 	rdr->offset = sizeof(u32) * priv->config.rd_offset;
@@ -34,7 +34,7 @@ int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
 	if (!rdr->base)
 		return -ENOMEM;
 	rdr->write = rdr->base;
-	rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
 	rdr->read = rdr->base;

 	return 0;
@@ -50,14 +50,15 @@ static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
 {
 	void *ptr = ring->write;

-	if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
+	if ((ring->write == ring->read - ring->offset) ||
+	    (ring->read == ring->base && ring->write == ring->base_end))
 		return ERR_PTR(-ENOMEM);

-	ring->write += ring->offset;
 	if (ring->write == ring->base_end)
 		ring->write = ring->base;
+	else
+		ring->write += ring->offset;

-	ring->nr++;
 	return ptr;
 }

@@ -66,29 +67,52 @@ void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
 {
 	void *ptr = ring->read;

-	if (!ring->nr)
+	if (ring->write == ring->read)
 		return ERR_PTR(-ENOENT);

-	ring->read += ring->offset;
 	if (ring->read == ring->base_end)
 		ring->read = ring->base;
+	else
+		ring->read += ring->offset;

-	ring->nr--;
 	return ptr;
 }

+inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
+				     int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return rdr->read;
+}
+
+inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
+					 int ring)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return (rdr->read - rdr->base) / rdr->offset;
+}
+
+inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
+					 int ring,
+					 struct safexcel_result_desc *rdesc)
+{
+	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;
+
+	return ((void *)rdesc - rdr->base) / rdr->offset;
+}
+
 void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
 				 struct safexcel_desc_ring *ring)
 {
-	if (!ring->nr)
+	if (ring->write == ring->read)
 		return;

 	if (ring->write == ring->base)
-		ring->write = ring->base_end - ring->offset;
+		ring->write = ring->base_end;
 	else
 		ring->write -= ring->offset;
-
-	ring->nr--;
 }

 struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
......
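The new empty/full test in safexcel_ring_next_wptr() above relies on the classic trick of leaving one ring slot unused, so the read and write pointers alone disambiguate the two states and the per-ring element counter (and the lock protecting it) can be dropped. A minimal stand-alone sketch of the same idea follows; the demo_* names are hypothetical and not driver code.

/*
 * Stand-alone sketch of empty/full detection with one slot left unused:
 * "write == read" means empty, "write one slot behind read" means full.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_idx_ring {
	size_t read;    /* next slot the consumer will take */
	size_t write;   /* next slot the producer will fill */
	size_t size;    /* total slots; usable capacity is size - 1 */
};

bool demo_ring_empty(const struct demo_idx_ring *r)
{
	return r->write == r->read;
}

bool demo_ring_full(const struct demo_idx_ring *r)
{
	return (r->write + 1) % r->size == r->read;
}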